Import http://svn.github.com/boto/boto.git at revision 2005
git-svn-id: svn://svn.chromium.org/boto@1 4f2e627c-b00b-48dd-b1fb-2c643665b734
diff --git a/README b/README
new file mode 100644
index 0000000..35b7232
--- /dev/null
+++ b/README
@@ -0,0 +1,65 @@
+boto 2.0b4
+13-Feb-2011
+
+Copyright (c) 2006-2011 Mitch Garnaat <mitch@garnaat.org>
+Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+All rights reserved.
+
+http://code.google.com/p/boto
+
+Boto is a Python package that provides interfaces to Amazon Web Services.
+At the moment, boto supports:
+
+ * Simple Storage Service (S3)
+ * SimpleQueue Service (SQS)
+ * Elastic Compute Cloud (EC2)
+ * Mechanical Turk
+ * SimpleDB
+ * CloudFront
+ * CloudWatch
+ * AutoScale
+ * Elastic Load Balancer (ELB)
+ * Virtual Private Cloud (VPC)
+ * Elastic Map Reduce (EMR)
+ * Relational Data Service (RDS)
+ * Simple Notification Service (SNS)
+ * Google Storage
+ * Identity and Access Management (IAM)
+ * Route53 DNS Service (route53)
+ * Simple Email Service (SES)
+
+The intent is to support additional services in the future.
+
+The goal of boto is to provide a very simple, easy to use, lightweight
+wrapper around the Amazon services. Not all features supported by the
+Amazon Web Services will be supported in boto. Basically, those
+features I need to do what I want to do are supported first. Other
+features and requests are welcome and will be accommodated to the best
+of my ability. Patches and contributions are welcome!
+
+Boto was written using Python 2.6.5 on Mac OSX. It has also been tested
+on Linux Ubuntu using Python 2.6.5. Boto requires no additional
+libraries or packages other than those that are distributed with Python 2.6.5.
+Efforts are made to keep boto compatible with Python 2.4.x but no
+guarantees are made.
+
+Documentation for boto can be found at:
+
+http://boto.cloudhackers.com/
+
+Join our `IRC channel`_ (#boto on FreeNode).
+ IRC channel: http://webchat.freenode.net/?channels=boto
+
+Your credentials can be passed into the methods that create
+connections. Alternatively, boto will check for the existence of the
+following environment variables to ascertain your credentials:
+
+AWS_ACCESS_KEY_ID - Your AWS Access Key ID
+AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
+
+Credentials and other boto-related settings can also be stored in a boto config
+file. See:
+
+http://code.google.com/p/boto/wiki/BotoConfig
+
+for details.
\ No newline at end of file
diff --git a/bin/bundle_image b/bin/bundle_image
new file mode 100755
index 0000000..7096979
--- /dev/null
+++ b/bin/bundle_image
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+from boto.manage.server import Server
+if __name__ == "__main__":
+ from optparse import OptionParser
+ parser = OptionParser(version="%prog 1.0", usage="Usage: %prog [options] instance-id [instance-id-2]")
+
+ # Commands
+ parser.add_option("-b", "--bucket", help="Destination Bucket", dest="bucket", default=None)
+ parser.add_option("-p", "--prefix", help="AMI Prefix", dest="prefix", default=None)
+ parser.add_option("-k", "--key", help="Private Key File", dest="key_file", default=None)
+ parser.add_option("-c", "--cert", help="Public Certificate File", dest="cert_file", default=None)
+ parser.add_option("-s", "--size", help="AMI Size", dest="size", default=None)
+ parser.add_option("-i", "--ssh-key", help="SSH Keyfile", dest="ssh_key", default=None)
+ parser.add_option("-u", "--user-name", help="SSH Username", dest="uname", default="root")
+ parser.add_option("-n", "--name", help="Name of Image", dest="name")
+ (options, args) = parser.parse_args()
+
+ for instance_id in args:
+ try:
+ s = Server.find(instance_id=instance_id).next()
+ print "Found old server object"
+ except StopIteration:
+ print "New Server Object Created"
+ s = Server.create_from_instance_id(instance_id, options.name)
+ assert(s.hostname is not None)
+ b = s.get_bundler(uname=options.uname)
+ b.bundle(bucket=options.bucket,prefix=options.prefix,key_file=options.key_file,cert_file=options.cert_file,size=int(options.size),ssh_key=options.ssh_key)
diff --git a/bin/cfadmin b/bin/cfadmin
new file mode 100755
index 0000000..97726c1
--- /dev/null
+++ b/bin/cfadmin
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+# Author: Chris Moyer
+#
+# cfadmin is similar to sdbadmin for CloudFront, it's a simple
+# console utility to perform the most frequent tasks with CloudFront
+#
+def _print_distributions(dists):
+ """Internal function to print out all the distributions provided"""
+ print "%-12s %-50s %s" % ("Status", "Domain Name", "Origin")
+ print "-"*80
+ for d in dists:
+ print "%-12s %-50s %-30s" % (d.status, d.domain_name, d.origin)
+ for cname in d.cnames:
+ print " "*12, "CNAME => %s" % cname
+ print ""
+
+def help(cf, fnc=None):
+ """Print help message, optionally about a specific function"""
+ import inspect
+ self = sys.modules['__main__']
+ if fnc:
+ try:
+ cmd = getattr(self, fnc)
+ except:
+ cmd = None
+ if not inspect.isfunction(cmd):
+ print "No function named: %s found" % fnc
+ sys.exit(2)
+ (args, varargs, varkw, defaults) = inspect.getargspec(cmd)
+ print cmd.__doc__
+ print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args[1:]]))
+ else:
+ print "Usage: cfadmin [command]"
+ for cname in dir(self):
+ if not cname.startswith("_"):
+ cmd = getattr(self, cname)
+ if inspect.isfunction(cmd):
+ doc = cmd.__doc__
+ print "\t%s - %s" % (cname, doc)
+ sys.exit(1)
+
+def ls(cf):
+ """List all distributions and streaming distributions"""
+ print "Standard Distributions"
+ _print_distributions(cf.get_all_distributions())
+ print "Streaming Distributions"
+ _print_distributions(cf.get_all_streaming_distributions())
+
+def invalidate(cf, origin_or_id, *paths):
+ """Create a cloudfront invalidation request"""
+ if not paths:
+ print "Usage: cfadmin invalidate distribution_origin_or_id [path] [path2]..."
+ sys.exit(1)
+ dist = None
+ for d in cf.get_all_distributions():
+ if d.id == origin_or_id or d.origin.dns_name == origin_or_id:
+ dist = d
+ break
+ if not dist:
+ print "Distribution not found: %s" % origin_or_id
+ sys.exit(1)
+ cf.create_invalidation_request(dist.id, paths)
+
+if __name__ == "__main__":
+ import boto
+ import sys
+ cf = boto.connect_cloudfront()
+ self = sys.modules['__main__']
+ if len(sys.argv) >= 2:
+ try:
+ cmd = getattr(self, sys.argv[1])
+ except:
+ cmd = None
+ args = sys.argv[2:]
+ else:
+ cmd = help
+ args = []
+ if not cmd:
+ cmd = help
+ try:
+ cmd(cf, *args)
+ except TypeError, e:
+ print e
+ help(cf, cmd.__name__)
diff --git a/bin/cq b/bin/cq
new file mode 100755
index 0000000..258002d
--- /dev/null
+++ b/bin/cq
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt, sys
+from boto.sqs.connection import SQSConnection
+from boto.exception import SQSError
+
+def usage():
+ print 'cq [-c] [-q queue_name] [-o output_file] [-t timeout]'
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'hcq:o:t:',
+ ['help', 'clear', 'queue',
+ 'output', 'timeout'])
+ except:
+ usage()
+ sys.exit(2)
+ queue_name = ''
+ output_file = ''
+ timeout = 30
+ clear = False
+ for o, a in opts:
+ if o in ('-h', '--help'):
+ usage()
+ sys.exit()
+ if o in ('-q', '--queue'):
+ queue_name = a
+ if o in ('-o', '--output'):
+ output_file = a
+ if o in ('-c', '--clear'):
+ clear = True
+ if o in ('-t', '--timeout'):
+ timeout = int(a)
+ c = SQSConnection()
+ if queue_name:
+ try:
+ rs = [c.create_queue(queue_name)]
+ except SQSError, e:
+ print 'An Error Occurred:'
+ print '%s: %s' % (e.status, e.reason)
+ print e.body
+ sys.exit()
+ else:
+ try:
+ rs = c.get_all_queues()
+ except SQSError, e:
+ print 'An Error Occurred:'
+ print '%s: %s' % (e.status, e.reason)
+ print e.body
+ sys.exit()
+ for q in rs:
+ if clear:
+ n = q.clear()
+ print 'clearing %d messages from %s' % (n, q.id)
+ elif output_file:
+ q.dump(output_file)
+ else:
+ print q.id, q.count(vtimeout=timeout)
+
+if __name__ == "__main__":
+ main()
+
diff --git a/bin/elbadmin b/bin/elbadmin
new file mode 100755
index 0000000..a5ec6bb
--- /dev/null
+++ b/bin/elbadmin
@@ -0,0 +1,219 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Elastic Load Balancer Tool
+#
+VERSION="0.1"
+usage = """%prog [options] [command]
+Commands:
+ list|ls List all Elastic Load Balancers
+ delete <name> Delete ELB <name>
+ get <name> Get all instances associated with <name>
+ create <name> Create an ELB
+ add <name> <instance> Add <instance> in ELB <name>
+ remove|rm <name> <instance> Remove <instance> from ELB <name>
+ enable|en <name> <zone> Enable Zone <zone> for ELB <name>
+ disable <name> <zone> Disable Zone <zone> for ELB <name>
+ addl <name> Add listeners (specified by -l) to the ELB <name>
+ rml <name> <port> Remove Listener(s) specified by the port on the ELB
+"""
+
+def list(elb):
+ """List all ELBs"""
+ print "%-20s %s" % ("Name", "DNS Name")
+ print "-"*80
+ for b in elb.get_all_load_balancers():
+ print "%-20s %s" % (b.name, b.dns_name)
+
+def get(elb, name):
+ """Get details about ELB <name>"""
+ elbs = elb.get_all_load_balancers(name)
+ if len(elbs) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ for b in elbs:
+ if name in b.name:
+ print "="*80
+ print "Name: %s" % b.name
+ print "DNS Name: %s" % b.dns_name
+
+ print
+
+ print "Listeners"
+ print "---------"
+ print "%-8s %-8s %s" % ("IN", "OUT", "PROTO")
+ for l in b.listeners:
+ print "%-8s %-8s %s" % (l[0], l[1], l[2])
+
+ print
+
+ print " Zones "
+ print "---------"
+ for z in b.availability_zones:
+ print z
+
+ print
+
+ print "Instances"
+ print "---------"
+ for i in b.instances:
+ print i.id
+
+ print
+
+def create(elb, name, zones, listeners):
+ """Create an ELB named <name>"""
+ l_list = []
+ for l in listeners:
+ l = l.split(",")
+ if l[2]=='HTTPS':
+ l_list.append((int(l[0]), int(l[1]), l[2], l[3]))
+ else : l_list.append((int(l[0]), int(l[1]), l[2]))
+
+ b = elb.create_load_balancer(name, zones, l_list)
+ return get(elb, name)
+
+def delete(elb, name):
+ """Delete this ELB"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ for i in b:
+ if name in i.name:
+ i.delete()
+ print "Load Balancer %s deleted" % name
+
+def add_instance(elb, name, instance):
+ """Add <instance> to ELB <name>"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ for i in b:
+ if name in i.name:
+ i.register_instances([instance])
+ return get(elb, name)
+
+
+def remove_instance(elb, name, instance):
+ """Remove instance from elb <name>"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ for i in b:
+ if name in i.name:
+ i.deregister_instances([instance])
+ return get(elb, name)
+
+def enable_zone(elb, name, zone):
+ """Enable <zone> for elb"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ b = b[0]
+ b.enable_zones([zone])
+ return get(elb, name)
+
+def disable_zone(elb, name, zone):
+ """Disable <zone> for elb"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ b = b[0]
+ b.disable_zones([zone])
+ return get(elb, name)
+
+def add_listener(elb, name, listeners):
+ """Add listeners to a given load balancer"""
+ l_list = []
+ for l in listeners:
+ l = l.split(",")
+ l_list.append((int(l[0]), int(l[1]), l[2]))
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ b = b[0]
+ b.create_listeners(l_list)
+ return get(elb, name)
+
+def rm_listener(elb, name, ports):
+ """Remove listeners from a given load balancer"""
+ b = elb.get_all_load_balancers(name)
+ if len(b) < 1:
+ print "No load balancer by the name of %s found" % name
+ return
+ b = b[0]
+ b.delete_listeners(ports)
+ return get(elb, name)
+
+
+
+
+if __name__ == "__main__":
+ try:
+ import readline
+ except ImportError:
+ pass
+ import boto
+ import sys
+ from optparse import OptionParser
+ from boto.mashups.iobject import IObject
+ parser = OptionParser(version=VERSION, usage=usage)
+ parser.add_option("-z", "--zone", help="Operate on zone", action="append", default=[], dest="zones")
+ parser.add_option("-l", "--listener", help="Specify Listener in,out,proto", action="append", default=[], dest="listeners")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) < 1:
+ parser.print_help()
+ sys.exit(1)
+
+ elb = boto.connect_elb()
+
+ print "%s" % (elb.region.endpoint)
+
+ command = args[0].lower()
+ if command in ("ls", "list"):
+ list(elb)
+ elif command == "get":
+ get(elb, args[1])
+ elif command == "create":
+ create(elb, args[1], options.zones, options.listeners)
+ elif command == "delete":
+ delete(elb, args[1])
+ elif command in ("add", "put"):
+ add_instance(elb, args[1], args[2])
+ elif command in ("rm", "remove"):
+ remove_instance(elb, args[1], args[2])
+ elif command in ("en", "enable"):
+ enable_zone(elb, args[1], args[2])
+ elif command == "disable":
+ disable_zone(elb, args[1], args[2])
+ elif command == "addl":
+ add_listener(elb, args[1], options.listeners)
+ elif command == "rml":
+ rm_listener(elb, args[1], args[2:])
diff --git a/bin/fetch_file b/bin/fetch_file
new file mode 100755
index 0000000..6b8c4da
--- /dev/null
+++ b/bin/fetch_file
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+if __name__ == "__main__":
+ from optparse import OptionParser
+ parser = OptionParser(version="0.1", usage="Usage: %prog [options] url")
+ parser.add_option("-o", "--out-file", help="Output file", dest="outfile")
+
+ (options, args) = parser.parse_args()
+ if len(args) < 1:
+ parser.print_help()
+ exit(1)
+ from boto.utils import fetch_file
+ f = fetch_file(args[0])
+ if options.outfile:
+ open(options.outfile, "w").write(f.read())
+ else:
+ print f.read()
diff --git a/bin/kill_instance b/bin/kill_instance
new file mode 100755
index 0000000..0c63741
--- /dev/null
+++ b/bin/kill_instance
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+import sys
+from optparse import OptionParser
+
+import boto
+from boto.ec2 import regions
+
+
+
+def kill_instance(region, ids):
+ """Kill an instances given it's instance IDs"""
+ # Connect the region
+ ec2 = boto.connect_ec2(region=region)
+ for instance_id in ids:
+ print "Stopping instance: %s" % instance_id
+ ec2.terminate_instances([instance_id])
+
+
+if __name__ == "__main__":
+ parser = OptionParser(usage="kill_instance [-r] id [id ...]")
+ parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
+ (options, args) = parser.parse_args()
+ if not args:
+ parser.print_help()
+ sys.exit(1)
+ for r in regions():
+ if r.name == options.region:
+ region = r
+ break
+ else:
+ print "Region %s not found." % options.region
+ sys.exit(1)
+
+ kill_instance(region, args)
diff --git a/bin/launch_instance b/bin/launch_instance
new file mode 100755
index 0000000..cb857d9
--- /dev/null
+++ b/bin/launch_instance
@@ -0,0 +1,197 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Utility to launch an EC2 Instance
+#
+VERSION="0.2"
+
+
+CLOUD_INIT_SCRIPT = """#!/usr/bin/env python
+f = open("/etc/boto.cfg", "w")
+f.write(\"\"\"%s\"\"\")
+f.close()
+"""
+import boto.pyami.config
+from boto.utils import fetch_file
+import re, os
+import ConfigParser
+
+class Config(boto.pyami.config.Config):
+ """A special config class that also adds import abilities
+ Directly in the config file. To have a config file import
+ another config file, simply use "#import <path>" where <path>
+ is either a relative path or a full URL to another config
+ """
+
+ def __init__(self):
+ ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami', 'debug' : '0'})
+
+ def add_config(self, file_url):
+ """Add a config file to this configuration
+ :param file_url: URL for the file to add, or a local path
+ :type file_url: str
+ """
+ if not re.match("^([a-zA-Z0-9]*:\/\/)(.*)", file_url):
+ if not file_url.startswith("/"):
+ file_url = os.path.join(os.getcwd(), file_url)
+ file_url = "file://%s" % file_url
+ (base_url, file_name) = file_url.rsplit("/", 1)
+ base_config = fetch_file(file_url)
+ base_config.seek(0)
+ for line in base_config.readlines():
+ match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
+ if match:
+ self.add_config("%s/%s" % (base_url, match.group(1)))
+ base_config.seek(0)
+ self.readfp(base_config)
+
+ def add_creds(self, ec2):
+ """Add the credentials to this config if they don't already exist"""
+ if not self.has_section('Credentials'):
+ self.add_section('Credentials')
+ self.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
+ self.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
+
+
+ def __str__(self):
+ """Get config as string"""
+ from StringIO import StringIO
+ s = StringIO()
+ self.write(s)
+ return s.getvalue()
+
+
+if __name__ == "__main__":
+ try:
+ import readline
+ except ImportError:
+ pass
+ import sys
+ import time
+ import boto
+ from boto.ec2 import regions
+ from optparse import OptionParser
+ from boto.mashups.iobject import IObject
+ parser = OptionParser(version=VERSION, usage="%prog [options] config_url")
+ parser.add_option("-c", "--max-count", help="Maximum number of this type of instance to launch", dest="max_count", default="1")
+ parser.add_option("--min-count", help="Minimum number of this type of instance to launch", dest="min_count", default="1")
+ parser.add_option("--cloud-init", help="Indicates that this is an instance that uses 'CloudInit', Ubuntu's cloud bootstrap process. This wraps the config in a shell script command instead of just passing it in directly", dest="cloud_init", default=False, action="store_true")
+ parser.add_option("-g", "--groups", help="Security Groups to add this instance to", action="append", dest="groups")
+ parser.add_option("-a", "--ami", help="AMI to launch", dest="ami_id")
+ parser.add_option("-t", "--type", help="Type of Instance (default m1.small)", dest="type", default="m1.small")
+ parser.add_option("-k", "--key", help="Keypair", dest="key_name")
+ parser.add_option("-z", "--zone", help="Zone (default us-east-1a)", dest="zone", default="us-east-1a")
+ parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
+ parser.add_option("-i", "--ip", help="Elastic IP", dest="elastic_ip")
+ parser.add_option("-n", "--no-add-cred", help="Don't add a credentials section", default=False, action="store_true", dest="nocred")
+ parser.add_option("--save-ebs", help="Save the EBS volume on shutdown, instead of deleting it", default=False, action="store_true", dest="save_ebs")
+ parser.add_option("-w", "--wait", help="Wait until instance is running", default=False, action="store_true", dest="wait")
+ parser.add_option("-d", "--dns", help="Returns public and private DNS (implicates --wait)", default=False, action="store_true", dest="dns")
+ parser.add_option("-T", "--tag", help="Set tag", default=None, action="append", dest="tags", metavar="key:value")
+
+ (options, args) = parser.parse_args()
+
+ if len(args) < 1:
+ parser.print_help()
+ sys.exit(1)
+ file_url = os.path.expanduser(args[0])
+
+ cfg = Config()
+ cfg.add_config(file_url)
+
+ for r in regions():
+ if r.name == options.region:
+ region = r
+ break
+ else:
+ print "Region %s not found." % options.region
+ sys.exit(1)
+ ec2 = boto.connect_ec2(region=region)
+ if not options.nocred:
+ cfg.add_creds(ec2)
+
+ iobj = IObject()
+ if options.ami_id:
+ ami = ec2.get_image(options.ami_id)
+ else:
+ ami_id = options.ami_id
+ l = [(a, a.id, a.location) for a in ec2.get_all_images()]
+ ami = iobj.choose_from_list(l, prompt='Choose AMI')
+
+ if options.key_name:
+ key_name = options.key_name
+ else:
+ l = [(k, k.name, '') for k in ec2.get_all_key_pairs()]
+ key_name = iobj.choose_from_list(l, prompt='Choose Keypair').name
+
+ if options.groups:
+ groups = options.groups
+ else:
+ groups = []
+ l = [(g, g.name, g.description) for g in ec2.get_all_security_groups()]
+ g = iobj.choose_from_list(l, prompt='Choose Primary Security Group')
+ while g != None:
+ groups.append(g)
+ l.remove((g, g.name, g.description))
+ g = iobj.choose_from_list(l, prompt='Choose Additional Security Group (0 to quit)')
+
+ user_data = str(cfg)
+ # If it's a cloud init AMI,
+ # then we need to wrap the config in our
+ # little wrapper shell script
+ if options.cloud_init:
+ user_data = CLOUD_INIT_SCRIPT % user_data
+ shutdown_proc = "terminate"
+ if options.save_ebs:
+ shutdown_proc = "save"
+
+ r = ami.run(min_count=int(options.min_count), max_count=int(options.max_count),
+ key_name=key_name, user_data=user_data,
+ security_groups=groups, instance_type=options.type,
+ placement=options.zone, instance_initiated_shutdown_behavior=shutdown_proc)
+
+ instance = r.instances[0]
+
+ if options.tags:
+ for tag_pair in options.tags:
+ name = tag_pair
+ value = ''
+ if ':' in tag_pair:
+ name, value = tag_pair.split(':', 1)
+ instance.add_tag(name, value)
+
+ if options.dns:
+ options.wait = True
+
+ if not options.wait:
+ sys.exit(0)
+
+ while True:
+ instance.update()
+ if instance.state == 'running':
+ break
+ time.sleep(3)
+
+ if options.dns:
+ print "Public DNS name: %s" % instance.public_dns_name
+ print "Private DNS name: %s" % instance.private_dns_name
+
diff --git a/bin/list_instances b/bin/list_instances
new file mode 100755
index 0000000..5abe9b6
--- /dev/null
+++ b/bin/list_instances
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+import sys
+from operator import attrgetter
+from optparse import OptionParser
+
+import boto
+from boto.ec2 import regions
+
+
+HEADERS = {
+ 'ID': {'get': attrgetter('id'), 'length':15},
+ 'Zone': {'get': attrgetter('placement'), 'length':15},
+ 'Groups': {'get': attrgetter('groups'), 'length':30},
+ 'Hostname': {'get': attrgetter('public_dns_name'), 'length':50},
+ 'State': {'get': attrgetter('state'), 'length':15},
+ 'Image': {'get': attrgetter('image_id'), 'length':15},
+ 'Type': {'get': attrgetter('instance_type'), 'length':15},
+ 'IP': {'get': attrgetter('ip_address'), 'length':16},
+ 'PrivateIP': {'get': attrgetter('private_ip_address'), 'length':16},
+ 'Key': {'get': attrgetter('key_name'), 'length':25},
+ 'T:': {'length': 30},
+}
+
+def get_column(name, instance=None):
+ if name.startswith('T:'):
+ _, tag = name.split(':', 1)
+ return instance.tags.get(tag, '')
+ return HEADERS[name]['get'](instance)
+
+
+def main():
+ parser = OptionParser()
+ parser.add_option("-r", "--region", help="Region (default us-east-1)", dest="region", default="us-east-1")
+ parser.add_option("-H", "--headers", help="Set headers (use 'T:tagname' for including tags)", default=None, action="store", dest="headers", metavar="ID,Zone,Groups,Hostname,State,T:Name")
+ (options, args) = parser.parse_args()
+
+ # Connect the region
+ for r in regions():
+ if r.name == options.region:
+ region = r
+ break
+ else:
+ print "Region %s not found." % options.region
+ sys.exit(1)
+ ec2 = boto.connect_ec2(region=region)
+
+ # Read headers
+ if options.headers:
+ headers = tuple(options.headers.split(','))
+ else:
+ headers = ("ID", 'Zone', "Groups", "Hostname")
+
+ # Create format string
+ format_string = ""
+ for h in headers:
+ if h.startswith('T:'):
+ format_string += "%%-%ds" % HEADERS['T:']['length']
+ else:
+ format_string += "%%-%ds" % HEADERS[h]['length']
+
+
+ # List and print
+ print format_string % headers
+ print "-" * len(format_string % headers)
+ for r in ec2.get_all_instances():
+ groups = [g.id for g in r.groups]
+ for i in r.instances:
+ i.groups = ','.join(groups)
+ print format_string % tuple(get_column(h, i) for h in headers)
+
+if __name__ == "__main__":
+ main()
diff --git a/bin/lss3 b/bin/lss3
new file mode 100755
index 0000000..1fba89d
--- /dev/null
+++ b/bin/lss3
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+import boto
+
+def sizeof_fmt(num):
+ for x in ['b ','KB','MB','GB','TB', 'XB']:
+ if num < 1024.0:
+ return "%3.1f %s" % (num, x)
+ num /= 1024.0
+ return "%3.1f %s" % (num, x)
+
+def list_bucket(b):
+ """List everything in a bucket"""
+ total = 0
+ for k in b:
+ mode = "-rwx---"
+ for g in k.get_acl().acl.grants:
+ if g.id == None:
+ if g.permission == "READ":
+ mode = "-rwxr--"
+ elif g.permission == "FULL_CONTROL":
+ mode = "-rwxrwx"
+ print "%s\t%010s\t%s" % (mode, sizeof_fmt(k.size), k.name)
+ total += k.size
+ print "="*60
+ print "TOTAL: \t%010s" % sizeof_fmt(total)
+
+def list_buckets(s3):
+ """List all the buckets"""
+ for b in s3.get_all_buckets():
+ print b.name
+
+if __name__ == "__main__":
+ import sys
+ s3 = boto.connect_s3()
+ if len(sys.argv) < 2:
+ list_buckets(s3)
+ else:
+ for name in sys.argv[1:]:
+ list_bucket(s3.get_bucket(name))
diff --git a/bin/pyami_sendmail b/bin/pyami_sendmail
new file mode 100755
index 0000000..78e3003
--- /dev/null
+++ b/bin/pyami_sendmail
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Send Mail from a PYAMI instance, or anything that has a boto.cfg
+# properly set up
+#
+VERSION="0.1"
+usage = """%prog [options]
+Sends whatever is on stdin to the recipient specified by your boto.cfg
+or whoever you specify in the options here.
+"""
+
+if __name__ == "__main__":
+ from boto.utils import notify
+ import sys
+ from optparse import OptionParser
+ parser = OptionParser(version=VERSION, usage=usage)
+ parser.add_option("-t", "--to", help="Optional to address to send to (default from your boto.cfg)", action="store", default=None, dest="to")
+ parser.add_option("-s", "--subject", help="Optional Subject to send this report as", action="store", default="Report", dest="subject")
+ parser.add_option("-f", "--file", help="Optionally, read from a file instead of STDIN", action="store", default=None, dest="file")
+
+ (options, args) = parser.parse_args()
+ if options.file:
+ body = open(options.file, 'r').read()
+ else:
+ body = sys.stdin.read()
+
+ notify(options.subject, body=body, to_string=options.to)
diff --git a/bin/route53 b/bin/route53
new file mode 100755
index 0000000..55f86a5
--- /dev/null
+++ b/bin/route53
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# Author: Chris Moyer
+#
+# route53 is similar to sdbadmin for Route53, it's a simple
+# console utility to perform the most frequent tasks with Route53
+
+def _print_zone_info(zoneinfo):
+    """Pretty-print one hosted-zone dict (Id, Name, CallerReference, Config)."""
+    print "="*80
+    # The Id comes back in path form (e.g. "/hostedzone/XXXX"); show only
+    # the final component, which is what the other commands accept.
+    print "| ID: %s" % zoneinfo['Id'].split("/")[-1]
+    print "| Name: %s" % zoneinfo['Name']
+    print "| Ref: %s" % zoneinfo['CallerReference']
+    print "="*80
+    print zoneinfo['Config']
+    print
+
+
+def create(conn, hostname, caller_reference=None, comment=''):
+    """Create a hosted zone, returning the nameservers"""
+    response = conn.create_hosted_zone(hostname, caller_reference, comment)
+    # Zone creation is asynchronous; the operator must delegate the domain
+    # to the returned name servers for it to go live.
+    print "Pending, please add the following Name Servers:"
+    for ns in response.NameServers:
+        print "\t", ns
+
+def delete_zone(conn, hosted_zone_id):
+    """Delete a hosted zone by ID"""
+    # Echo the raw API response so the operator can inspect the change info.
+    print conn.delete_hosted_zone(hosted_zone_id)
+
+def ls(conn):
+    """List all hosted zones"""
+    response = conn.get_all_hosted_zones()
+    # The response is a nested dict mirroring the ListHostedZones XML payload.
+    for zoneinfo in response['ListHostedZonesResponse']['HostedZones']:
+        _print_zone_info(zoneinfo)
+
+def get(conn, hosted_zone_id, type=None, name=None, maxitems=None):
+    """Get all the records for a single zone"""
+    # NOTE: "type" intentionally shadows the builtin; the parameter name is
+    # user-visible (help() prints the argspec), so it is kept as-is.
+    response = conn.get_all_rrsets(hosted_zone_id, type, name, maxitems=maxitems)
+    # Simple table: header row, then one line per record set.
+    print '%-20s %-20s %-20s %s' % ("Name", "Type", "TTL", "Value(s)")
+    for record in response:
+        print '%-20s %-20s %-20s %s' % (record.name, record.type, record.ttl, ",".join(record.resource_records))
+
+
+def add_record(conn, hosted_zone_id, name, type, value, ttl=600, comment=""):
+    """Add a new record to a zone"""
+    from boto.route53.record import ResourceRecordSets
+    # Submit a single-change batch with action CREATE and echo the result.
+    changes = ResourceRecordSets(conn, hosted_zone_id, comment)
+    change = changes.add_change("CREATE", name, type, ttl)
+    change.add_value(value)
+    print changes.commit()
+
+def del_record(conn, hosted_zone_id, name, type, value, ttl=600, comment=""):
+    """Delete a record from a zone"""
+    from boto.route53.record import ResourceRecordSets
+    # Build a single-change DELETE batch, submit it, and echo the result.
+    batch = ResourceRecordSets(conn, hosted_zone_id, comment)
+    record = batch.add_change("DELETE", name, type, ttl)
+    record.add_value(value)
+    print batch.commit()
+
+def help(conn, fnc=None):
+    """Prints this help message"""
+    import inspect
+    # Commands are simply module-level functions; look them up on __main__.
+    self = sys.modules['__main__']
+    if fnc:
+        try:
+            cmd = getattr(self, fnc)
+        except:
+            cmd = None
+        if not inspect.isfunction(cmd):
+            print "No function named: %s found" % fnc
+            sys.exit(2)
+        (args, varargs, varkw, defaults) = inspect.getargspec(cmd)
+        print cmd.__doc__
+        # args[1:] skips the first parameter (the connection), which the
+        # dispatcher supplies automatically.
+        print "Usage: %s %s" % (fnc, " ".join([ "[%s]" % a for a in args[1:]]))
+    else:
+        print "Usage: route53 [command]"
+        for cname in dir(self):
+            if not cname.startswith("_"):
+                cmd = getattr(self, cname)
+                if inspect.isfunction(cmd):
+                    doc = cmd.__doc__
+                    print "\t%s - %s" % (cname, doc)
+    sys.exit(1)
+
+
+if __name__ == "__main__":
+    import boto
+    import sys
+    conn = boto.connect_route53()
+    # Dispatch: the first CLI argument names a module-level function, which
+    # receives the connection plus any remaining arguments.
+    self = sys.modules['__main__']
+    if len(sys.argv) >= 2:
+        try:
+            cmd = getattr(self, sys.argv[1])
+        except:
+            cmd = None
+        args = sys.argv[2:]
+    else:
+        cmd = help
+        args = []
+    if not cmd:
+        cmd = help
+    try:
+        cmd(conn, *args)
+    except TypeError, e:
+        # Wrong number of arguments for the command: show its usage.
+        print e
+        help(conn, cmd.__name__)
diff --git a/bin/s3put b/bin/s3put
new file mode 100755
index 0000000..b5467d9
--- /dev/null
+++ b/bin/s3put
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt, sys, os
+import boto
+from boto.exception import S3ResponseError
+
+usage_string = """
+SYNOPSIS
+ s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
+ -b/--bucket <bucket_name> [-c/--callback <num_cb>]
+ [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
+ [-n/--no_op] [-p/--prefix <prefix>] [-q/--quiet]
+ [-g/--grant grant] [-w/--no_overwrite] path
+
+ Where
+ access_key - Your AWS Access Key ID. If not supplied, boto will
+ use the value of the environment variable
+ AWS_ACCESS_KEY_ID
+ secret_key - Your AWS Secret Access Key. If not supplied, boto
+ will use the value of the environment variable
+ AWS_SECRET_ACCESS_KEY
+ bucket_name - The name of the S3 bucket the file(s) should be
+ copied to.
+ path - A path to a directory or file that represents the items
+ to be uploaded. If the path points to an individual file,
+ that file will be uploaded to the specified bucket. If the
+ path points to a directory, s3put will recursively traverse
+ the directory and upload all files to the specified bucket.
+ debug_level - 0 means no debug output (default), 1 means normal
+ debug output from boto, and 2 means boto debug output
+ plus request/response output from httplib
+ ignore_dirs - a comma-separated list of directory names that will
+ be ignored and not uploaded to S3.
+ num_cb - The number of progress callbacks to display. The default
+ is zero which means no callbacks. If you supplied a value
+ of "-c 10" for example, the progress callback would be
+ called 10 times for each file transferred.
+ prefix - A file path prefix that will be stripped from the full
+ path of the file when determining the key name in S3.
+ For example, if the full path of a file is:
+ /home/foo/bar/fie.baz
+ and the prefix is specified as "-p /home/foo/" the
+ resulting key name in S3 will be:
+ /bar/fie.baz
+ The prefix must end in a trailing separator and if it
+ does not then one will be added.
+ grant - A canned ACL policy that will be granted on each file
+ transferred to S3. The value of provided must be one
+ of the "canned" ACL policies supported by S3:
+ private|public-read|public-read-write|authenticated-read
+ no_overwrite - No files will be overwritten on S3, if the file/key
+ exists on s3 it will be kept. This is useful for
+ resuming interrupted transfers. Note this is not a
+ sync, even if the file has been updated locally if
+ the key exists on s3 the file on s3 will not be
+ updated.
+
+ If the -n option is provided, no files will be transferred to S3 but
+ informational messages will be printed about what would happen.
+"""
+def usage():
+    """Print the usage message and exit."""
+    print usage_string
+    sys.exit()
+
+def submit_cb(bytes_so_far, total_bytes):
+    """Progress callback passed to S3 uploads: report bytes transferred."""
+    print '%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes)
+
+def get_key_name(fullpath, prefix):
+    """Derive the S3 key name for a local file: strip *prefix* from the
+    front of *fullpath* and normalize the OS path separator to '/'."""
+    relative = fullpath[len(prefix):]
+    return '/'.join(relative.split(os.sep))
+
+def main():
+    """Parse command-line options and upload a file or directory tree to S3."""
+    try:
+        # BUG FIX: long options that take a value need a trailing "=" or
+        # getopt rejects e.g. "--bucket name"; the original spec omitted
+        # them all.  Also fixed the stray "c::" in the short-option spec.
+        opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:d:g:hi:np:qs:vw',
+                ['access_key=', 'bucket=', 'callback=', 'debug=', 'help',
+                 'grant=', 'ignore=', 'no_op', 'prefix=', 'quiet',
+                 'secret_key=', 'no_overwrite'])
+    except:
+        usage()
+    ignore_dirs = []
+    aws_access_key_id = None
+    aws_secret_access_key = None
+    bucket_name = ''
+    total = 0
+    debug = 0
+    cb = None
+    num_cb = 0
+    quiet = False
+    no_op = False
+    prefix = '/'
+    grant = None
+    no_overwrite = False
+    for o, a in opts:
+        if o in ('-h', '--help'):
+            usage()
+            sys.exit()
+        if o in ('-a', '--access_key'):
+            aws_access_key_id = a
+        if o in ('-b', '--bucket'):
+            bucket_name = a
+        if o in ('-c', '--callback'):
+            num_cb = int(a)
+            cb = submit_cb
+        if o in ('-d', '--debug'):
+            debug = int(a)
+        if o in ('-g', '--grant'):
+            grant = a
+        if o in ('-i', '--ignore'):
+            ignore_dirs = a.split(',')
+        if o in ('-n', '--no_op'):
+            no_op = True
+        if o in ('-w', '--no_overwrite'):
+            # BUG FIX: this was previously spelled ('w', ...) without the
+            # dash, so -w never matched and no_overwrite was never honored.
+            no_overwrite = True
+        if o in ('-p', '--prefix'):
+            prefix = a
+            # Ensure a trailing separator so get_key_name strips cleanly.
+            if prefix[-1] != os.sep:
+                prefix = prefix + os.sep
+        if o in ('-q', '--quiet'):
+            quiet = True
+        if o in ('-s', '--secret_key'):
+            aws_secret_access_key = a
+    if len(args) != 1:
+        # usage() exits; the old code did "print usage()", which would have
+        # printed a spurious "None" before exiting.
+        usage()
+    path = os.path.expanduser(args[0])
+    path = os.path.expandvars(path)
+    path = os.path.abspath(path)
+    if bucket_name:
+        c = boto.connect_s3(aws_access_key_id=aws_access_key_id,
+                            aws_secret_access_key=aws_secret_access_key)
+        c.debug = debug
+        b = c.get_bucket(bucket_name)
+        if os.path.isdir(path):
+            if no_overwrite:
+                if not quiet:
+                    print 'Getting list of existing keys to check against'
+                keys = []
+                for key in b.list():
+                    keys.append(key.name)
+            for root, dirs, files in os.walk(path):
+                # Prune ignored directories in place so os.walk skips them.
+                for ignore in ignore_dirs:
+                    if ignore in dirs:
+                        dirs.remove(ignore)
+                for file in files:
+                    fullpath = os.path.join(root, file)
+                    key_name = get_key_name(fullpath, prefix)
+                    copy_file = True
+                    if no_overwrite:
+                        if key_name in keys:
+                            copy_file = False
+                            if not quiet:
+                                print 'Skipping %s as it exists in s3' % file
+                    if copy_file:
+                        if not quiet:
+                            print 'Copying %s to %s/%s' % (file, bucket_name, key_name)
+                        if not no_op:
+                            k = b.new_key(key_name)
+                            k.set_contents_from_filename(fullpath, cb=cb,
+                                                         num_cb=num_cb, policy=grant)
+                    total += 1
+        elif os.path.isfile(path):
+            key_name = os.path.split(path)[1]
+            copy_file = True
+            if no_overwrite:
+                if b.get_key(key_name):
+                    copy_file = False
+                    if not quiet:
+                        print 'Skipping %s as it exists in s3' % path
+            if copy_file:
+                k = b.new_key(key_name)
+                k.set_contents_from_filename(path, cb=cb, num_cb=num_cb, policy=grant)
+    else:
+        usage()
+
+if __name__ == "__main__":
+    main()
+
diff --git a/bin/sdbadmin b/bin/sdbadmin
new file mode 100755
index 0000000..e8ff9b5
--- /dev/null
+++ b/bin/sdbadmin
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://kopertop.blogspot.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Tools to dump and recover an SDB domain
+#
+VERSION = "%prog version 1.0"
+import boto
+import time
+from boto import sdb
+
+def choice_input(options, default=None, title=None):
+    """
+    Choice input
+
+    Print a numbered menu of *options* and return the object the user
+    picks; an out-of-range number returns *default*.
+    """
+    if title == None:
+        title = "Please choose"
+    print title
+    objects = []
+    for n, obj in enumerate(options):
+        print "%s: %s" % (n, obj)
+        objects.append(obj)
+    # NOTE(review): a non-numeric reply raises ValueError here, outside the
+    # try below -- confirm whether that crash is acceptable for this tool.
+    choice = int(raw_input(">>> "))
+    try:
+        choice = objects[choice]
+    except:
+        choice = default
+    return choice
+
+def confirm(message="Are you sure?"):
+    """Prompt the user; truthy only when the answer starts with y/Y."""
+    choice = raw_input("%s [yN] " % message)
+    return choice and len(choice) > 0 and choice[0].lower() == "y"
+
+
+def dump_db(domain, file_name):
+    """
+    Dump SDB domain to file
+
+    :param domain: The SDB Domain object to dump
+    :param file_name: Path of the file to write the XML dump to
+    """
+    # The original leaked the open file handle (and bound an unused "doc");
+    # close explicitly so the dump is flushed even on error.  try/finally
+    # instead of "with" to keep the file's Python 2.4 compatibility goal.
+    f = open(file_name, "w")
+    try:
+        domain.to_xml(f)
+    finally:
+        f.close()
+
+def empty_db(domain):
+    """
+    Remove all entries from domain
+    """
+    # Deletes item-by-item (one API call per item); slow on large domains.
+    for item in domain:
+        item.delete()
+
+def load_db(domain, file):
+    """
+    Load a domain from a file, this doesn't overwrite any existing
+    data in the file so if you want to do a full recovery and restore
+    you need to call empty_db before calling this
+
+    :param domain: The SDB Domain object to load to
+    :param file: The File to load the DB from
+    """
+    # NOTE: "file" shadows the builtin; kept for backward compatibility.
+    domain.from_xml(file)
+
+def create_db(domain_name, region_name):
+    """Create a new DB
+
+    :param domain_name: Name of the domain to create
+    :type domain_name: str
+    :param region_name: Name of the SDB region to create the domain in
+    :type region_name: str
+    """
+    # Local "sdb" shadows the module-level connection name; harmless here.
+    sdb = boto.sdb.connect_to_region(region_name)
+    return sdb.create_domain(domain_name)
+
+if __name__ == "__main__":
+    from optparse import OptionParser
+    parser = OptionParser(version=VERSION, usage="Usage: %prog [--dump|--load|--empty|--list|-l] [options]")
+
+    # Commands
+    parser.add_option("--dump", help="Dump domain to file", dest="dump", default=False, action="store_true")
+    parser.add_option("--load", help="Load domain contents from file", dest="load", default=False, action="store_true")
+    parser.add_option("--empty", help="Empty all contents of domain", dest="empty", default=False, action="store_true")
+    parser.add_option("-l", "--list", help="List All domains", dest="list", default=False, action="store_true")
+    parser.add_option("-c", "--create", help="Create domain", dest="create", default=False, action="store_true")
+
+    # Modifiers
+    parser.add_option("-a", "--all-domains", help="Operate on all domains", action="store_true", default=False, dest="all_domains")
+    parser.add_option("-d", "--domain", help="Do functions on domain (may be more than one)", action="append", dest="domains")
+    parser.add_option("-f", "--file", help="Input/Output file we're operating on", dest="file_name")
+    parser.add_option("-r", "--region", help="Region (e.g. us-east-1[default] or eu-west-1)", default="us-east-1", dest="region_name")
+    (options, args) = parser.parse_args()
+
+    if options.create:
+        # BUG FIX: --create without -d used to crash iterating None.
+        if not options.domains:
+            parser.error("--create requires at least one -d/--domain")
+        for domain_name in options.domains:
+            create_db(domain_name, options.region_name)
+        exit()
+
+    sdb = boto.sdb.connect_to_region(options.region_name)
+    if options.list:
+        for db in sdb.get_all_domains():
+            print db
+        exit()
+
+    if not options.dump and not options.load and not options.empty:
+        parser.print_help()
+        exit()
+
+    #
+    # Setup: resolve the list of domains to operate on
+    #
+    if options.domains:
+        domains = []
+        for domain_name in options.domains:
+            domains.append(sdb.get_domain(domain_name))
+    elif options.all_domains:
+        domains = sdb.get_all_domains()
+    else:
+        domains = [choice_input(options=sdb.get_all_domains(), title="No domain specified, please choose one")]
+
+    #
+    # Execute the commands
+    #
+    stime = time.time()
+    if options.empty:
+        if confirm("WARNING!!! Are you sure you want to empty the following domains?: %s" % domains):
+            stime = time.time()
+            for domain in domains:
+                print "--------> Emptying %s <--------" % domain.name
+                empty_db(domain)
+        else:
+            print "Canceling operations"
+            exit()
+
+    if options.dump:
+        for domain in domains:
+            print "--------> Dumping %s <---------" % domain.name
+            if options.file_name:
+                file_name = options.file_name
+            else:
+                file_name = "%s.db" % domain.name
+            dump_db(domain, file_name)
+
+    if options.load:
+        for domain in domains:
+            print "---------> Loading %s <----------" % domain.name
+            if options.file_name:
+                file_name = options.file_name
+            else:
+                file_name = "%s.db" % domain.name
+            load_db(domain, open(file_name, "rb"))
+
+    total_time = round(time.time() - stime, 2)
+    print "--------> Finished in %s <--------" % total_time
diff --git a/bin/taskadmin b/bin/taskadmin
new file mode 100755
index 0000000..5d5302a
--- /dev/null
+++ b/bin/taskadmin
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# Copyright (c) 2009 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+
+#
+# Task/Job Administration utility
+#
+VERSION="0.1"
+__version__ = VERSION
+usage = """%prog [options] [command]
+Commands:
+ list|ls List all Tasks in SDB
+ delete <id> Delete Task with id <id>
+ get <name> Get Task <name>
+ create|mk <name> <hour> <command> Create a new Task <name> with command <command> running every <hour>
+"""
+
+def list():
+    """List all Tasks in SDB"""
+    # NOTE: shadows the builtin list(); the name is part of the CLI surface.
+    from boto.manage.task import Task
+    print "%-8s %-40s %s" % ("Hour", "Name", "Command")
+    print "-"*100
+    for t in Task.all():
+        print "%-8s %-40s %s" % (t.hour, t.name, t.command)
+
+def get(name):
+    """Get a task
+    :param name: The name of the task to fetch
+    :type name: str
+    """
+    from boto.manage.task import Task
+    # Prefix match: finds every task whose name starts with *name*.
+    q = Task.find()
+    q.filter("name like", "%s%%" % name)
+    for t in q:
+        print "="*80
+        print "| ", t.id
+        print "|%s" % ("-"*79)
+        print "| Name: ", t.name
+        print "| Hour: ", t.hour
+        print "| Command: ", t.command
+        if t.last_executed:
+            print "| Last Run: ", t.last_executed.ctime()
+            print "| Last Status: ", t.last_status
+            print "| Last Run Log: ", t.last_output
+        print "="*80
+
+def delete(id):
+    """Delete the Task with the given id, after interactive confirmation.
+
+    :param id: The id of the Task to delete
+    :type id: str
+    """
+    from boto.manage.task import Task
+    t = Task.get_by_id(id)
+    print "Deleting task: %s" % t.name
+    if raw_input("Are you sure? ").lower() in ["y", "yes"]:
+        t.delete()
+        print "Deleted"
+    else:
+        print "Canceled"
+
+def create(name, hour, command):
+    """Create a new task
+    :param name: Name of the task to create
+    :type name: str
+    :param hour: What hour to run it at, "*" for every hour
+    :type hour: str
+    :param command: The command to execute
+    :type command: str
+    """
+    from boto.manage.task import Task
+    t = Task()
+    t.name = name
+    t.hour = hour
+    t.command = command
+    # put() persists the Task to SDB and assigns its id.
+    t.put()
+    print "Created task: %s" % t.id
+
+if __name__ == "__main__":
+    # readline makes raw_input nicer to use interactively but is optional.
+    try:
+        import readline
+    except ImportError:
+        pass
+    import boto
+    import sys
+    from optparse import OptionParser
+    from boto.mashups.iobject import IObject
+    parser = OptionParser(version=__version__, usage=usage)
+
+    (options, args) = parser.parse_args()
+
+    if len(args) < 1:
+        parser.print_help()
+        sys.exit(1)
+
+    command = args[0].lower()
+    if command in ("ls", "list"):
+        list()
+    elif command == "get":
+        get(args[1])
+    elif command in ("create", "mk"):
+        # BUG FIX: the usage text advertises "create|mk" but "mk" was
+        # previously not accepted.
+        create(args[1], args[2], args[3])
+    elif command == "delete":
+        delete(args[1])
+    else:
+        # BUG FIX: unknown commands used to be silently ignored.
+        parser.print_help()
+        sys.exit(1)
diff --git a/boto/__init__.py b/boto/__init__.py
new file mode 100644
index 0000000..d11b578
--- /dev/null
+++ b/boto/__init__.py
@@ -0,0 +1,542 @@
+# Copyright (c) 2006-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto
+from boto.pyami.config import Config, BotoConfigLocations
+from boto.storage_uri import BucketStorageUri, FileStorageUri
+import boto.plugin
+import os, re, sys
+import logging
+import logging.config
+from boto.exception import InvalidUriError
+
+__version__ = '2.0b4'
+Version = __version__ # for backward compatibility
+
+UserAgent = 'Boto/%s (%s)' % (__version__, sys.platform)
+config = Config()
+
+def init_logging():
+    """Apply logging configuration from each boto config file, if present."""
+    for file in BotoConfigLocations:
+        try:
+            logging.config.fileConfig(os.path.expanduser(file))
+        except:
+            # Deliberately best-effort: a missing or malformed config file
+            # must never prevent boto from importing.
+            pass
+
+class NullHandler(logging.Handler):
+    """A no-op handler so the 'boto' logger never warns when unconfigured."""
+    def emit(self, record):
+        pass
+
+# Package-level logger; the NullHandler silences "no handlers" warnings
+# until the application (or a boto config file) configures logging.
+log = logging.getLogger('boto')
+log.addHandler(NullHandler())
+init_logging()
+
+# convenience function to set logging to a particular file
+def set_file_logger(name, filepath, level=logging.INFO, format_string=None):
+    """Route the named logger to *filepath* and make it the package logger."""
+    global log
+    if not format_string:
+        format_string = "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    fh = logging.FileHandler(filepath)
+    fh.setLevel(level)
+    formatter = logging.Formatter(format_string)
+    fh.setFormatter(formatter)
+    logger.addHandler(fh)
+    # Rebind the module-level logger so subsequent boto.log calls use it.
+    log = logger
+
+def set_stream_logger(name, level=logging.DEBUG, format_string=None):
+    """Route the named logger to a stream handler (stderr) and make it the
+    package-level boto logger."""
+    global log
+    fmt = format_string or "%(asctime)s %(name)s [%(levelname)s]:%(message)s"
+    logger = logging.getLogger(name)
+    logger.setLevel(level)
+    handler = logging.StreamHandler()
+    handler.setLevel(level)
+    handler.setFormatter(logging.Formatter(fmt))
+    logger.addHandler(handler)
+    log = logger
+
+def connect_sqs(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sqs.connection.SQSConnection`
+ :return: A connection to Amazon's SQS
+ """
+ from boto.sqs.connection import SQSConnection
+ return SQSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_s3(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.s3.connection.S3Connection`
+ :return: A connection to Amazon's S3
+ """
+ from boto.s3.connection import S3Connection
+ return S3Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_gs(gs_access_key_id=None, gs_secret_access_key=None, **kwargs):
+    """
+    :type gs_access_key_id: string
+    :param gs_access_key_id: Your Google Storage Access Key ID
+
+    :type gs_secret_access_key: string
+    :param gs_secret_access_key: Your Google Storage Secret Access Key
+
+    :rtype: :class:`boto.gs.connection.GSConnection`
+    :return: A connection to Google's Storage service
+    """
+    from boto.gs.connection import GSConnection
+    return GSConnection(gs_access_key_id, gs_secret_access_key, **kwargs)
+
+def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.connection.EC2Connection`
+ :return: A connection to Amazon's EC2
+ """
+ from boto.ec2.connection import EC2Connection
+ return EC2Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_elb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.elb.ELBConnection`
+ :return: A connection to Amazon's Load Balancing Service
+ """
+ from boto.ec2.elb import ELBConnection
+ return ELBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_autoscale(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.autoscale.AutoScaleConnection`
+ :return: A connection to Amazon's Auto Scaling Service
+ """
+ from boto.ec2.autoscale import AutoScaleConnection
+ return AutoScaleConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_cloudwatch(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.cloudwatch.CloudWatchConnection`
+ :return: A connection to Amazon's EC2 Monitoring service
+ """
+ from boto.ec2.cloudwatch import CloudWatchConnection
+ return CloudWatchConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_sdb(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sdb.connection.SDBConnection`
+ :return: A connection to Amazon's SDB
+ """
+ from boto.sdb.connection import SDBConnection
+ return SDBConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_fps(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.fps.connection.FPSConnection`
+ :return: A connection to FPS
+ """
+ from boto.fps.connection import FPSConnection
+ return FPSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_mturk(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.mturk.connection.MTurkConnection`
+ :return: A connection to MTurk
+ """
+ from boto.mturk.connection import MTurkConnection
+ return MTurkConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_cloudfront(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.cloudfront.CloudFrontConnection`
+    :return: A connection to Amazon's CloudFront service
+    """
+    from boto.cloudfront import CloudFrontConnection
+    return CloudFrontConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_vpc(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.vpc.VPCConnection`
+ :return: A connection to VPC
+ """
+ from boto.vpc import VPCConnection
+ return VPCConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_rds(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.rds.RDSConnection`
+ :return: A connection to RDS
+ """
+ from boto.rds import RDSConnection
+ return RDSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_emr(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.emr.EmrConnection`
+ :return: A connection to Elastic mapreduce
+ """
+ from boto.emr import EmrConnection
+ return EmrConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_sns(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.sns.SNSConnection`
+ :return: A connection to Amazon's SNS
+ """
+ from boto.sns import SNSConnection
+ return SNSConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+
+def connect_iam(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.iam.IAMConnection`
+ :return: A connection to Amazon's IAM
+ """
+ from boto.iam import IAMConnection
+ return IAMConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_route53(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+    """
+    :type aws_access_key_id: string
+    :param aws_access_key_id: Your AWS Access Key ID
+
+    :type aws_secret_access_key: string
+    :param aws_secret_access_key: Your AWS Secret Access Key
+
+    :rtype: :class:`boto.route53.Route53Connection`
+    :return: A connection to Amazon's Route53 DNS Service
+    """
+    from boto.route53 import Route53Connection
+    return Route53Connection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_euca(host, aws_access_key_id=None, aws_secret_access_key=None,
+ port=8773, path='/services/Eucalyptus', is_secure=False,
+ **kwargs):
+ """
+ Connect to a Eucalyptus service.
+
+ :type host: string
+ :param host: the host name or ip address of the Eucalyptus server
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ec2.connection.EC2Connection`
+ :return: A connection to Eucalyptus server
+ """
+ from boto.ec2 import EC2Connection
+ from boto.ec2.regioninfo import RegionInfo
+
+ reg = RegionInfo(name='eucalyptus', endpoint=host)
+ return EC2Connection(aws_access_key_id, aws_secret_access_key,
+ region=reg, port=port, path=path,
+ is_secure=is_secure, **kwargs)
+
+def connect_walrus(host, aws_access_key_id=None, aws_secret_access_key=None,
+ port=8773, path='/services/Walrus', is_secure=False,
+ **kwargs):
+ """
+ Connect to a Walrus service.
+
+ :type host: string
+ :param host: the host name or ip address of the Walrus server
+
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.s3.connection.S3Connection`
+ :return: A connection to Walrus
+ """
+ from boto.s3.connection import S3Connection
+ from boto.s3.connection import OrdinaryCallingFormat
+
+ return S3Connection(aws_access_key_id, aws_secret_access_key,
+ host=host, port=port, path=path,
+ calling_format=OrdinaryCallingFormat(),
+ is_secure=is_secure, **kwargs)
+
+def connect_ses(aws_access_key_id=None, aws_secret_access_key=None, **kwargs):
+ """
+ :type aws_access_key_id: string
+ :param aws_access_key_id: Your AWS Access Key ID
+
+ :type aws_secret_access_key: string
+ :param aws_secret_access_key: Your AWS Secret Access Key
+
+ :rtype: :class:`boto.ses.SESConnection`
+ :return: A connection to Amazon's SES
+ """
+ from boto.ses import SESConnection
+ return SESConnection(aws_access_key_id, aws_secret_access_key, **kwargs)
+
+def connect_ia(ia_access_key_id=None, ia_secret_access_key=None,
+ is_secure=False, **kwargs):
+ """
+ Connect to the Internet Archive via their S3-like API.
+
+ :type ia_access_key_id: string
+ :param ia_access_key_id: Your IA Access Key ID. This will also look in your
+ boto config file for an entry in the Credentials
+ section called "ia_access_key_id"
+
+ :type ia_secret_access_key: string
+ :param ia_secret_access_key: Your IA Secret Access Key. This will also look in your
+ boto config file for an entry in the Credentials
+ section called "ia_secret_access_key"
+
+ :rtype: :class:`boto.s3.connection.S3Connection`
+ :return: A connection to the Internet Archive
+ """
+ from boto.s3.connection import S3Connection
+ from boto.s3.connection import OrdinaryCallingFormat
+
+ access_key = config.get('Credentials', 'ia_access_key_id',
+ ia_access_key_id)
+ secret_key = config.get('Credentials', 'ia_secret_access_key',
+ ia_secret_access_key)
+
+ return S3Connection(access_key, secret_key,
+ host='s3.us.archive.org',
+ calling_format=OrdinaryCallingFormat(),
+ is_secure=is_secure, **kwargs)
+
+def check_extensions(module_name, module_path):
+ """
+ This function checks for extensions to boto modules. It should be called in the
+ __init__.py file of all boto modules. See:
+ http://code.google.com/p/boto/wiki/ExtendModules
+
+ for details.
+ """
+ option_name = '%s_extend' % module_name
+ version = config.get('Boto', option_name, None)
+ if version:
+ dirname = module_path[0]
+ path = os.path.join(dirname, version)
+ if os.path.isdir(path):
+ log.info('extending module %s with: %s' % (module_name, path))
+ module_path.insert(0, path)
+
+# Process-wide cache shared by _get_aws_conn() and lookup(): maps a service
+# name to its connection, and "service.name" keys to looked-up objects.
+_aws_cache = {}
+
+def _get_aws_conn(service):
+    # Return a cached connection for `service`, creating it on first use by
+    # calling the matching module-level connect_<service>() function with no
+    # arguments (so credentials come from the environment/boto config).
+    global _aws_cache
+    conn = _aws_cache.get(service)
+    if not conn:
+        meth = getattr(sys.modules[__name__], 'connect_' + service)
+        conn = meth()
+        _aws_cache[service] = conn
+    return conn
+
+def lookup(service, name):
+ global _aws_cache
+ conn = _get_aws_conn(service)
+ obj = _aws_cache.get('.'.join((service, name)), None)
+ if not obj:
+ obj = conn.lookup(name)
+ _aws_cache['.'.join((service, name))] = obj
+ return obj
+
+def storage_uri(uri_str, default_scheme='file', debug=0, validate=True,
+ bucket_storage_uri_class=BucketStorageUri):
+ """
+ Instantiate a StorageUri from a URI string.
+
+ :type uri_str: string
+ :param uri_str: URI naming bucket + optional object.
+ :type default_scheme: string
+ :param default_scheme: default scheme for scheme-less URIs.
+ :type debug: int
+ :param debug: debug level to pass in to boto connection (range 0..2).
+ :type validate: bool
+ :param validate: whether to check for bucket name validity.
+ :type bucket_storage_uri_class: BucketStorageUri interface.
+ :param bucket_storage_uri_class: Allows mocking for unit tests.
+
+ We allow validate to be disabled to allow caller
+ to implement bucket-level wildcarding (outside the boto library;
+ see gsutil).
+
+ :rtype: :class:`boto.StorageUri` subclass
+ :return: StorageUri subclass for given URI.
+
+ ``uri_str`` must be one of the following formats:
+
+ * gs://bucket/name
+ * s3://bucket/name
+ * gs://bucket
+ * s3://bucket
+ * filename
+
+ The last example uses the default scheme ('file', unless overridden)
+ """
+
+ # Manually parse URI components instead of using urlparse.urlparse because
+ # what we're calling URIs don't really fit the standard syntax for URIs
+ # (the latter includes an optional host/net location part).
+ end_scheme_idx = uri_str.find('://')
+ if end_scheme_idx == -1:
+ # Check for common error: user specifies gs:bucket instead
+ # of gs://bucket. Some URI parsers allow this, but it can cause
+ # confusion for callers, so we don't.
+ if uri_str.find(':') != -1:
+ raise InvalidUriError('"%s" contains ":" instead of "://"' % uri_str)
+ scheme = default_scheme.lower()
+ path = uri_str
+ else:
+ scheme = uri_str[0:end_scheme_idx].lower()
+ path = uri_str[end_scheme_idx + 3:]
+
+ if scheme not in ['file', 's3', 'gs']:
+ raise InvalidUriError('Unrecognized scheme "%s"' % scheme)
+ if scheme == 'file':
+ # For file URIs we have no bucket name, and use the complete path
+ # (minus 'file://') as the object name.
+ return FileStorageUri(path, debug)
+ else:
+ path_parts = path.split('/', 1)
+ bucket_name = path_parts[0]
+ if (validate and bucket_name and
+ # Disallow buckets violating charset or not [3..255] chars total.
+ (not re.match('^[a-z0-9][a-z0-9\._-]{1,253}[a-z0-9]$', bucket_name)
+ # Disallow buckets with individual DNS labels longer than 63.
+ or re.search('[-_a-z0-9]{64}', bucket_name))):
+ raise InvalidUriError('Invalid bucket name in URI "%s"' % uri_str)
+ # If enabled, ensure the bucket name is valid, to avoid possibly
+ # confusing other parts of the code. (For example if we didn't
+ # catch bucket names containing ':', when a user tried to connect to
+ # the server with that name they might get a confusing error about
+ # non-integer port numbers.)
+ object_name = ''
+ if len(path_parts) > 1:
+ object_name = path_parts[1]
+ return bucket_storage_uri_class(scheme, bucket_name, object_name, debug)
+
+def storage_uri_for_key(key):
+ """Returns a StorageUri for the given key.
+
+ :type key: :class:`boto.s3.key.Key` or subclass
+ :param key: URI naming bucket + optional object.
+ """
+ if not isinstance(key, boto.s3.key.Key):
+ raise InvalidUriError('Requested key (%s) is not a subclass of '
+ 'boto.s3.key.Key' % str(type(key)))
+ prov_name = key.bucket.connection.provider.get_provider_name()
+ uri_str = '%s://%s/%s' % (prov_name, key.bucket.name, key.name)
+ return storage_uri(uri_str)
+
+boto.plugin.load_plugins(config)
diff --git a/boto/auth.py b/boto/auth.py
new file mode 100644
index 0000000..6c6c1f2
--- /dev/null
+++ b/boto/auth.py
@@ -0,0 +1,319 @@
+# Copyright 2010 Google Inc.
+# Copyright (c) 2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+"""
+Handles authentication required to AWS and GS
+"""
+
+import base64
+import boto
+import boto.auth_handler
+import boto.exception
+import boto.plugin
+import boto.utils
+import hmac
+import sys
+import time
+import urllib
+
+from boto.auth_handler import AuthHandler
+from boto.exception import BotoClientError
+#
+# the following is necessary because of the incompatibilities
+# between Python 2.4, 2.5, and 2.6 as well as the fact that some
+# people running 2.4 have installed hashlib as a separate module
+# this fix was provided by boto user mccormix.
+# see: http://code.google.com/p/boto/issues/detail?id=172
+# for more details.
+#
+# Resolve SHA-1/SHA-256 constructors across Python versions (see the
+# comment above for the issue history).
+try:
+    from hashlib import sha1 as sha
+    from hashlib import sha256 as sha256
+
+    if sys.version[:3] == "2.4":
+        # we are using an hmac that expects a .new() method.
+        class Faker:
+            # Wraps a hashlib constructor so it presents the legacy
+            # module-style interface: a .new() factory plus digest_size.
+            def __init__(self, which):
+                self.which = which
+                self.digest_size = self.which().digest_size
+
+            def new(self, *args, **kwargs):
+                return self.which(*args, **kwargs)
+
+        sha = Faker(sha)
+        sha256 = Faker(sha256)
+
+except ImportError:
+    # Pre-hashlib Python: fall back to the legacy sha module. SHA-256 is
+    # unavailable, so HmacKeys downgrades to HMAC-SHA1.
+    import sha
+    sha256 = None
+
+class HmacKeys(object):
+    """Key based Auth handler helper."""
+
+    def __init__(self, host, config, provider):
+        # Refuse to construct without both halves of the key pair; callers
+        # (get_auth_handler) treat NotReadyToAuthenticate as "this handler
+        # is not applicable", not as an error.
+        if provider.access_key is None or provider.secret_key is None:
+            raise boto.auth_handler.NotReadyToAuthenticate()
+        self._provider = provider
+        # Pre-keyed HMAC objects; sign_string() copies these instead of
+        # re-keying a fresh HMAC for every request.
+        self._hmac = hmac.new(self._provider.secret_key, digestmod=sha)
+        if sha256:
+            self._hmac_256 = hmac.new(self._provider.secret_key, digestmod=sha256)
+        else:
+            self._hmac_256 = None
+
+    def algorithm(self):
+        """Return the signing algorithm name: 'HmacSHA256' when SHA-256 is
+        available, otherwise 'HmacSHA1'."""
+        if self._hmac_256:
+            return 'HmacSHA256'
+        else:
+            return 'HmacSHA1'
+
+    def sign_string(self, string_to_sign):
+        """HMAC-sign `string_to_sign` (preferring SHA-256 when available)
+        and return the stripped base64-encoded digest."""
+        boto.log.debug('Canonical: %s' % string_to_sign)
+        if self._hmac_256:
+            hmac = self._hmac_256.copy()
+        else:
+            hmac = self._hmac.copy()
+        hmac.update(string_to_sign)
+        return base64.encodestring(hmac.digest()).strip()
+
+class HmacAuthV1Handler(AuthHandler, HmacKeys):
+    """ Implements the HMAC request signing used by S3 and GS."""
+
+    capability = ['hmac-v1', 's3']
+
+    def __init__(self, host, config, provider):
+        AuthHandler.__init__(self, host, config, provider)
+        HmacKeys.__init__(self, host, config, provider)
+        # Force sign_string() to use HMAC-SHA1 even when SHA-256 is
+        # available -- presumably the v1 scheme requires SHA1; TODO confirm.
+        self._hmac_256 = None
+
+    def add_auth(self, http_request, **kwargs):
+        """Add a Date header (if absent) and an Authorization header
+        signing the canonicalized request."""
+        headers = http_request.headers
+        method = http_request.method
+        auth_path = http_request.auth_path
+        if not headers.has_key('Date'):
+            headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
+                                            time.gmtime())
+
+        c_string = boto.utils.canonical_string(method, auth_path, headers,
+                                               None, self._provider)
+        b64_hmac = self.sign_string(c_string)
+        auth_hdr = self._provider.auth_header
+        headers['Authorization'] = ("%s %s:%s" %
+                                    (auth_hdr,
+                                     self._provider.access_key, b64_hmac))
+
+class HmacAuthV2Handler(AuthHandler, HmacKeys):
+    """
+    Implements the simplified HMAC authorization used by CloudFront.
+    """
+    capability = ['hmac-v2', 'cloudfront']
+
+    def __init__(self, host, config, provider):
+        AuthHandler.__init__(self, host, config, provider)
+        HmacKeys.__init__(self, host, config, provider)
+        # Force sign_string() to use HMAC-SHA1 even when SHA-256 is
+        # available -- presumably required by this scheme; TODO confirm.
+        self._hmac_256 = None
+
+    def add_auth(self, http_request, **kwargs):
+        """Sign only the Date header value; the signature goes into the
+        Authorization header."""
+        headers = http_request.headers
+        if not headers.has_key('Date'):
+            headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
+                                            time.gmtime())
+
+        b64_hmac = self.sign_string(headers['Date'])
+        auth_hdr = self._provider.auth_header
+        headers['Authorization'] = ("%s %s:%s" %
+                                    (auth_hdr,
+                                     self._provider.access_key, b64_hmac))
+
+class HmacAuthV3Handler(AuthHandler, HmacKeys):
+    """Implements the new Version 3 HMAC authorization used by Route53."""
+
+    capability = ['hmac-v3', 'route53', 'ses']
+
+    def __init__(self, host, config, provider):
+        AuthHandler.__init__(self, host, config, provider)
+        HmacKeys.__init__(self, host, config, provider)
+
+    def add_auth(self, http_request, **kwargs):
+        """Sign the Date header value and emit the AWS3-HTTPS credential
+        string in the X-Amzn-Authorization header."""
+        headers = http_request.headers
+        if not headers.has_key('Date'):
+            headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT",
+                                            time.gmtime())
+
+        b64_hmac = self.sign_string(headers['Date'])
+        s = "AWS3-HTTPS AWSAccessKeyId=%s," % self._provider.access_key
+        s += "Algorithm=%s,Signature=%s" % (self.algorithm(), b64_hmac)
+        headers['X-Amzn-Authorization'] = s
+
+class QuerySignatureHelper(HmacKeys):
+    """Helper for Query signature based Auth handlers.
+
+    Concrete subclasses need to implement the _calc_signature method and
+    define a SignatureVersion class attribute.
+    """
+
+    def add_auth(self, http_request, **kwargs):
+        # Add the AWS query-auth parameters, compute the signature, and
+        # attach it: for POST the signed query string becomes the
+        # form-encoded body, otherwise it is appended to the request path.
+        headers = http_request.headers
+        params = http_request.params
+        params['AWSAccessKeyId'] = self._provider.access_key
+        params['SignatureVersion'] = self.SignatureVersion
+        params['Timestamp'] = boto.utils.get_ts()
+        qs, signature = self._calc_signature(
+            http_request.params, http_request.method,
+            http_request.path, http_request.host)
+        boto.log.debug('query_string: %s Signature: %s' % (qs, signature))
+        if http_request.method == 'POST':
+            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
+            http_request.body = qs + '&Signature=' + urllib.quote(signature)
+        else:
+            http_request.body = ''
+            http_request.path = (http_request.path + '?' + qs + '&Signature=' + urllib.quote(signature))
+        # Now that query params are part of the path, clear the 'params' field
+        # in request.
+        http_request.params = {}
+
+class QuerySignatureV0AuthHandler(QuerySignatureHelper, AuthHandler):
+ """Class SQS query signature based Auth handler."""
+
+ SignatureVersion = 0
+ capability = ['sign-v0']
+
+ def _calc_signature(self, params, *args):
+ boto.log.debug('using _calc_signature_0')
+ hmac = self._hmac.copy()
+ s = params['Action'] + params['Timestamp']
+ hmac.update(s)
+ keys = params.keys()
+ keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+ pairs = []
+ for key in keys:
+ val = bot.utils.get_utf8_value(params[key])
+ pairs.append(key + '=' + urllib.quote(val))
+ qs = '&'.join(pairs)
+ return (qs, base64.b64encode(hmac.digest()))
+
+class QuerySignatureV1AuthHandler(QuerySignatureHelper, AuthHandler):
+    """
+    Provides Query Signature V1 Authentication.
+    """
+
+    SignatureVersion = 1
+    capability = ['sign-v1', 'mturk']
+
+    def _calc_signature(self, params, *args):
+        """Return (query_string, base64_signature) for `params`.
+
+        V1 signs the concatenation of every key and value, with keys
+        sorted case-insensitively.
+        """
+        boto.log.debug('using _calc_signature_1')
+        hmac = self._hmac.copy()
+        keys = params.keys()
+        keys.sort(cmp = lambda x, y: cmp(x.lower(), y.lower()))
+        pairs = []
+        for key in keys:
+            hmac.update(key)
+            val = boto.utils.get_utf8_value(params[key])
+            hmac.update(val)
+            pairs.append(key + '=' + urllib.quote(val))
+        qs = '&'.join(pairs)
+        return (qs, base64.b64encode(hmac.digest()))
+
+class QuerySignatureV2AuthHandler(QuerySignatureHelper, AuthHandler):
+ """Provides Query Signature V2 Authentication."""
+
+ SignatureVersion = 2
+ capability = ['sign-v2', 'ec2', 'ec2', 'emr', 'fps', 'ecs',
+ 'sdb', 'iam', 'rds', 'sns', 'sqs']
+
+ def _calc_signature(self, params, verb, path, server_name):
+ boto.log.debug('using _calc_signature_2')
+ string_to_sign = '%s\n%s\n%s\n' % (verb, server_name.lower(), path)
+ if self._hmac_256:
+ hmac = self._hmac_256.copy()
+ params['SignatureMethod'] = 'HmacSHA256'
+ else:
+ hmac = self._hmac.copy()
+ params['SignatureMethod'] = 'HmacSHA1'
+ keys = params.keys()
+ keys.sort()
+ pairs = []
+ for key in keys:
+ val = boto.utils.get_utf8_value(params[key])
+ pairs.append(urllib.quote(key, safe='') + '=' +
+ urllib.quote(val, safe='-_~'))
+ qs = '&'.join(pairs)
+ boto.log.debug('query string: %s' % qs)
+ string_to_sign += qs
+ boto.log.debug('string_to_sign: %s' % string_to_sign)
+ hmac.update(string_to_sign)
+ b64 = base64.b64encode(hmac.digest())
+ boto.log.debug('len(b64)=%d' % len(b64))
+ boto.log.debug('base64 encoded digest: %s' % b64)
+ return (qs, b64)
+
+
+def get_auth_handler(host, config, provider, requested_capability=None):
+    """Finds an AuthHandler that is ready to authenticate.
+
+    Lists through all the registered AuthHandlers to find one that is willing
+    to handle for the requested capabilities, config and provider.
+
+    :type host: string
+    :param host: The name of the host
+
+    :type config: boto.pyami.Config
+    :param config: Boto configuration passed to each candidate handler.
+
+    :type provider: boto.provider.Provider
+    :param provider: Provider details (credentials) to authenticate with.
+
+    Returns:
+        An implementation of AuthHandler.
+
+    Raises:
+        boto.exception.NoAuthHandlerFound:
+        boto.exception.TooManyAuthHandlerReadyToAuthenticate:
+    """
+    ready_handlers = []
+    auth_handlers = boto.plugin.get_plugin(AuthHandler, requested_capability)
+    # NOTE(review): total_handlers is assigned but never used.
+    total_handlers = len(auth_handlers)
+    for handler in auth_handlers:
+        try:
+            ready_handlers.append(handler(host, config, provider))
+        except boto.auth_handler.NotReadyToAuthenticate:
+            # Handler constructors signal "not applicable" (e.g. missing
+            # credentials) by raising NotReadyToAuthenticate.
+            pass
+
+    if not ready_handlers:
+        checked_handlers = auth_handlers
+        names = [handler.__name__ for handler in checked_handlers]
+        raise boto.exception.NoAuthHandlerFound(
+            'No handler was ready to authenticate. %d handlers were checked.'
+            ' %s ' % (len(names), str(names)))
+
+    if len(ready_handlers) > 1:
+        # NOTE: Even though it would be nice to accept more than one handler
+        # by using one of the many ready handlers, we are never sure that each
+        # of them are referring to the same storage account. Since we cannot
+        # easily guarantee that, it is always safe to fail, rather than operate
+        # on the wrong account.
+        names = [handler.__class__.__name__ for handler in ready_handlers]
+        raise boto.exception.TooManyAuthHandlerReadyToAuthenticate(
+            '%d AuthHandlers ready to authenticate, '
+            'only 1 expected: %s' % (len(names), str(names)))
+
+    return ready_handlers[0]
diff --git a/boto/auth_handler.py b/boto/auth_handler.py
new file mode 100644
index 0000000..ab2d317
--- /dev/null
+++ b/boto/auth_handler.py
@@ -0,0 +1,58 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Defines an interface which all Auth handlers need to implement.
+"""
+
+from plugin import Plugin
+
+class NotReadyToAuthenticate(Exception):
+    """Raised by AuthHandler constructors that decline to authenticate
+    for the given host/config/provider combination."""
+    pass
+
+class AuthHandler(Plugin):
+    """Base class for authentication plugins.
+
+    Subclasses advertise what they support via the `capability` list and
+    implement add_auth() to sign outgoing requests.
+    """
+
+    # List of capability strings (e.g. 's3', 'route53') this handler
+    # supports; matched against the capabilities a connection requests.
+    capability = []
+
+    def __init__(self, host, config, provider):
+        """Constructs the handlers.
+        :type host: string
+        :param host: The host to which the request is being sent.
+
+        :type config: boto.pyami.Config
+        :param config: Boto configuration.
+
+        :type provider: boto.provider.Provider
+        :param provider: Provider details.
+
+        Raises:
+            NotReadyToAuthenticate: if this handler is not willing to
+                authenticate for the given provider and config.
+        """
+        pass
+
+    def add_auth(self, http_request):
+        """Invoked to add authentication details to request.
+
+        :type http_request: boto.connection.HTTPRequest
+        :param http_request: HTTP request that needs to be authenticated.
+        """
+        pass
diff --git a/boto/cloudfront/__init__.py b/boto/cloudfront/__init__.py
new file mode 100644
index 0000000..bd02b00
--- /dev/null
+++ b/boto/cloudfront/__init__.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import xml.sax
+import time
+import boto
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.cloudfront.distribution import Distribution, DistributionSummary, DistributionConfig
+from boto.cloudfront.distribution import StreamingDistribution, StreamingDistributionSummary, StreamingDistributionConfig
+from boto.cloudfront.identity import OriginAccessIdentity
+from boto.cloudfront.identity import OriginAccessIdentitySummary
+from boto.cloudfront.identity import OriginAccessIdentityConfig
+from boto.cloudfront.invalidation import InvalidationBatch
+from boto.resultset import ResultSet
+from boto.cloudfront.exception import CloudFrontServerError
+
+class CloudFrontConnection(AWSAuthConnection):
+    """Connection to the Amazon CloudFront content delivery service.
+
+    Thin wrapper over the CloudFront REST API (version ``Version``): the
+    generic helpers do the request/parse work, and the public methods map
+    them onto distributions, streaming distributions, origin access
+    identities and invalidation requests.
+    """
+
+    DefaultHost = 'cloudfront.amazonaws.com'
+    # API version string embedded in every request URI.
+    Version = '2010-11-01'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 port=None, proxy=None, proxy_port=None,
+                 host=DefaultHost, debug=0):
+        # Always connects securely (the positional True is is_secure).
+        AWSAuthConnection.__init__(self, host,
+                                   aws_access_key_id, aws_secret_access_key,
+                                   True, port, proxy, proxy_port, debug=debug)
+
+    def get_etag(self, response):
+        # Case-insensitive lookup of the ETag response header, which serves
+        # as the If-Match token for config updates and deletes.
+        response_headers = response.msg
+        for key in response_headers.keys():
+            if key.lower() == 'etag':
+                return response_headers[key]
+        return None
+
+    def _required_auth_capability(self):
+        # Selects an auth handler advertising the 'cloudfront' capability.
+        return ['cloudfront']
+
+    # Generics
+
+    def _get_all_objects(self, resource, tags):
+        # GET /<Version>/<resource> and parse the XML body into a ResultSet
+        # built from `tags`, a list of (element-name, class) pairs.
+        if not tags:
+            tags=[('DistributionSummary', DistributionSummary)]
+        response = self.make_request('GET', '/%s/%s' % (self.Version, resource))
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise CloudFrontServerError(response.status, response.reason, body)
+        rs = ResultSet(tags)
+        h = handler.XmlHandler(rs, self)
+        xml.sax.parseString(body, h)
+        return rs
+
+    def _get_info(self, id, resource, dist_class):
+        # GET /<Version>/<resource>/<id>; returns a dist_class instance with
+        # its `etag` attribute populated from the response headers.
+        uri = '/%s/%s/%s' % (self.Version, resource, id)
+        response = self.make_request('GET', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise CloudFrontServerError(response.status, response.reason, body)
+        d = dist_class(connection=self)
+        response_headers = response.msg
+        # NOTE(review): duplicates get_etag(); could be
+        # d.etag = self.get_etag(response).
+        for key in response_headers.keys():
+            if key.lower() == 'etag':
+                d.etag = response_headers[key]
+        h = handler.XmlHandler(d, self)
+        xml.sax.parseString(body, h)
+        return d
+
+    def _get_config(self, id, resource, config_class):
+        # GET /<Version>/<resource>/<id>/config; returns a config_class
+        # instance carrying the etag needed by a later _set_config call.
+        uri = '/%s/%s/%s/config' % (self.Version, resource, id)
+        response = self.make_request('GET', uri)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status >= 300:
+            raise CloudFrontServerError(response.status, response.reason, body)
+        d = config_class(connection=self)
+        d.etag = self.get_etag(response)
+        h = handler.XmlHandler(d, self)
+        xml.sax.parseString(body, h)
+        return d
+
+    def _set_config(self, distribution_id, etag, config):
+        # PUT the serialized config back; `etag` (sent as If-Match) guards
+        # against concurrent modification.  Returns the new etag.
+        if isinstance(config, StreamingDistributionConfig):
+            resource = 'streaming-distribution'
+        else:
+            resource = 'distribution'
+        uri = '/%s/%s/%s/config' % (self.Version, resource, distribution_id)
+        headers = {'If-Match' : etag, 'Content-Type' : 'text/xml'}
+        response = self.make_request('PUT', uri, headers, config.to_xml())
+        body = response.read()
+        boto.log.debug(body)
+        if response.status != 200:
+            raise CloudFrontServerError(response.status, response.reason, body)
+        return self.get_etag(response)
+
+    def _create_object(self, config, resource, dist_class):
+        # POST the config XML; only a 201 (Created) is treated as success.
+        response = self.make_request('POST', '/%s/%s' % (self.Version, resource),
+                                     {'Content-Type' : 'text/xml'}, data=config.to_xml())
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 201:
+            d = dist_class(connection=self)
+            h = handler.XmlHandler(d, self)
+            xml.sax.parseString(body, h)
+            d.etag = self.get_etag(response)
+            return d
+        else:
+            raise CloudFrontServerError(response.status, response.reason, body)
+
+    def _delete_object(self, id, etag, resource):
+        # DELETE with If-Match; only a 204 (No Content) is success.
+        uri = '/%s/%s/%s' % (self.Version, resource, id)
+        response = self.make_request('DELETE', uri, {'If-Match' : etag})
+        body = response.read()
+        boto.log.debug(body)
+        if response.status != 204:
+            raise CloudFrontServerError(response.status, response.reason, body)
+
+    # Distributions
+
+    def get_all_distributions(self):
+        """List summaries of all distributions in the account."""
+        tags=[('DistributionSummary', DistributionSummary)]
+        return self._get_all_objects('distribution', tags)
+
+    def get_distribution_info(self, distribution_id):
+        """Fetch full details of a single distribution."""
+        return self._get_info(distribution_id, 'distribution', Distribution)
+
+    def get_distribution_config(self, distribution_id):
+        """Fetch a distribution's config (with etag, for later updates)."""
+        return self._get_config(distribution_id, 'distribution',
+                                DistributionConfig)
+
+    def set_distribution_config(self, distribution_id, etag, config):
+        """Replace a distribution's config; returns the new etag."""
+        return self._set_config(distribution_id, etag, config)
+
+    def create_distribution(self, origin, enabled, caller_reference='',
+                            cnames=None, comment=''):
+        """Create a new distribution for `origin`."""
+        config = DistributionConfig(origin=origin, enabled=enabled,
+                                    caller_reference=caller_reference,
+                                    cnames=cnames, comment=comment)
+        return self._create_object(config, 'distribution', Distribution)
+
+    def delete_distribution(self, distribution_id, etag):
+        """Delete a distribution; requires its current etag."""
+        return self._delete_object(distribution_id, etag, 'distribution')
+
+    # Streaming Distributions
+
+    def get_all_streaming_distributions(self):
+        """List summaries of all streaming distributions."""
+        tags=[('StreamingDistributionSummary', StreamingDistributionSummary)]
+        return self._get_all_objects('streaming-distribution', tags)
+
+    def get_streaming_distribution_info(self, distribution_id):
+        """Fetch full details of a single streaming distribution."""
+        return self._get_info(distribution_id, 'streaming-distribution',
+                              StreamingDistribution)
+
+    def get_streaming_distribution_config(self, distribution_id):
+        """Fetch a streaming distribution's config (with etag)."""
+        return self._get_config(distribution_id, 'streaming-distribution',
+                                StreamingDistributionConfig)
+
+    def set_streaming_distribution_config(self, distribution_id, etag, config):
+        """Replace a streaming distribution's config; returns the new etag."""
+        return self._set_config(distribution_id, etag, config)
+
+    def create_streaming_distribution(self, origin, enabled,
+                                      caller_reference='',
+                                      cnames=None, comment=''):
+        """Create a new streaming distribution for `origin`."""
+        config = StreamingDistributionConfig(origin=origin, enabled=enabled,
+                                             caller_reference=caller_reference,
+                                             cnames=cnames, comment=comment)
+        return self._create_object(config, 'streaming-distribution',
+                                   StreamingDistribution)
+
+    def delete_streaming_distribution(self, distribution_id, etag):
+        """Delete a streaming distribution; requires its current etag."""
+        return self._delete_object(distribution_id, etag, 'streaming-distribution')
+
+    # Origin Access Identity
+
+    def get_all_origin_access_identity(self):
+        """List summaries of all origin access identities."""
+        tags=[('CloudFrontOriginAccessIdentitySummary',
+               OriginAccessIdentitySummary)]
+        return self._get_all_objects('origin-access-identity/cloudfront', tags)
+
+    def get_origin_access_identity_info(self, access_id):
+        """Fetch full details of a single origin access identity."""
+        return self._get_info(access_id, 'origin-access-identity/cloudfront',
+                              OriginAccessIdentity)
+
+    def get_origin_access_identity_config(self, access_id):
+        """Fetch an origin access identity's config (with etag)."""
+        return self._get_config(access_id,
+                                'origin-access-identity/cloudfront',
+                                OriginAccessIdentityConfig)
+
+    def set_origin_access_identity_config(self, access_id,
+                                          etag, config):
+        """Replace an origin access identity's config; returns the new etag."""
+        return self._set_config(access_id, etag, config)
+
+    def create_origin_access_identity(self, caller_reference='', comment=''):
+        """Create a new origin access identity."""
+        config = OriginAccessIdentityConfig(caller_reference=caller_reference,
+                                            comment=comment)
+        return self._create_object(config, 'origin-access-identity/cloudfront',
+                                   OriginAccessIdentity)
+
+    def delete_origin_access_identity(self, access_id, etag):
+        """Delete an origin access identity; requires its current etag."""
+        return self._delete_object(access_id, etag,
+                                   'origin-access-identity/cloudfront')
+
+    # Object Invalidation
+
+    def create_invalidation_request(self, distribution_id, paths,
+                                    caller_reference=None):
+        """Creates a new invalidation request
+        :see: http://goo.gl/8vECq
+        """
+        # NOTE(review): caller_reference is accepted but never used here.
+        # We allow you to pass in either an array or
+        # an InvalidationBatch object
+        if not isinstance(paths, InvalidationBatch):
+            paths = InvalidationBatch(paths)
+        paths.connection = self
+        uri = '/%s/distribution/%s/invalidation' % (self.Version,
+                                                    distribution_id)
+        response = self.make_request('POST', uri,
+                                     {'Content-Type' : 'text/xml'},
+                                     data=paths.to_xml())
+        body = response.read()
+        if response.status == 201:
+            # On success the response XML is parsed back into `paths`,
+            # presumably to pick up server-assigned fields -- confirm
+            # against InvalidationBatch.
+            h = handler.XmlHandler(paths, self)
+            xml.sax.parseString(body, h)
+            return paths
+        else:
+            raise CloudFrontServerError(response.status, response.reason, body)
+
diff --git a/boto/cloudfront/distribution.py b/boto/cloudfront/distribution.py
new file mode 100644
index 0000000..ed245cb
--- /dev/null
+++ b/boto/cloudfront/distribution.py
@@ -0,0 +1,540 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+from boto.cloudfront.identity import OriginAccessIdentity
+from boto.cloudfront.object import Object, StreamingObject
+from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
+from boto.cloudfront.logging import LoggingInfo
+from boto.cloudfront.origin import S3Origin, CustomOrigin
+from boto.s3.acl import ACL
+
+class DistributionConfig:
+    """The full configuration document for a CloudFront download
+    distribution; renders itself to XML and parses itself from the
+    service's XML responses via startElement/endElement."""
+
+    def __init__(self, connection=None, origin=None, enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, default_root_object=None,
+                 logging=None):
+        """
+        :param origin: Origin information to associate with the
+                       distribution.  If your distribution will use
+                       an Amazon S3 origin, then this should be an
+                       S3Origin object. If your distribution will use
+                       a custom origin (non Amazon S3), then this
+                       should be a CustomOrigin object.
+        :type origin: :class:`boto.cloudfront.origin.S3Origin` or
+                      :class:`boto.cloudfront.origin.CustomOrigin`
+
+        :param enabled: Whether the distribution is enabled to accept
+                        end user requests for content.
+        :type enabled: bool
+
+        :param caller_reference: A unique number that ensures the
+                                 request can't be replayed.  If no
+                                 caller_reference is provided, boto
+                                 will generate a type 4 UUID for use
+                                 as the caller reference.
+        :type caller_reference: str
+
+        :param cnames: A CNAME alias you want to associate with this
+                       distribution. You can have up to 10 CNAME aliases
+                       per distribution.
+        :type cnames: list of str
+
+        :param comment: Any comments you want to include about the
+                        distribution.
+        :type comment: str
+
+        :param trusted_signers: Specifies any AWS accounts you want to
+                                permit to create signed URLs for private
+                                content. If you want the distribution to
+                                use signed URLs, this should contain a
+                                TrustedSigners object; if you want the
+                                distribution to use basic URLs, leave
+                                this None.
+        :type trusted_signers: :class:`boto.cloudfront.signers.TrustedSigners`
+
+        :param default_root_object: Designates a default root object.
+                                    Only include a DefaultRootObject value
+                                    if you are going to assign a default
+                                    root object for the distribution.
+        :type default_root_object: str
+
+        :param logging: Controls whether access logs are written for the
+                        distribution. If you want to turn on access logs,
+                        this should contain a LoggingInfo object; otherwise
+                        it should contain None.
+        :type logging: :class:`boto.cloudfront.logging.LoggingInfo`
+
+        """
+        self.connection = connection
+        self.origin = origin
+        self.enabled = enabled
+        if caller_reference:
+            self.caller_reference = caller_reference
+        else:
+            self.caller_reference = str(uuid.uuid4())
+        self.cnames = []
+        if cnames:
+            self.cnames = cnames
+        self.comment = comment
+        self.trusted_signers = trusted_signers
+        # Bug fix: the logging argument was previously discarded
+        # (self.logging was unconditionally set to None), so access
+        # logging could never be enabled through this constructor.
+        self.logging = logging
+        self.default_root_object = default_root_object
+
+    def to_xml(self):
+        """Render this config as the XML document CloudFront expects."""
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
+        if self.origin:
+            s += self.origin.to_xml()
+        s += '  <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        for cname in self.cnames:
+            s += '  <CNAME>%s</CNAME>\n' % cname
+        if self.comment:
+            s += '  <Comment>%s</Comment>\n' % self.comment
+        s += '  <Enabled>'
+        if self.enabled:
+            s += 'true'
+        else:
+            s += 'false'
+        s += '</Enabled>\n'
+        if self.trusted_signers:
+            s += '<TrustedSigners>\n'
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    s += '  <Self></Self>\n'
+                else:
+                    s += '  <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
+            s += '</TrustedSigners>\n'
+        if self.logging:
+            s += '<Logging>\n'
+            s += '  <Bucket>%s</Bucket>\n' % self.logging.bucket
+            s += '  <Prefix>%s</Prefix>\n' % self.logging.prefix
+            s += '</Logging>\n'
+        if self.default_root_object:
+            dro = self.default_root_object
+            s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
+        s += '</DistributionConfig>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        # Hand nested elements off to dedicated parser objects.
+        if name == 'TrustedSigners':
+            self.trusted_signers = TrustedSigners()
+            return self.trusted_signers
+        elif name == 'Logging':
+            self.logging = LoggingInfo()
+            return self.logging
+        elif name == 'S3Origin':
+            self.origin = S3Origin()
+            return self.origin
+        elif name == 'CustomOrigin':
+            self.origin = CustomOrigin()
+            return self.origin
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'CNAME':
+            self.cnames.append(value)
+        elif name == 'Comment':
+            self.comment = value
+        elif name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+        elif name == 'CallerReference':
+            self.caller_reference = value
+        elif name == 'DefaultRootObject':
+            self.default_root_object = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+class StreamingDistributionConfig(DistributionConfig):
+    """DistributionConfig variant for streaming (RTMP) distributions.
+    Shares the parent's parsing logic but emits a
+    <StreamingDistributionConfig> document with no default root object."""
+
+    def __init__(self, connection=None, origin='', enabled=False,
+                 caller_reference='', cnames=None, comment='',
+                 trusted_signers=None, logging=None):
+        DistributionConfig.__init__(self, connection=connection,
+                                    origin=origin, enabled=enabled,
+                                    caller_reference=caller_reference,
+                                    cnames=cnames, comment=comment,
+                                    trusted_signers=trusted_signers,
+                                    logging=logging)
+
+    def to_xml(self):
+        """Render this config as the XML document CloudFront expects."""
+        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n',
+                 '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n']
+        if self.origin:
+            parts.append(self.origin.to_xml())
+        parts.append('  <CallerReference>%s</CallerReference>\n'
+                     % self.caller_reference)
+        for cname in self.cnames:
+            parts.append('  <CNAME>%s</CNAME>\n' % cname)
+        if self.comment:
+            parts.append('  <Comment>%s</Comment>\n' % self.comment)
+        if self.enabled:
+            flag = 'true'
+        else:
+            flag = 'false'
+        parts.append('  <Enabled>%s</Enabled>\n' % flag)
+        if self.trusted_signers:
+            parts.append('<TrustedSigners>\n')
+            for signer in self.trusted_signers:
+                if signer == 'Self':
+                    parts.append('  <Self/>\n')
+                else:
+                    parts.append('  <AwsAccountNumber>%s</AwsAccountNumber>\n'
+                                 % signer)
+            parts.append('</TrustedSigners>\n')
+        if self.logging:
+            parts.append('<Logging>\n')
+            parts.append('  <Bucket>%s</Bucket>\n' % self.logging.bucket)
+            parts.append('  <Prefix>%s</Prefix>\n' % self.logging.prefix)
+            parts.append('</Logging>\n')
+        parts.append('</StreamingDistributionConfig>\n')
+        return ''.join(parts)
+
+class DistributionSummary:
+    """One entry from a distribution listing: the summary fields
+    CloudFront returns for each (streaming or download) distribution."""
+
+    def __init__(self, connection=None, domain_name='', id='',
+                 last_modified_time=None, status='', origin=None,
+                 cname='', comment='', enabled=False):
+        self.connection = connection
+        self.domain_name = domain_name
+        self.id = id
+        self.last_modified_time = last_modified_time
+        self.status = status
+        self.origin = origin
+        self.enabled = enabled
+        self.cnames = []
+        if cname:
+            self.cnames.append(cname)
+        self.comment = comment
+        self.trusted_signers = None
+        self.etag = None
+        # Set True in endElement when this summary was parsed from a
+        # StreamingDistributionSummary element.
+        self.streaming = False
+
+    def startElement(self, name, attrs, connection):
+        # Hand nested elements off to dedicated parser objects.
+        if name == 'TrustedSigners':
+            self.trusted_signers = TrustedSigners()
+            return self.trusted_signers
+        elif name == 'S3Origin':
+            self.origin = S3Origin()
+            return self.origin
+        elif name == 'CustomOrigin':
+            self.origin = CustomOrigin()
+            return self.origin
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'Status':
+            self.status = value
+        elif name == 'LastModifiedTime':
+            self.last_modified_time = value
+        elif name == 'DomainName':
+            self.domain_name = value
+        elif name == 'Origin':
+            self.origin = value
+        elif name == 'CNAME':
+            self.cnames.append(value)
+        elif name == 'Comment':
+            self.comment = value
+        elif name == 'Enabled':
+            if value.lower() == 'true':
+                self.enabled = True
+            else:
+                self.enabled = False
+        elif name == 'StreamingDistributionSummary':
+            self.streaming = True
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+    def get_distribution(self):
+        """Fetch the full Distribution object this summary refers to."""
+        return self.connection.get_distribution_info(self.id)
+
+class StreamingDistributionSummary(DistributionSummary):
+    """Summary entry for a streaming distribution listing."""
+
+    def get_distribution(self):
+        """Fetch the full StreamingDistribution this summary refers to."""
+        return self.connection.get_streaming_distribution_info(self.id)
+
+class Distribution:
+    """A CloudFront download distribution, with helpers for updating
+    its configuration and managing the content objects in the backing
+    S3 bucket."""
+
+    def __init__(self, connection=None, config=None, domain_name='',
+                 id='', last_modified_time=None, status=''):
+        self.connection = connection
+        self.config = config
+        self.domain_name = domain_name
+        self.id = id
+        self.last_modified_time = last_modified_time
+        self.status = status
+        self.active_signers = None
+        self.etag = None
+        # Lazily created/cached S3 bucket backing this distribution.
+        self._bucket = None
+        self._object_class = Object
+
+    def startElement(self, name, attrs, connection):
+        # Hand nested elements off to dedicated parser objects.
+        if name == 'DistributionConfig':
+            self.config = DistributionConfig()
+            return self.config
+        elif name == 'ActiveTrustedSigners':
+            self.active_signers = ActiveTrustedSigners()
+            return self.active_signers
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'LastModifiedTime':
+            self.last_modified_time = value
+        elif name == 'Status':
+            self.status = value
+        elif name == 'DomainName':
+            self.domain_name = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+    def update(self, enabled=None, cnames=None, comment=None):
+        """
+        Update the configuration of the Distribution.  The only values
+        of the DistributionConfig that can be updated are:
+
+         * CNAMES
+         * Comment
+         * Whether the Distribution is enabled or not
+
+        :type enabled: bool
+        :param enabled: Whether the Distribution is active or not.
+
+        :type cnames: list of str
+        :param cnames: The DNS CNAME's associated with this
+                        Distribution.  Maximum of 10 values.
+
+        :type comment: str or unicode
+        :param comment: The comment associated with the Distribution.
+
+        """
+        # NOTE(review): the logging config is not carried over into the
+        # new DistributionConfig here — confirm that is intended.
+        new_config = DistributionConfig(self.connection, self.config.origin,
+                                        self.config.enabled, self.config.caller_reference,
+                                        self.config.cnames, self.config.comment,
+                                        self.config.trusted_signers,
+                                        self.config.default_root_object)
+        if enabled is not None:
+            new_config.enabled = enabled
+        if cnames is not None:
+            new_config.cnames = cnames
+        if comment is not None:
+            new_config.comment = comment
+        self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
+        self.config = new_config
+        self._object_class = Object
+
+    def enable(self):
+        """
+        Activate the Distribution.  A convenience wrapper around
+        the update method.
+        """
+        # Bug fix: the enable/disable docstrings were swapped.
+        self.update(enabled=True)
+
+    def disable(self):
+        """
+        Deactivate the Distribution.  A convenience wrapper around
+        the update method.
+        """
+        self.update(enabled=False)
+
+    def delete(self):
+        """
+        Delete this CloudFront Distribution.  The content
+        associated with the Distribution is not deleted from
+        the underlying Origin bucket in S3.
+        """
+        self.connection.delete_distribution(self.id, self.etag)
+
+    def _get_bucket(self):
+        # Create (once) an S3 bucket object for this distribution's
+        # origin, annotated so Object instances can find us.
+        if not self._bucket:
+            # Bug fix: self.config.origin is parsed into an S3Origin /
+            # CustomOrigin object (see DistributionConfig.startElement),
+            # so calling .replace() directly on it raised AttributeError.
+            if isinstance(self.config.origin, S3Origin):
+                dns_name = self.config.origin.dns_name
+            else:
+                # Older-style configs may still carry a plain DNS string.
+                dns_name = self.config.origin
+            bucket_name = dns_name.replace('.s3.amazonaws.com', '')
+            from boto.s3.connection import S3Connection
+            s3 = S3Connection(self.connection.aws_access_key_id,
+                              self.connection.aws_secret_access_key,
+                              proxy=self.connection.proxy,
+                              proxy_port=self.connection.proxy_port,
+                              proxy_user=self.connection.proxy_user,
+                              proxy_pass=self.connection.proxy_pass)
+            self._bucket = s3.get_bucket(bucket_name)
+            self._bucket.distribution = self
+            self._bucket.set_key_class(self._object_class)
+        return self._bucket
+
+    def get_objects(self):
+        """
+        Return a list of all content objects in this distribution.
+
+        :rtype: list of :class:`boto.cloudfront.object.Object`
+        :return: The content objects
+        """
+        bucket = self._get_bucket()
+        return list(bucket)
+
+    def set_permissions(self, object, replace=False):
+        """
+        Sets the S3 ACL grants for the given object to the appropriate
+        value based on the type of Distribution.  If the Distribution
+        is serving private content the ACL will be set to include the
+        Origin Access Identity associated with the Distribution.  If
+        the Distribution is serving public content the content will
+        be set up with "public-read".
+
+        :type object: :class:`boto.cloudfront.object.Object`
+        :param object: The Object whose ACL is being set
+
+        :type replace: bool
+        :param replace: If False, the Origin Access Identity will be
+                        appended to the existing ACL for the object.
+                        If True, the ACL for the object will be
+                        completely replaced with one that grants
+                        READ permission to the Origin Access Identity.
+
+        """
+        if isinstance(self.config.origin, S3Origin):
+            if self.config.origin.origin_access_identity:
+                id = self.config.origin.origin_access_identity.split('/')[-1]
+                oai = self.connection.get_origin_access_identity_info(id)
+                policy = object.get_acl()
+                if replace:
+                    policy.acl = ACL()
+                policy.acl.add_user_grant('READ', oai.s3_user_id)
+                object.set_acl(policy)
+            else:
+                object.set_canned_acl('public-read')
+
+    def set_permissions_all(self, replace=False):
+        """
+        Sets the S3 ACL grants for all objects in the Distribution
+        to the appropriate value based on the type of Distribution.
+
+        :type replace: bool
+        :param replace: If False, the Origin Access Identity will be
+                        appended to the existing ACL for the object.
+                        If True, the ACL for the object will be
+                        completely replaced with one that grants
+                        READ permission to the Origin Access Identity.
+
+        """
+        bucket = self._get_bucket()
+        for key in bucket:
+            self.set_permissions(key, replace)
+
+    def add_object(self, name, content, headers=None, replace=True):
+        """
+        Adds a new content object to the Distribution.  The content
+        for the object will be copied to a new Key in the S3 Bucket
+        and the permissions will be set appropriately for the type
+        of Distribution.
+
+        :type name: str or unicode
+        :param name: The name or key of the new object.
+
+        :type content: file-like object
+        :param content: A file-like object that contains the content
+                        for the new object.
+
+        :type headers: dict
+        :param headers: A dictionary containing additional headers
+                        you would like associated with the new
+                        object in S3.
+
+        :rtype: :class:`boto.cloudfront.object.Object`
+        :return: The newly created object.
+        """
+        # Bug fix: DistributionConfig has no origin_access_identity
+        # attribute — that attribute lives on the S3Origin object (the
+        # same check set_permissions already performs).
+        if isinstance(self.config.origin, S3Origin) and \
+               self.config.origin.origin_access_identity:
+            policy = 'private'
+        else:
+            policy = 'public-read'
+        bucket = self._get_bucket()
+        object = bucket.new_key(name)
+        object.set_contents_from_file(content, headers=headers, policy=policy)
+        if policy == 'private':
+            self.set_permissions(object, replace)
+        return object
+
+class StreamingDistribution(Distribution):
+    """A CloudFront streaming (RTMP) distribution.  Behaves like
+    Distribution but parses StreamingDistributionConfig elements and
+    serves StreamingObject keys."""
+
+    def __init__(self, connection=None, config=None, domain_name='',
+                 id='', last_modified_time=None, status=''):
+        Distribution.__init__(self, connection, config, domain_name,
+                              id, last_modified_time, status)
+        self._object_class = StreamingObject
+
+    def startElement(self, name, attrs, connection):
+        if name == 'StreamingDistributionConfig':
+            self.config = StreamingDistributionConfig()
+            return self.config
+        else:
+            return Distribution.startElement(self, name, attrs, connection)
+
+    def update(self, enabled=None, cnames=None, comment=None):
+        """
+        Update the configuration of the StreamingDistribution.  The only values
+        of the StreamingDistributionConfig that can be updated are:
+
+         * CNAMES
+         * Comment
+         * Whether the Distribution is enabled or not
+
+        :type enabled: bool
+        :param enabled: Whether the StreamingDistribution is active or not.
+
+        :type cnames: list of str
+        :param cnames: The DNS CNAME's associated with this
+                        Distribution.  Maximum of 10 values.
+
+        :type comment: str or unicode
+        :param comment: The comment associated with the Distribution.
+
+        """
+        # NOTE(review): the logging config is not carried over into the
+        # new StreamingDistributionConfig here — confirm that is intended.
+        new_config = StreamingDistributionConfig(self.connection,
+                                                 self.config.origin,
+                                                 self.config.enabled,
+                                                 self.config.caller_reference,
+                                                 self.config.cnames,
+                                                 self.config.comment,
+                                                 self.config.trusted_signers)
+        if enabled is not None:
+            new_config.enabled = enabled
+        if cnames is not None:
+            new_config.cnames = cnames
+        if comment is not None:
+            new_config.comment = comment
+        self.etag = self.connection.set_streaming_distribution_config(self.id,
+                                                                      self.etag,
+                                                                      new_config)
+        self.config = new_config
+        self._object_class = StreamingObject
+
+    def delete(self):
+        """Delete this streaming distribution (content in the origin
+        bucket is not deleted)."""
+        self.connection.delete_streaming_distribution(self.id, self.etag)
+
+
diff --git a/boto/cloudfront/exception.py b/boto/cloudfront/exception.py
new file mode 100644
index 0000000..7680642
--- /dev/null
+++ b/boto/cloudfront/exception.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import BotoServerError
+
+class CloudFrontServerError(BotoServerError):
+    """Raised when the CloudFront service returns an error response."""
+
+    pass
diff --git a/boto/cloudfront/identity.py b/boto/cloudfront/identity.py
new file mode 100644
index 0000000..1571e87
--- /dev/null
+++ b/boto/cloudfront/identity.py
@@ -0,0 +1,122 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+
+class OriginAccessIdentity:
+    """A CloudFront origin access identity (OAI): a special identity
+    that can be granted S3 read access so a distribution can serve
+    private content."""
+
+    def __init__(self, connection=None, config=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.config = config
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        if name == 'CloudFrontOriginAccessIdentityConfig':
+            self.config = OriginAccessIdentityConfig()
+            return self.config
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+    def update(self, comment=None):
+        """Update the comment on this identity in place.
+
+        :type comment: str
+        :param comment: The new comment; None keeps the current one.
+        """
+        new_config = OriginAccessIdentityConfig(self.connection,
+                                                self.config.caller_reference,
+                                                self.config.comment)
+        if comment is not None:
+            new_config.comment = comment
+        # Bug fix: the connection class defines
+        # set_origin_access_identity_config();
+        # set_origin_identity_config() does not exist, so this call
+        # previously raised AttributeError.
+        self.etag = self.connection.set_origin_access_identity_config(self.id,
+                                                                      self.etag,
+                                                                      new_config)
+        self.config = new_config
+
+    def delete(self):
+        """Delete this origin access identity."""
+        return self.connection.delete_origin_access_identity(self.id, self.etag)
+
+    def uri(self):
+        """Return the canonical URI form used in distribution configs."""
+        return 'origin-access-identity/cloudfront/%s' % self.id
+
+class OriginAccessIdentityConfig:
+    """Configuration document (caller reference and comment) for a
+    CloudFront origin access identity."""
+
+    def __init__(self, connection=None, caller_reference='', comment=''):
+        self.connection = connection
+        # Fall back to a generated type 4 UUID when the caller does not
+        # supply a unique reference of their own.
+        self.caller_reference = caller_reference or str(uuid.uuid4())
+        self.comment = comment
+
+    def to_xml(self):
+        """Render this config as the XML document CloudFront expects."""
+        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n']
+        parts.append('<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n')
+        parts.append('  <CallerReference>%s</CallerReference>\n'
+                     % self.caller_reference)
+        if self.comment:
+            parts.append('  <Comment>%s</Comment>\n' % self.comment)
+        parts.append('</CloudFrontOriginAccessIdentityConfig>\n')
+        return ''.join(parts)
+
+    def startElement(self, name, attrs, connection):
+        # No nested elements to delegate to.
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'CallerReference':
+            self.caller_reference = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+class OriginAccessIdentitySummary:
+    """One entry from an origin access identity listing."""
+
+    def __init__(self, connection=None, id='',
+                 s3_user_id='', comment=''):
+        self.connection = connection
+        self.id = id
+        self.s3_user_id = s3_user_id
+        self.comment = comment
+        self.etag = None
+
+    def startElement(self, name, attrs, connection):
+        # No nested elements to delegate to.
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Id':
+            self.id = value
+        elif name == 'S3CanonicalUserId':
+            self.s3_user_id = value
+        elif name == 'Comment':
+            self.comment = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
+    def get_origin_access_identity(self):
+        """Fetch the full OriginAccessIdentity this summary refers to."""
+        return self.connection.get_origin_access_identity_info(self.id)
+
diff --git a/boto/cloudfront/invalidation.py b/boto/cloudfront/invalidation.py
new file mode 100644
index 0000000..ea13a67
--- /dev/null
+++ b/boto/cloudfront/invalidation.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2006-2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import uuid
+import urllib
+
+class InvalidationBatch(object):
+    """A simple invalidation request.
+    :see: http://docs.amazonwebservices.com/AmazonCloudFront/2010-08-01/APIReference/index.html?InvalidationBatchDatatype.html
+    """
+
+    def __init__(self, paths=None, connection=None, distribution=None,
+                 caller_reference=''):
+        """Create a new invalidation request:
+        :paths: An array of paths to invalidate
+        """
+        # Bug fix: the old default argument (paths=[]) was a mutable
+        # default shared across every instance, so paths added to one
+        # batch leaked into all subsequently-created batches.
+        if paths is None:
+            paths = []
+        self.paths = paths
+        self.distribution = distribution
+        self.caller_reference = caller_reference
+        if not self.caller_reference:
+            self.caller_reference = str(uuid.uuid4())
+        # NOTE(review): the original comment claimed a supplied
+        # distribution would be used as the connection object, but both
+        # branches of the old if/else assigned ``connection``.  The dead
+        # branch is removed here (behavior unchanged); confirm whether
+        # the distribution's connection was actually intended.
+        self.connection = connection
+
+    def add(self, path):
+        """Add another path to this invalidation request"""
+        return self.paths.append(path)
+
+    def remove(self, path):
+        """Remove a path from this invalidation request"""
+        return self.paths.remove(path)
+
+    def __iter__(self):
+        return iter(self.paths)
+
+    def __getitem__(self, i):
+        return self.paths[i]
+
+    def __setitem__(self, k, v):
+        self.paths[k] = v
+
+    def escape(self, p):
+        """Escape a path, make sure it begins with a slash and contains
+        no invalid characters"""
+        # Bug fix: the old ``p[0]`` check raised IndexError for an
+        # empty path; startswith handles '' safely.
+        if not p.startswith("/"):
+            p = "/%s" % p
+        return urllib.quote(p)
+
+    def to_xml(self):
+        """Get this batch as XML"""
+        assert self.connection != None
+        s = '<?xml version="1.0" encoding="UTF-8"?>\n'
+        s += '<InvalidationBatch xmlns="http://cloudfront.amazonaws.com/doc/%s/">\n' % self.connection.Version
+        for p in self.paths:
+            s += '    <Path>%s</Path>\n' % self.escape(p)
+        s += '    <CallerReference>%s</CallerReference>\n' % self.caller_reference
+        s += '</InvalidationBatch>\n'
+        return s
+
+    def startElement(self, name, attrs, connection):
+        if name == "InvalidationBatch":
+            # Reset so parsed paths replace any locally-added ones.
+            self.paths = []
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Path':
+            self.paths.append(value)
+        elif name == "Status":
+            self.status = value
+        elif name == "Id":
+            self.id = value
+        elif name == "CreateTime":
+            self.create_time = value
+        elif name == "CallerReference":
+            self.caller_reference = value
+        return None
diff --git a/boto/cloudfront/logging.py b/boto/cloudfront/logging.py
new file mode 100644
index 0000000..6c2f4fd
--- /dev/null
+++ b/boto/cloudfront/logging.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class LoggingInfo(object):
+    """Access-log settings (target bucket and key prefix) parsed from a
+    distribution's <Logging> element."""
+
+    def __init__(self, bucket='', prefix=''):
+        self.bucket = bucket
+        self.prefix = prefix
+
+    def startElement(self, name, attrs, connection):
+        # No nested elements to delegate to.
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Prefix':
+            self.prefix = value
+        elif name == 'Bucket':
+            self.bucket = value
+        else:
+            # Unknown elements become plain attributes.
+            setattr(self, name, value)
+
diff --git a/boto/cloudfront/object.py b/boto/cloudfront/object.py
new file mode 100644
index 0000000..3574d13
--- /dev/null
+++ b/boto/cloudfront/object.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.key import Key
+
+class Object(Key):
+    """An S3 Key that also knows which CloudFront distribution
+    serves it, so it can build its public URL."""
+
+    def __init__(self, bucket, name=None):
+        Key.__init__(self, bucket, name=name)
+        # The bucket is annotated with its distribution by
+        # Distribution._get_bucket().
+        self.distribution = bucket.distribution
+
+    def __repr__(self):
+        return '<Object: %s/%s>' % (self.distribution.config.origin, self.name)
+
+    def url(self, scheme='http'):
+        """Return this object's URL on the distribution under *scheme*."""
+        if scheme.lower().startswith('rtmp'):
+            path = '/cfx/st/'
+        else:
+            path = '/'
+        return '%s://%s%s%s' % (scheme, self.distribution.domain_name,
+                                path, self.name)
+
+class StreamingObject(Object):
+    """An Object served by a streaming distribution; defaults to an
+    rtmp:// URL."""
+
+    def url(self, scheme='rtmp'):
+        return Object.url(self, scheme)
+
+
diff --git a/boto/cloudfront/origin.py b/boto/cloudfront/origin.py
new file mode 100644
index 0000000..57af846
--- /dev/null
+++ b/boto/cloudfront/origin.py
@@ -0,0 +1,150 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from identity import OriginAccessIdentity
+
def get_oai_value(origin_access_identity):
    """Return the URI form of an origin access identity.

    Accepts either an OriginAccessIdentity instance (its uri() result
    is used) or a plain string, which is returned unchanged.
    """
    if isinstance(origin_access_identity, OriginAccessIdentity):
        return origin_access_identity.uri()
    return origin_access_identity
+
class S3Origin(object):
    """
    Origin information to associate with the distribution.
    If your distribution will use an Amazon S3 origin,
    then you use the S3Origin element.
    """

    def __init__(self, dns_name=None, origin_access_identity=None):
        """
        :param dns_name: The DNS name of your Amazon S3 bucket to
                         associate with the distribution.
                         For example: mybucket.s3.amazonaws.com.
        :type dns_name: str

        :param origin_access_identity: The CloudFront origin access
                                       identity to associate with the
                                       distribution. Supply it to serve
                                       private content; omit it for
                                       public content.
        :type origin_access_identity: str

        """
        self.dns_name = dns_name
        self.origin_access_identity = origin_access_identity

    def __repr__(self):
        return '<S3Origin: %s>' % self.dns_name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # SAX callback: map known element names onto attributes and
        # fall back to setattr for anything unrecognized.
        if name == 'DNSName':
            self.dns_name = value
        elif name == 'OriginAccessIdentity':
            self.origin_access_identity = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        """Serialize this origin as the S3Origin XML fragment."""
        parts = ['  <S3Origin>\n',
                 '    <DNSName>%s</DNSName>\n' % self.dns_name]
        if self.origin_access_identity:
            oai = get_oai_value(self.origin_access_identity)
            parts.append('    <OriginAccessIdentity>%s</OriginAccessIdentity>\n' % oai)
        parts.append('  </S3Origin>\n')
        return ''.join(parts)
+
class CustomOrigin(object):
    """
    Origin information to associate with the distribution.
    If your distribution will use a non-Amazon S3 origin,
    then you use the CustomOrigin element.
    """

    def __init__(self, dns_name=None, http_port=80, https_port=443,
                 origin_protocol_policy=None):
        """
        :param dns_name: The DNS name of your origin server to
                         associate with the distribution.
                         For example: www.example.com.
        :type dns_name: str

        :param http_port: The HTTP port the custom origin listens on.
        :type http_port: int

        :param https_port: The HTTPS port the custom origin listens on.
        :type https_port: int

        :param origin_protocol_policy: The origin protocol policy to
                                       apply to your origin. If you
                                       specify http-only, CloudFront
                                       will use HTTP only to access the origin.
                                       If you specify match-viewer, CloudFront
                                       will fetch from your origin using HTTP
                                       or HTTPS, based on the protocol of the
                                       viewer request.
        :type origin_protocol_policy: str

        """
        self.dns_name = dns_name
        self.http_port = http_port
        self.https_port = https_port
        self.origin_protocol_policy = origin_protocol_policy

    def __repr__(self):
        return '<CustomOrigin: %s>' % self.dns_name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # SAX callback: map known element names onto attributes and
        # fall back to setattr for anything unrecognized.
        if name == 'DNSName':
            self.dns_name = value
        elif name == 'HTTPPort':
            try:
                self.http_port = int(value)
            except ValueError:
                # Keep the raw string rather than failing the whole
                # parse if the service returns a non-numeric value.
                self.http_port = value
        elif name == 'HTTPSPort':
            try:
                self.https_port = int(value)
            except ValueError:
                self.https_port = value
        elif name == 'OriginProtocolPolicy':
            self.origin_protocol_policy = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        """Serialize this origin as the CustomOrigin XML fragment."""
        s = '  <CustomOrigin>\n'
        s += '    <DNSName>%s</DNSName>\n' % self.dns_name
        # Use %s, not %d: endElement deliberately falls back to the raw
        # string when a port value is non-numeric, and %d would raise
        # TypeError in that case. %s renders ints identically.
        s += '    <HTTPPort>%s</HTTPPort>\n' % self.http_port
        s += '    <HTTPSPort>%s</HTTPSPort>\n' % self.https_port
        s += '    <OriginProtocolPolicy>%s</OriginProtocolPolicy>\n' % self.origin_protocol_policy
        s += '  </CustomOrigin>\n'
        return s
+
diff --git a/boto/cloudfront/signers.py b/boto/cloudfront/signers.py
new file mode 100644
index 0000000..0b0cd50
--- /dev/null
+++ b/boto/cloudfront/signers.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Signer:
    """One entity trusted to create signed URLs for a distribution.

    ``id`` is the literal string 'Self' (the distribution owner) or an
    AWS account number; ``key_pair_ids`` collects the associated
    CloudFront key pair IDs.
    """

    def __init__(self):
        self.id = None
        self.key_pair_ids = []

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Branches are mutually exclusive; order is immaterial.
        if name == 'KeyPairId':
            self.key_pair_ids.append(value)
        elif name == 'Self':
            self.id = 'Self'
        elif name == 'AwsAccountNumber':
            self.id = value
+
class ActiveTrustedSigners(list):
    """List of Signer objects parsed from an ActiveTrustedSigners
    response element; each nested <Signer> becomes one entry."""

    def startElement(self, name, attrs, connection):
        if name != 'Signer':
            return None
        # Hand the nested element off to a fresh Signer for parsing.
        signer = Signer()
        self.append(signer)
        return signer

    def endElement(self, name, value, connection):
        # All content lives in the nested Signer elements.
        pass
+
class TrustedSigners(list):
    """Account identifiers trusted to sign URLs: the literal 'Self'
    for the distribution owner, or AWS account number strings."""

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name == 'AwsAccountNumber':
            self.append(value)
        elif name == 'Self':
            # The element name itself stands in for the owner account.
            self.append(name)
+
diff --git a/boto/connection.py b/boto/connection.py
new file mode 100644
index 0000000..76e9ffe
--- /dev/null
+++ b/boto/connection.py
@@ -0,0 +1,637 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Google
+# Copyright (c) 2008 rPath, Inc.
+# Copyright (c) 2009 The Echo Nest Corporation
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+"""
+Handles basic connections to AWS
+"""
+
+import base64
+import errno
+import httplib
+import os
+import Queue
+import re
+import socket
+import sys
+import time
+import urllib, urlparse
+import xml.sax
+
+import auth
+import auth_handler
+import boto
+import boto.utils
+
+from boto import config, UserAgent, handler
+from boto.exception import AWSConnectionError, BotoClientError, BotoServerError
+from boto.provider import Provider
+from boto.resultset import ResultSet
+
+
+PORTS_BY_SECURITY = { True: 443, False: 80 }
+
class ConnectionPool:
    """LRU-bounded pool of reusable HTTP(S) connections.

    At most ``hosts`` distinct host entries are kept (least recently
    used entries evicted by boto.utils.LRUCache); each entry is a
    bounded Queue holding up to ``connections_per_host`` idle
    connections for that host.
    """

    def __init__(self, hosts, connections_per_host):
        self._hosts = boto.utils.LRUCache(hosts)
        self.connections_per_host = connections_per_host

    def __getitem__(self, key):
        cache = self._hosts
        if key not in cache:
            # First request for this host: create its bounded queue.
            cache[key] = Queue.Queue(self.connections_per_host)
        return cache[key]

    def __repr__(self):
        return 'ConnectionPool:%s' % ','.join(self._hosts._dict.keys())
+
class HTTPRequest(object):
    """A fully-specified HTTP request, ready to be signed and sent.

    Plain value object: every constructor argument is stored verbatim
    on an attribute of the same name.
    """

    def __init__(self, method, protocol, host, port, path, auth_path,
                 params, headers, body):
        """
        :type method: string
        :param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.

        :type protocol: string
        :param protocol: The http protocol used, 'http' or 'https'.

        :type host: string
        :param host: Host to which the request is addressed, e.g. abc.com.

        :type port: int
        :param port: Port on which the request is being sent. Zero means
            unset, in which case the default port is chosen.

        :type path: string
        :param path: URL path that is being accessed.

        :type auth_path: string
        :param auth_path: The part of the URL path used when creating
            the authentication string.

        :type params: dict
        :param params: HTTP url query parameters, keyed by param name.

        :type headers: dict
        :param headers: HTTP headers, keyed by header name.

        :type body: string
        :param body: Body of the HTTP request; None or '' if absent.
        """
        self.method = method
        self.protocol = protocol
        self.host = host
        self.port = port
        self.path = path
        self.auth_path = auth_path
        self.params = params
        self.headers = headers
        self.body = body

    def __str__(self):
        fmt = ('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
               'params(%s) headers(%s) body(%s)')
        return fmt % (self.method, self.protocol, self.host, self.port,
                      self.path, self.params, self.headers, self.body)
+
class AWSAuthConnection(object):
    # Base class for authenticated AWS connections.  Owns the HTTP
    # connection pool, proxy configuration, the retry/redirect loop
    # (_mexe) and the request-signing plumbing (fill_in_auth).
    def __init__(self, host, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, path='/', provider='aws'):
        """
        :type host: str
        :param host: The host to make the connection to

        :keyword str aws_access_key_id: Your AWS Access Key ID (provided by
            Amazon). If none is specified, the value in your
            ``AWS_ACCESS_KEY_ID`` environmental variable is used.
        :keyword str aws_secret_access_key: Your AWS Secret Access Key
            (provided by Amazon). If none is specified, the value in your
            ``AWS_SECRET_ACCESS_KEY`` environmental variable is used.

        :type is_secure: boolean
        :param is_secure: Whether the connection is over SSL

        :type https_connection_factory: list or tuple
        :param https_connection_factory: A pair of an HTTP connection
                                         factory and the exceptions to catch.
                                         The factory should have a similar
                                         interface to L{httplib.HTTPSConnection}.

        :param str proxy: Address/hostname for a proxy server

        :type proxy_port: int
        :param proxy_port: The port to use when connecting over a proxy

        :type proxy_user: str
        :param proxy_user: The username to connect with on the proxy

        :type proxy_pass: str
        :param proxy_pass: The password to use when connection over a proxy.

        :type port: int
        :param port: The port to use to connect
        """
        self.num_retries = 5
        # Override passed-in is_secure setting if value was defined in config.
        if config.has_option('Boto', 'is_secure'):
            is_secure = config.getboolean('Boto', 'is_secure')
        self.is_secure = is_secure
        self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
        # define exceptions from httplib that we want to catch and retry
        self.http_exceptions = (httplib.HTTPException, socket.error,
                                socket.gaierror)
        # define values in socket exceptions we don't want to catch
        self.socket_exception_values = (errno.EINTR,)
        if https_connection_factory is not None:
            self.https_connection_factory = https_connection_factory[0]
            # The factory's exception types are retried too.
            self.http_exceptions += https_connection_factory[1]
        else:
            self.https_connection_factory = None
        if (is_secure):
            self.protocol = 'https'
        else:
            self.protocol = 'http'
        self.host = host
        self.path = path
        if debug:
            self.debug = debug
        else:
            self.debug = config.getint('Boto', 'debug', debug)
        if port:
            self.port = port
        else:
            self.port = PORTS_BY_SECURITY[is_secure]

        self.provider = Provider(provider,
                                 aws_access_key_id,
                                 aws_secret_access_key)

        # allow config file to override default host
        if self.provider.host:
            self.host = self.provider.host

        # cache up to 20 connections per host, up to 20 hosts
        self._pool = ConnectionPool(20, 20)
        self._connection = (self.server_name(), self.is_secure)
        self._last_rs = None
        self._auth_handler = auth.get_auth_handler(
            host, config, self.provider, self._required_auth_capability())

    def __repr__(self):
        return '%s:%s' % (self.__class__.__name__, self.host)

    def _required_auth_capability(self):
        # Subclasses override this to declare which auth scheme(s)
        # they need; used to select an auth handler above.
        return []

    def _cached_name(self, host, is_secure):
        # Key used to look up this host's queue in the connection pool.
        if host is None:
            host = self.server_name()
        cached_name = is_secure and 'https://' or 'http://'
        cached_name += host
        return cached_name

    def connection(self):
        # Backwards-compatible accessor: pulls a (possibly pooled)
        # connection for the default host/scheme.
        return self.get_http_connection(*self._connection)
    connection = property(connection)

    def aws_access_key_id(self):
        return self.provider.access_key
    aws_access_key_id = property(aws_access_key_id)
    gs_access_key_id = aws_access_key_id
    access_key = aws_access_key_id

    def aws_secret_access_key(self):
        return self.provider.secret_key
    aws_secret_access_key = property(aws_secret_access_key)
    gs_secret_access_key = aws_secret_access_key
    secret_key = aws_secret_access_key

    def get_path(self, path='/'):
        # Join the connection's base path with the request path,
        # collapsing empty segments while preserving a trailing slash
        # and any query string.
        pos = path.find('?')
        if pos >= 0:
            params = path[pos:]
            path = path[:pos]
        else:
            params = None
        if path[-1] == '/':
            need_trailing = True
        else:
            need_trailing = False
        path_elements = self.path.split('/')
        path_elements.extend(path.split('/'))
        path_elements = [p for p in path_elements if p]
        path = '/' + '/'.join(path_elements)
        if path[-1] != '/' and need_trailing:
            path += '/'
        if params:
            path = path + params
        return path

    def server_name(self, port=None):
        # Host string as it should appear in the signed Host header.
        if not port:
            port = self.port
        if port == 80:
            signature_host = self.host
        else:
            # This unfortunate little hack can be attributed to
            # a difference in the 2.6 version of httplib. In old
            # versions, it would append ":443" to the hostname sent
            # in the Host header and so we needed to make sure we
            # did the same when calculating the V2 signature. In 2.6
            # (and higher!)
            # it no longer does that. Hence, this kludge.
            if sys.version[:3] in ('2.6', '2.7') and port == 443:
                signature_host = self.host
            else:
                signature_host = '%s:%d' % (self.host, port)
        return signature_host

    def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
        # Resolve proxy settings from (in priority order) explicit
        # arguments, the http_proxy environment variable, then the
        # Boto config file.
        self.proxy = proxy
        self.proxy_port = proxy_port
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        if os.environ.has_key('http_proxy') and not self.proxy:
            # Parse [http://][user:pass@]host[:port] from the env var.
            pattern = re.compile(
                '(?:http://)?' \
                '(?:(?P<user>\w+):(?P<pass>.*)@)?' \
                '(?P<host>[\w\-\.]+)' \
                '(?::(?P<port>\d+))?'
            )
            match = pattern.match(os.environ['http_proxy'])
            if match:
                self.proxy = match.group('host')
                self.proxy_port = match.group('port')
                self.proxy_user = match.group('user')
                self.proxy_pass = match.group('pass')
        else:
            if not self.proxy:
                self.proxy = config.get_value('Boto', 'proxy', None)
            if not self.proxy_port:
                self.proxy_port = config.get_value('Boto', 'proxy_port', None)
            if not self.proxy_user:
                self.proxy_user = config.get_value('Boto', 'proxy_user', None)
            if not self.proxy_pass:
                self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)

        if not self.proxy_port and self.proxy:
            print "http_proxy environment variable does not specify " \
                "a port, using default"
            self.proxy_port = self.port
        self.use_proxy = (self.proxy != None)

    def get_http_connection(self, host, is_secure):
        # Reuse an idle pooled connection when available, otherwise
        # open a fresh one.
        queue = self._pool[self._cached_name(host, is_secure)]
        try:
            return queue.get_nowait()
        except Queue.Empty:
            return self.new_http_connection(host, is_secure)

    def new_http_connection(self, host, is_secure):
        if self.use_proxy:
            host = '%s:%d' % (self.proxy, int(self.proxy_port))
        if host is None:
            host = self.server_name()
        if is_secure:
            boto.log.debug('establishing HTTPS connection')
            if self.use_proxy:
                # HTTPS through a proxy requires a CONNECT tunnel.
                connection = self.proxy_ssl()
            elif self.https_connection_factory:
                connection = self.https_connection_factory(host)
            else:
                connection = httplib.HTTPSConnection(host)
        else:
            boto.log.debug('establishing HTTP connection')
            connection = httplib.HTTPConnection(host)
        if self.debug > 1:
            connection.set_debuglevel(self.debug)
        # self.connection must be maintained for backwards-compatibility
        # however, it must be dynamically pulled from the connection pool
        # set a private variable which will enable that
        if host.split(':')[0] == self.host and is_secure == self.is_secure:
            self._connection = (host, is_secure)
        return connection

    def put_http_connection(self, host, is_secure, connection):
        # Return a connection to the pool for later reuse.
        try:
            self._pool[self._cached_name(host, is_secure)].put_nowait(connection)
        except Queue.Full:
            # gracefully fail in case of pool overflow
            connection.close()

    def proxy_ssl(self):
        # Open a raw socket to the proxy, issue a CONNECT for the real
        # host, then wrap the tunneled socket in SSL.
        host = '%s:%d' % (self.host, self.port)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((self.proxy, int(self.proxy_port)))
        except:
            raise
        sock.sendall("CONNECT %s HTTP/1.0\r\n" % host)
        sock.sendall("User-Agent: %s\r\n" % UserAgent)
        if self.proxy_user and self.proxy_pass:
            for k, v in self.get_proxy_auth_header().items():
                sock.sendall("%s: %s\r\n" % (k, v))
        sock.sendall("\r\n")
        resp = httplib.HTTPResponse(sock, strict=True)
        resp.begin()

        if resp.status != 200:
            # Fake a socket error, use a code that make it obvious it hasn't
            # been generated by the socket library
            raise socket.error(-71,
                               "Error talking to HTTP proxy %s:%s: %s (%s)" %
                               (self.proxy, self.proxy_port, resp.status, resp.reason))

        # We can safely close the response, it duped the original socket
        resp.close()

        h = httplib.HTTPConnection(host)

        # Wrap the socket in an SSL socket
        if hasattr(httplib, 'ssl'):
            sslSock = httplib.ssl.SSLSocket(sock)
        else: # Old Python, no ssl module
            sslSock = socket.ssl(sock, None, None)
            sslSock = httplib.FakeSocket(sock, sslSock)
        # This is a bit unclean
        h.sock = sslSock
        return h

    def prefix_proxy_to_path(self, path, host=None):
        # Proxied (non-CONNECT) requests need the absolute URI form.
        path = self.protocol + '://' + (host or self.server_name()) + path
        return path

    def get_proxy_auth_header(self):
        # Basic auth header for proxies requiring credentials.
        auth = base64.encodestring(self.proxy_user + ':' + self.proxy_pass)
        return {'Proxy-Authorization': 'Basic %s' % auth}

    def _mexe(self, method, path, data, headers, host=None, sender=None,
              override_num_retries=None):
        """
        mexe - Multi-execute inside a loop, retrying multiple times to handle
        transient Internet errors by simply trying again.
        Also handles redirects.

        This code was inspired by the S3Utils classes posted to the boto-users
        Google group by Larry Bates. Thanks!
        """
        boto.log.debug('Method: %s' % method)
        boto.log.debug('Path: %s' % path)
        boto.log.debug('Data: %s' % data)
        boto.log.debug('Headers: %s' % headers)
        boto.log.debug('Host: %s' % host)
        response = None
        body = None
        e = None
        if override_num_retries is None:
            num_retries = config.getint('Boto', 'num_retries', self.num_retries)
        else:
            num_retries = override_num_retries
        i = 0
        connection = self.get_http_connection(host, self.is_secure)
        while i <= num_retries:
            try:
                if callable(sender):
                    # Caller-supplied sender (e.g. for streaming uploads).
                    response = sender(connection, method, path, data, headers)
                else:
                    connection.request(method, path, data, headers)
                    response = connection.getresponse()
                location = response.getheader('location')
                # -- gross hack --
                # httplib gets confused with chunked responses to HEAD requests
                # so I have to fake it out
                if method == 'HEAD' and getattr(response, 'chunked', False):
                    response.chunked = 0
                if response.status == 500 or response.status == 503:
                    # Server-side transient error: fall through to the
                    # sleep-and-retry at the bottom of the loop.
                    boto.log.debug('received %d response, retrying in %d seconds' % (response.status, 2 ** i))
                    body = response.read()
                elif response.status == 408:
                    body = response.read()
                    print '-------------------------'
                    print '         4 0 8           '
                    print 'path=%s' % path
                    print body
                    print '-------------------------'
                elif response.status < 300 or response.status >= 400 or \
                        not location:
                    # Success or a non-redirect error: hand the response
                    # back to the caller and recycle the connection.
                    self.put_http_connection(host, self.is_secure, connection)
                    return response
                else:
                    # 3xx with a Location header: follow the redirect.
                    scheme, host, path, params, query, fragment = \
                        urlparse.urlparse(location)
                    if query:
                        path += '?' + query
                    boto.log.debug('Redirecting: %s' % scheme + '://' + host + path)
                    connection = self.get_http_connection(host, scheme == 'https')
                    continue
            except KeyboardInterrupt:
                sys.exit('Keyboard Interrupt')
            except self.http_exceptions, e:
                boto.log.debug('encountered %s exception, reconnecting' % \
                               e.__class__.__name__)
                connection = self.new_http_connection(host, self.is_secure)
            # Exponential backoff between attempts.
            time.sleep(2 ** i)
            i += 1
        # If we made it here, it's because we have exhausted our retries and still haven't
        # succeeded. So, if we have a response object, use it to raise an exception.
        # Otherwise, raise the exception that must have already happened.
        if response:
            raise BotoServerError(response.status, response.reason, body)
        elif e:
            raise e
        else:
            raise BotoClientError('Please report this exception as a Boto Issue!')

    def build_base_http_request(self, method, path, auth_path,
                                params=None, headers=None, data='', host=None):
        # Assemble an (unsigned) HTTPRequest; params/headers are copied
        # so callers' dicts are never mutated.
        path = self.get_path(path)
        if auth_path is not None:
            auth_path = self.get_path(auth_path)
        if params == None:
            params = {}
        else:
            params = params.copy()
        if headers == None:
            headers = {}
        else:
            headers = headers.copy()
        host = host or self.host
        if self.use_proxy:
            path = self.prefix_proxy_to_path(path, host)
            if self.proxy_user and self.proxy_pass and not self.is_secure:
                # If is_secure, we don't have to set the proxy authentication
                # header here, we did that in the CONNECT to the proxy.
                headers.update(self.get_proxy_auth_header())
        return HTTPRequest(method, self.protocol, host, self.port,
                           path, auth_path, params, headers, data)

    def fill_in_auth(self, http_request, **kwargs):
        # Percent-encode any unicode header values, then let the auth
        # handler sign the request in place.
        headers = http_request.headers
        for key in headers:
            val = headers[key]
            if isinstance(val, unicode):
                headers[key] = urllib.quote_plus(val.encode('utf-8'))

        self._auth_handler.add_auth(http_request, **kwargs)

        headers['User-Agent'] = UserAgent
        if not headers.has_key('Content-Length'):
            headers['Content-Length'] = str(len(http_request.body))
        return http_request

    def _send_http_request(self, http_request, sender=None,
                           override_num_retries=None):
        return self._mexe(http_request.method, http_request.path,
                          http_request.body, http_request.headers,
                          http_request.host, sender, override_num_retries)

    def make_request(self, method, path, headers=None, data='', host=None,
                     auth_path=None, sender=None, override_num_retries=None):
        """Makes a request to the server, with stock multiple-retry logic."""
        http_request = self.build_base_http_request(method, path, auth_path,
                                                    {}, headers, data, host)
        http_request = self.fill_in_auth(http_request)
        return self._send_http_request(http_request, sender,
                                       override_num_retries)

    def close(self):
        """(Optional) Close any open HTTP connections. This is non-destructive,
        and making a new request will open a connection again."""

        boto.log.debug('closing all HTTP connections')
        # NOTE(review): 'connection' is defined above as a read-only
        # property; assigning to it here looks like it would raise
        # AttributeError on a new-style class -- confirm intent.
        self.connection = None # compat field
+
class AWSQueryConnection(AWSAuthConnection):
    """Base class for AWS Query API connections.

    Adds the Action/Version query parameters to each request and
    provides generic helpers that parse XML responses into ResultSets,
    single objects, or a bare status flag.
    """

    APIVersion = ''
    ResponseError = BotoServerError

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host=None, debug=0,
                 https_connection_factory=None, path='/'):
        AWSAuthConnection.__init__(self, host, aws_access_key_id,
                                   aws_secret_access_key, is_secure, port,
                                   proxy, proxy_port, proxy_user, proxy_pass,
                                   debug, https_connection_factory, path)

    def _required_auth_capability(self):
        return []

    def get_utf8_value(self, value):
        return boto.utils.get_utf8_value(value)

    def make_request(self, action, params=None, path='/', verb='GET'):
        """Issue a Query API request carrying the given Action."""
        http_request = self.build_base_http_request(verb, path, None,
                                                    params, {}, '',
                                                    self.server_name())
        if action:
            http_request.params['Action'] = action
        http_request.params['Version'] = self.APIVersion
        http_request = self.fill_in_auth(http_request)
        return self._send_http_request(http_request)

    def build_list_params(self, params, items, label):
        """Flatten ``items`` into numbered params: label.1, label.2, ..."""
        if isinstance(items, str):
            items = [items]
        for idx, item in enumerate(items):
            params['%s.%d' % (label, idx + 1)] = item

    # generics

    def _checked_response_body(self, response):
        """Read the body; raise ResponseError unless it is a non-empty 200."""
        body = response.read()
        boto.log.debug(body)
        if not body:
            boto.log.error('Null body %s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)
        return body

    def get_list(self, action, params, markers, path='/', parent=None, verb='GET'):
        """Run ``action`` and parse the response into a ResultSet list."""
        parent = parent or self
        response = self.make_request(action, params, path, verb)
        body = self._checked_response_body(response)
        rs = ResultSet(markers)
        xml.sax.parseString(body, handler.XmlHandler(rs, parent))
        return rs

    def get_object(self, action, params, cls, path='/', parent=None, verb='GET'):
        """Run ``action`` and parse the response into a single ``cls``."""
        parent = parent or self
        response = self.make_request(action, params, path, verb)
        body = self._checked_response_body(response)
        obj = cls(parent)
        xml.sax.parseString(body, handler.XmlHandler(obj, parent))
        return obj

    def get_status(self, action, params, path='/', parent=None, verb='GET'):
        """Run ``action`` and return the parsed response's status flag."""
        parent = parent or self
        response = self.make_request(action, params, path, verb)
        body = self._checked_response_body(response)
        rs = ResultSet()
        xml.sax.parseString(body, handler.XmlHandler(rs, parent))
        return rs.status
diff --git a/boto/contrib/__init__.py b/boto/contrib/__init__.py
new file mode 100644
index 0000000..303dbb6
--- /dev/null
+++ b/boto/contrib/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
diff --git a/boto/contrib/m2helpers.py b/boto/contrib/m2helpers.py
new file mode 100644
index 0000000..82d2730
--- /dev/null
+++ b/boto/contrib/m2helpers.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2006,2007 Jon Colverson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module was contributed by Jon Colverson. It provides a couple of helper
+functions that allow you to use M2Crypto's implementation of HTTPSConnection
+rather than the default version in httplib.py. The main benefit is that
+M2Crypto's version verifies the certificate of the server.
+
+To use this feature, do something like this:
+
+from boto.ec2.connection import EC2Connection
+
+ec2 = EC2Connection(ACCESS_KEY_ID, SECRET_ACCESS_KEY,
+ https_connection_factory=https_connection_factory(cafile=CA_FILE))
+
+See http://code.google.com/p/boto/issues/detail?id=57 for more details.
+"""
+from M2Crypto import SSL
+from M2Crypto.httpslib import HTTPSConnection
+
def secure_context(cafile=None, capath=None):
    """Return an M2Crypto SSL.Context that verifies the peer.

    The context requires a server certificate, fails the handshake if
    none is presented, and validates the chain (depth up to 9) against
    the CAs loaded from ``cafile``/``capath``.
    """
    ctx = SSL.Context()
    ctx.set_verify(SSL.verify_peer | SSL.verify_fail_if_no_peer_cert, depth=9)
    # load_verify_locations returns 1 on success.
    if ctx.load_verify_locations(cafile=cafile, capath=capath) != 1:
        raise Exception("Couldn't load certificates")
    return ctx
+
def https_connection_factory(cafile=None, capath=None):
    """Return a (factory, exceptions) pair for boto's
    ``https_connection_factory`` argument.

    The factory builds M2Crypto HTTPSConnections that verify the server
    certificate against the given CA file/path; SSL.SSLError is the
    exception tuple boto should catch and retry on.
    """
    def factory(*args, **kwargs):
        # A fresh verifying context is created per connection.
        return HTTPSConnection(
            ssl_context=secure_context(cafile=cafile, capath=capath),
            *args, **kwargs)
    return (factory, (SSL.SSLError,))
diff --git a/boto/contrib/ymlmessage.py b/boto/contrib/ymlmessage.py
new file mode 100644
index 0000000..b9a2c93
--- /dev/null
+++ b/boto/contrib/ymlmessage.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2006,2007 Chris Moyer
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module was contributed by Chris Moyer. It provides a subclass of the
+SQS Message class that supports YAML as the body of the message.
+
+This module requires the yaml module.
+"""
+from boto.sqs.message import Message
+import yaml
+
class YAMLMessage(Message):
    """
    The YAMLMessage class provides a YAML compatible message. Encoding and
    decoding are handled automatically.

    Access this message data like such:

    m.data = [ 1, 2, 3]
    m.data[0] # Returns 1

    This depends on the PyYAML package
    """

    def __init__(self, queue=None, body='', xml_attrs=None):
        # NOTE(review): xml_attrs is accepted but never used here --
        # confirm against callers before removing it.
        self.data = None
        # self.data is presumably populated via set_body when the base
        # Message.__init__ processes body -- Message is defined elsewhere.
        Message.__init__(self, queue, body)

    def set_body(self, body):
        # Decode the YAML text and keep the resulting Python object.
        # NOTE(review): yaml.load can construct arbitrary Python objects;
        # for untrusted queue content yaml.safe_load would be safer, but
        # switching could break callers relying on custom YAML tags.
        self.data = yaml.load(body)

    def get_body(self):
        # Re-serialize the stored object back into YAML text.
        return yaml.dump(self.data)
diff --git a/boto/ec2/__init__.py b/boto/ec2/__init__.py
new file mode 100644
index 0000000..8bb3f53
--- /dev/null
+++ b/boto/ec2/__init__.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+This module provides an interface to the Elastic Compute Cloud (EC2)
+service from AWS.
+"""
+from boto.ec2.connection import EC2Connection
+
def regions(**kw_params):
    """
    Get all available regions for the EC2 service.
    You may pass any of the arguments accepted by the EC2Connection
    object's constructor as keyword arguments and they will be
    passed along to the EC2Connection object.

    :rtype: list
    :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
    """
    connection = EC2Connection(**kw_params)
    return connection.get_all_regions()
+
def connect_to_region(region_name, **kw_params):
    """
    Connect to the EC2 endpoint for the named region.

    Any of the arguments accepted by the EC2Connection constructor may
    be passed as keyword arguments; they are used both for the region
    lookup and for the resulting connection.

    :type region_name: str
    :param region_name: The name of the region to connect to
                        (e.g. 'us-east-1').

    :rtype: :class:`boto.ec2.connection.EC2Connection` or None
    :return: A connection to the given region, or None if the region
             name is not recognized.
    """
    # Reuse get_region so the region-lookup logic lives in one place.
    region = get_region(region_name, **kw_params)
    if region:
        return region.connect(**kw_params)
    return None
+
def get_region(region_name, **kw_params):
    """
    Look up a region by name.

    :rtype: :class:`boto.ec2.regioninfo.RegionInfo` or None
    :return: The RegionInfo object whose name matches region_name, or
             None if no such region is available.
    """
    for candidate in regions(**kw_params):
        if candidate.name != region_name:
            continue
        return candidate
    return None
+
diff --git a/boto/ec2/address.py b/boto/ec2/address.py
new file mode 100644
index 0000000..60ed406
--- /dev/null
+++ b/boto/ec2/address.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic IP Address
+"""
+
+from boto.ec2.ec2object import EC2Object
+
class Address(EC2Object):
    """
    Represents an EC2 Elastic IP address.

    When the address is associated with an instance, ``instance_id``
    holds that instance's identifier.
    """

    def __init__(self, connection=None, public_ip=None, instance_id=None):
        EC2Object.__init__(self, connection)
        self.connection = connection
        self.public_ip = public_ip
        self.instance_id = instance_id

    def __repr__(self):
        return 'Address:%s' % self.public_ip

    def endElement(self, name, value, connection):
        # Translate the EC2 response tags onto our attribute names; any
        # unrecognized tag is stored verbatim as an attribute.
        field_map = {'publicIp': 'public_ip', 'instanceId': 'instance_id'}
        setattr(self, field_map.get(name, name), value)

    def release(self):
        """Release this Elastic IP address back to AWS."""
        return self.connection.release_address(self.public_ip)

    # Alias so generic code can treat addresses like other deletable
    # EC2 objects.
    delete = release

    def associate(self, instance_id):
        """Associate this address with the given instance."""
        return self.connection.associate_address(instance_id, self.public_ip)

    def disassociate(self):
        """Disassociate this address from the instance it is attached to."""
        return self.connection.disassociate_address(self.public_ip)
+
+
diff --git a/boto/ec2/autoscale/__init__.py b/boto/ec2/autoscale/__init__.py
new file mode 100644
index 0000000..5d68b32
--- /dev/null
+++ b/boto/ec2/autoscale/__init__.py
@@ -0,0 +1,244 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module provides an interface to the Elastic Compute Cloud (EC2)
+Auto Scaling service.
+"""
+
+import boto
+from boto.connection import AWSQueryConnection
+from boto.ec2.regioninfo import RegionInfo
+from boto.ec2.autoscale.request import Request
+from boto.ec2.autoscale.trigger import Trigger
+from boto.ec2.autoscale.launchconfig import LaunchConfiguration
+from boto.ec2.autoscale.group import AutoScalingGroup
+from boto.ec2.autoscale.activity import Activity
+
+
class AutoScaleConnection(AWSQueryConnection):
    """
    Connection to the Auto Scaling service.

    Both the API version and the endpoint can be overridden through the
    boto configuration file ('autoscale_version' / 'autoscale_endpoint'
    in the Boto section).
    """
    APIVersion = boto.config.get('Boto', 'autoscale_version', '2009-05-15')
    Endpoint = boto.config.get('Boto', 'autoscale_endpoint',
                               'autoscaling.amazonaws.com')
    DefaultRegionName = 'us-east-1'
    DefaultRegionEndpoint = 'autoscaling.amazonaws.com'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=1,
                 https_connection_factory=None, region=None, path='/'):
        """
        Init method to create a new connection to the AutoScaling service.

        B{Note:} The host argument is overridden by the host specified in the
        boto configuration file.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint,
                                AutoScaleConnection)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path=path)

    def _required_auth_capability(self):
        # Requests are signed the same way EC2 requests are.
        return ['ec2']

    def build_list_params(self, params, items, label):
        """
        Add a list-valued request parameter using the AutoScaling
        'member' convention (different from the EC2 list format).

        items is a list of dictionaries or strings:
            [{'Protocol' : 'HTTP',
              'LoadBalancerPort' : '80',
              'InstancePort' : '80'},..] etc.
        or
            ['us-east-1b',...]
        """
        for i in xrange(1, len(items)+1):
            if isinstance(items[i-1], dict):
                for k, v in items[i-1].iteritems():
                    params['%s.member.%d.%s' % (label, i, k)] = v
            elif isinstance(items[i-1], basestring):
                params['%s.member.%d' % (label, i)] = items[i-1]

    def _update_group(self, op, as_group):
        """
        Build the common parameter set shared by CreateAutoScalingGroup
        and UpdateAutoScalingGroup and issue the request.  The load
        balancer and availability zone lists are only included for
        Create requests.
        """
        params = {
            'AutoScalingGroupName' : as_group.name,
            'Cooldown' : as_group.cooldown,
            'LaunchConfigurationName' : as_group.launch_config_name,
            'MinSize' : as_group.min_size,
            'MaxSize' : as_group.max_size,
            }
        if op.startswith('Create'):
            # Prefer the list-valued availability_zones attribute and
            # fall back to the deprecated scalar availability_zone.
            if as_group.availability_zones:
                zones = as_group.availability_zones
            else:
                zones = [as_group.availability_zone]
            self.build_list_params(params, as_group.load_balancers,
                                   'LoadBalancerNames')
            self.build_list_params(params, zones,
                                   'AvailabilityZones')
        return self.get_object(op, params, Request)

    def create_auto_scaling_group(self, as_group):
        """
        Create auto scaling group.

        :type as_group: :class:`boto.ec2.autoscale.group.AutoScalingGroup`
        :param as_group: The group definition to create.
        """
        return self._update_group('CreateAutoScalingGroup', as_group)

    def create_launch_configuration(self, launch_config):
        """
        Creates a new Launch Configuration.

        :type launch_config: boto.ec2.autoscale.launchconfig.LaunchConfiguration
        :param launch_config: LaunchConfiguration object.
        """
        params = {
            'ImageId' : launch_config.image_id,
            'KeyName' : launch_config.key_name,
            'LaunchConfigurationName' : launch_config.name,
            'InstanceType' : launch_config.instance_type,
            }
        # Optional attributes are only sent when set.
        if launch_config.user_data:
            params['UserData'] = launch_config.user_data
        if launch_config.kernel_id:
            params['KernelId'] = launch_config.kernel_id
        if launch_config.ramdisk_id:
            params['RamdiskId'] = launch_config.ramdisk_id
        if launch_config.block_device_mappings:
            self.build_list_params(params, launch_config.block_device_mappings,
                                   'BlockDeviceMappings')
        self.build_list_params(params, launch_config.security_groups,
                               'SecurityGroups')
        return self.get_object('CreateLaunchConfiguration', params,
                               Request, verb='POST')

    def create_trigger(self, trigger):
        """
        Create or update the given scaling trigger.

        :type trigger: :class:`boto.ec2.autoscale.trigger.Trigger`
        :param trigger: The trigger definition to upload.
        """
        params = {'TriggerName' : trigger.name,
                  'AutoScalingGroupName' : trigger.autoscale_group.name,
                  'MeasureName' : trigger.measure_name,
                  'Statistic' : trigger.statistic,
                  'Period' : trigger.period,
                  'Unit' : trigger.unit,
                  'LowerThreshold' : trigger.lower_threshold,
                  'LowerBreachScaleIncrement' : trigger.lower_breach_scale_increment,
                  'UpperThreshold' : trigger.upper_threshold,
                  'UpperBreachScaleIncrement' : trigger.upper_breach_scale_increment,
                  'BreachDuration' : trigger.breach_duration}
        # trigger.dimensions is a list of (name, value) tuples; convert
        # to the Name/Value dict form the API expects.
        dimensions = []
        for dim in trigger.dimensions:
            name, value = dim
            dimensions.append(dict(Name=name, Value=value))
        self.build_list_params(params, dimensions, 'Dimensions')

        req = self.get_object('CreateOrUpdateScalingTrigger', params,
                              Request)
        return req

    def get_all_groups(self, names=None):
        """
        Return all auto scaling groups, optionally filtered by name.

        :type names: list
        :param names: Optional list of group names to describe.
        """
        params = {}
        if names:
            self.build_list_params(params, names, 'AutoScalingGroupNames')
        return self.get_list('DescribeAutoScalingGroups', params,
                             [('member', AutoScalingGroup)])

    def get_all_launch_configurations(self, names=None):
        """
        Return all launch configurations, optionally filtered by name.

        :type names: list
        :param names: Optional list of launch configuration names.
        """
        params = {}
        if names:
            self.build_list_params(params, names, 'LaunchConfigurationNames')
        return self.get_list('DescribeLaunchConfigurations', params,
                             [('member', LaunchConfiguration)])

    def get_all_activities(self, autoscale_group,
                           activity_ids=None,
                           max_records=100):
        """
        Get all activities for the given autoscaling group.

        :type autoscale_group: str or AutoScalingGroup object
        :param autoscale_group: The auto scaling group to get activities on.

        :type activity_ids: list
        :param activity_ids: If given, restrict the results to these
                             activity ids.

        :type max_records: int
        :param max_records: Maximum amount of activities to return.
        """
        name = autoscale_group
        if isinstance(autoscale_group, AutoScalingGroup):
            name = autoscale_group.name
        # MaxRecords was previously accepted but never sent, so the
        # parameter was silently ignored.
        params = {'AutoScalingGroupName' : name,
                  'MaxRecords' : max_records}
        if activity_ids:
            self.build_list_params(params, activity_ids, 'ActivityIds')
        return self.get_list('DescribeScalingActivities', params,
                             [('member', Activity)])

    def get_all_triggers(self, autoscale_group):
        """
        Return all triggers defined on the named auto scaling group.

        :type autoscale_group: str
        :param autoscale_group: The name of the group.
        """
        params = {'AutoScalingGroupName' : autoscale_group}
        return self.get_list('DescribeTriggers', params,
                             [('member', Trigger)])

    def terminate_instance(self, instance_id, decrement_capacity=True):
        """
        Terminate the given instance via Auto Scaling.

        :type instance_id: str
        :param instance_id: The identifier of the instance to terminate.

        :type decrement_capacity: bool
        :param decrement_capacity: If True, the group's desired capacity
                                   is decremented along with the
                                   termination.
        """
        # Serialize the boolean explicitly ('true'/'false'); passing a
        # Python bool through would be rendered as 'True'/'False',
        # inconsistent with set_instance_health below.
        params = {'InstanceId' : instance_id}
        if decrement_capacity:
            params['ShouldDecrementDesiredCapacity'] = 'true'
        else:
            params['ShouldDecrementDesiredCapacity'] = 'false'
        return self.get_object('TerminateInstanceInAutoScalingGroup', params,
                               Activity)

    def set_instance_health(self, instance_id, health_status,
                            should_respect_grace_period=True):
        """
        Explicitly set the health status of an instance.

        :type instance_id: str
        :param instance_id: The identifier of the EC2 instance.

        :type health_status: str
        :param health_status: The health status of the instance.
                              "Healthy" means that the instance is
                              healthy and should remain in service.
                              "Unhealthy" means that the instance is
                              unhealthy. Auto Scaling should terminate
                              and replace it.

        :type should_respect_grace_period: bool
        :param should_respect_grace_period: If True, this call should
                                            respect the grace period
                                            associated with the group.
        """
        params = {'InstanceId' : instance_id,
                  'HealthStatus' : health_status}
        if should_respect_grace_period:
            params['ShouldRespectGracePeriod'] = 'true'
        else:
            params['ShouldRespectGracePeriod'] = 'false'
        return self.get_status('SetInstanceHealth', params)
+
diff --git a/boto/ec2/autoscale/activity.py b/boto/ec2/autoscale/activity.py
new file mode 100644
index 0000000..f895d65
--- /dev/null
+++ b/boto/ec2/autoscale/activity.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
class Activity(object):
    """
    A scaling activity reported by the Auto Scaling service, populated
    by SAX-style parsing of the XML response.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.start_time = None
        self.activity_id = None
        self.progress = None
        self.status_code = None
        self.cause = None
        self.description = None

    def __repr__(self):
        return 'Activity:%s status:%s progress:%s' % (self.description,
                                                      self.status_code,
                                                      self.progress)

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Translate known response tags onto attribute names; unknown
        # tags are stored verbatim as attributes.
        tag_map = {'ActivityId': 'activity_id',
                   'StartTime': 'start_time',
                   'Progress': 'progress',
                   'Cause': 'cause',
                   'Description': 'description',
                   'StatusCode': 'status_code'}
        setattr(self, tag_map.get(name, name), value)
diff --git a/boto/ec2/autoscale/group.py b/boto/ec2/autoscale/group.py
new file mode 100644
index 0000000..3fa6d68
--- /dev/null
+++ b/boto/ec2/autoscale/group.py
@@ -0,0 +1,189 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import weakref
+
+from boto.ec2.elb.listelement import ListElement
+from boto.resultset import ResultSet
+from boto.ec2.autoscale.trigger import Trigger
+from boto.ec2.autoscale.request import Request
+
class Instance(object):
    """
    Minimal representation of an EC2 instance as it appears inside an
    AutoScalingGroup response.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.instance_id = ''

    def __repr__(self):
        return 'Instance:%s' % self.instance_id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name != 'InstanceId':
            # Preserve unrecognized response fields verbatim.
            setattr(self, name, value)
        else:
            self.instance_id = value
+
+
class AutoScalingGroup(object):
    """
    Local representation of an Auto Scaling group.

    Constructing this object does not call the service; pass it to
    AutoScaleConnection.create_auto_scaling_group to create the group
    remotely, or receive instances of it from get_all_groups.
    """

    def __init__(self, connection=None, group_name=None,
                 availability_zone=None, launch_config=None,
                 availability_zones=None,
                 load_balancers=None, cooldown=0,
                 min_size=None, max_size=None):
        """
        Creates a new AutoScalingGroup with the specified name.

        You must not have already used up your entire quota of
        AutoScalingGroups in order for this call to be successful. Once the
        creation request is completed, the AutoScalingGroup is ready to be
        used in other calls.

        :type group_name: str
        :param group_name: Name of autoscaling group.

        :type availability_zone: str
        :param availability_zone: A single availability zone.
                                  DEPRECATED - use the
                                  availability_zones parameter, which
                                  expects a list of availability zone
                                  strings.

        :type availability_zones: list
        :param availability_zones: List of availability zones.

        :type launch_config: :class:`boto.ec2.autoscale.launchconfig.LaunchConfiguration`
        :param launch_config: The launch configuration for the group
                              (its name is read from launch_config.name).

        :type load_balancers: list
        :param load_balancers: List of load balancer names.

        :type cooldown: int
        :param cooldown: Amount of time after a Scaling Activity completes
                         before any further scaling activities can start.

        :type min_size: int
        :param min_size: Minimum size of group.

        :type max_size: int
        :param max_size: Maximum size of group.
        """
        self.name = group_name
        self.connection = connection
        self.min_size = min_size
        self.max_size = max_size
        # created_time, desired_capacity and instances are populated
        # from service responses, not by the caller.
        self.created_time = None
        self.cooldown = cooldown
        self.launch_config = launch_config
        if self.launch_config:
            self.launch_config_name = self.launch_config.name
        else:
            self.launch_config_name = None
        self.desired_capacity = None
        lbs = load_balancers or []
        self.load_balancers = ListElement(lbs)
        zones = availability_zones or []
        self.availability_zone = availability_zone
        self.availability_zones = ListElement(zones)
        self.instances = None

    def __repr__(self):
        return 'AutoScalingGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        # Delegate parsing of nested list elements to the appropriate
        # container objects.
        if name == 'Instances':
            self.instances = ResultSet([('member', Instance)])
            return self.instances
        elif name == 'LoadBalancerNames':
            return self.load_balancers
        elif name == 'AvailabilityZones':
            return self.availability_zones
        else:
            return

    def endElement(self, name, value, connection):
        # Map known response tags onto attributes; anything unrecognized
        # is stored verbatim.
        if name == 'MinSize':
            self.min_size = value
        elif name == 'CreatedTime':
            self.created_time = value
        elif name == 'Cooldown':
            self.cooldown = value
        elif name == 'LaunchConfigurationName':
            self.launch_config_name = value
        elif name == 'DesiredCapacity':
            self.desired_capacity = value
        elif name == 'MaxSize':
            self.max_size = value
        elif name == 'AutoScalingGroupName':
            self.name = value
        else:
            setattr(self, name, value)

    def set_capacity(self, capacity):
        """ Set the desired capacity for the group. """
        params = {
            'AutoScalingGroupName' : self.name,
            'DesiredCapacity' : capacity,
        }
        req = self.connection.get_object('SetDesiredCapacity', params,
                                         Request)
        self.connection.last_request = req
        return req

    def update(self):
        """ Sync local changes with AutoScaling group. """
        return self.connection._update_group('UpdateAutoScalingGroup', self)

    def shutdown_instances(self):
        """ Convenience method which shuts down all instances associated with
        this group.

        NOTE(review): only min_size and max_size are zeroed before the
        update; desired capacity is not set explicitly here -- confirm
        the service scales in to zero from these bounds alone.
        """
        self.min_size = 0
        self.max_size = 0
        self.update()

    def get_all_triggers(self):
        """ Get all triggers for this auto scaling group. """
        params = {'AutoScalingGroupName' : self.name}
        triggers = self.connection.get_list('DescribeTriggers', params,
                                            [('member', Trigger)])

        # allow triggers to be able to access the autoscale group
        # (weakref avoids a reference cycle between group and triggers)
        for tr in triggers:
            tr.autoscale_group = weakref.proxy(self)

        return triggers

    def delete(self):
        """ Delete this auto-scaling group. """
        params = {'AutoScalingGroupName' : self.name}
        return self.connection.get_object('DeleteAutoScalingGroup', params,
                                          Request)

    def get_activities(self, activity_ids=None, max_records=100):
        """
        Get all activities for this group.
        """
        return self.connection.get_all_activities(self, activity_ids, max_records)
+
diff --git a/boto/ec2/autoscale/instance.py b/boto/ec2/autoscale/instance.py
new file mode 100644
index 0000000..ffdd5b1
--- /dev/null
+++ b/boto/ec2/autoscale/instance.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
class Instance(object):
    """
    An EC2 instance as reported by the Auto Scaling service, including
    its lifecycle state and availability zone.
    """

    # XML tag -> attribute name mapping used while parsing responses.
    _TAG_MAP = {'InstanceId': 'instance_id',
                'LifecycleState': 'lifecycle_state',
                'AvailabilityZone': 'availability_zone'}

    def __init__(self, connection=None):
        self.connection = connection
        self.instance_id = ''
        self.lifecycle_state = None
        self.availability_zone = ''

    def __repr__(self):
        return 'Instance:%s' % self.instance_id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Unknown tags fall through and are stored verbatim.
        setattr(self, self._TAG_MAP.get(name, name), value)
+
+
diff --git a/boto/ec2/autoscale/launchconfig.py b/boto/ec2/autoscale/launchconfig.py
new file mode 100644
index 0000000..7587cb6
--- /dev/null
+++ b/boto/ec2/autoscale/launchconfig.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+from boto.ec2.autoscale.request import Request
+from boto.ec2.elb.listelement import ListElement
+
+
class LaunchConfiguration(object):
    def __init__(self, connection=None, name=None, image_id=None,
                 key_name=None, security_groups=None, user_data=None,
                 instance_type='m1.small', kernel_id=None,
                 ramdisk_id=None, block_device_mappings=None):
        """
        A launch configuration.

        :type name: str
        :param name: Name of the launch configuration to create.

        :type image_id: str
        :param image_id: Unique ID of the Amazon Machine Image (AMI) which was
                         assigned during registration.

        :type key_name: str
        :param key_name: The name of the EC2 key pair.

        :type security_groups: list
        :param security_groups: Names of the security groups with which to
                                associate the EC2 instances.

        :type user_data: str
        :param user_data: The user data made available to launched instances.

        :type instance_type: str
        :param instance_type: The EC2 instance type (default 'm1.small').

        :type kernel_id: str
        :param kernel_id: The kernel id for the instances.

        :type ramdisk_id: str
        :param ramdisk_id: The RAM disk id for the instances.

        :type block_device_mappings: list
        :param block_device_mappings: How block devices are exposed to
                                      the instances.
        """
        self.connection = connection
        self.name = name
        self.instance_type = instance_type
        self.block_device_mappings = block_device_mappings
        self.key_name = key_name
        sec_groups = security_groups or []
        self.security_groups = ListElement(sec_groups)
        self.image_id = image_id
        self.ramdisk_id = ramdisk_id
        self.kernel_id = kernel_id
        self.user_data = user_data
        # Populated from the service response, not set by the caller.
        # (Was previously assigned twice in this constructor.)
        self.created_time = None

    def __repr__(self):
        return 'LaunchConfiguration:%s' % self.name

    def startElement(self, name, attrs, connection):
        # SecurityGroups is the only nested element; delegate parsing of
        # its members to the ListElement container.
        if name == 'SecurityGroups':
            return self.security_groups
        else:
            return

    def endElement(self, name, value, connection):
        if name == 'InstanceType':
            self.instance_type = value
        elif name == 'LaunchConfigurationName':
            self.name = value
        elif name == 'KeyName':
            self.key_name = value
        elif name == 'ImageId':
            self.image_id = value
        elif name == 'CreatedTime':
            self.created_time = value
        elif name == 'KernelId':
            self.kernel_id = value
        elif name == 'RamdiskId':
            self.ramdisk_id = value
        elif name == 'UserData':
            self.user_data = value
        else:
            # Preserve any unrecognized response fields verbatim.
            setattr(self, name, value)

    def delete(self):
        """ Delete this launch configuration. """
        params = {'LaunchConfigurationName' : self.name}
        return self.connection.get_object('DeleteLaunchConfiguration', params,
                                          Request)
+
diff --git a/boto/ec2/autoscale/request.py b/boto/ec2/autoscale/request.py
new file mode 100644
index 0000000..c066dff
--- /dev/null
+++ b/boto/ec2/autoscale/request.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Request(object):
    """
    Generic response wrapper for AutoScaling calls whose result is just
    a request id.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.request_id = ''

    def __repr__(self):
        return 'Request:%s' % self.request_id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name != 'RequestId':
            # Preserve unrecognized response fields verbatim.
            setattr(self, name, value)
        else:
            self.request_id = value
+
diff --git a/boto/ec2/autoscale/trigger.py b/boto/ec2/autoscale/trigger.py
new file mode 100644
index 0000000..2840e67
--- /dev/null
+++ b/boto/ec2/autoscale/trigger.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import weakref
+
+from boto.ec2.autoscale.request import Request
+
+
class Trigger(object):
    """
    An auto scaling trigger.
    """

    def __init__(self, connection=None, name=None, autoscale_group=None,
                 dimensions=None, measure_name=None,
                 statistic=None, unit=None, period=60,
                 lower_threshold=None,
                 lower_breach_scale_increment=None,
                 upper_threshold=None,
                 upper_breach_scale_increment=None,
                 breach_duration=None):
        """
        Initialize an auto-scaling trigger object.

        :type name: str
        :param name: The name for this trigger

        :type autoscale_group: str
        :param autoscale_group: The name of the AutoScalingGroup that will be
                                associated with the trigger. The AutoScalingGroup
                                that will be affected by the trigger when it is
                                activated.

        :type dimensions: list
        :param dimensions: List of tuples, i.e.
                            ('ImageId', 'i-13lasde') etc.

        :type measure_name: str
        :param measure_name: The measure name associated with the metric used by
                             the trigger to determine when to activate, for
                             example, CPU, network I/O, or disk I/O.

        :type statistic: str
        :param statistic: The particular statistic used by the trigger when
                          fetching metric statistics to examine.

        :type period: int
        :param period: The period associated with the metric statistics in
                       seconds. Valid Values: 60 or a multiple of 60.

        :type unit: str
        :param unit: The unit of measurement.
        """
        self.name = name
        self.connection = connection
        self.dimensions = dimensions
        self.breach_duration = breach_duration
        self.upper_breach_scale_increment = upper_breach_scale_increment
        self.created_time = None
        self.upper_threshold = upper_threshold
        self.status = None
        self.lower_threshold = lower_threshold
        self.period = period
        self.lower_breach_scale_increment = lower_breach_scale_increment
        self.statistic = statistic
        self.unit = unit
        self.namespace = None
        # autoscale_group_name is normally populated while parsing a
        # DescribeTriggers response (AutoScalingGroupName element).  It must
        # exist on locally constructed triggers too, or delete() raises
        # AttributeError.
        self.autoscale_group_name = None
        if autoscale_group:
            # Use a weak proxy so the trigger does not keep its group alive
            # through a reference cycle.
            # NOTE(review): the docstring says this is the group *name* (str),
            # but weakref.proxy only works on objects - this appears to expect
            # the group object itself; confirm against callers.
            self.autoscale_group = weakref.proxy(autoscale_group)
            self.autoscale_group_name = getattr(autoscale_group, 'name', None)
        else:
            self.autoscale_group = None
        self.measure_name = measure_name

    def __repr__(self):
        return 'Trigger:%s' % (self.name)

    def startElement(self, name, attrs, connection):
        # No nested elements of interest.
        return None

    def endElement(self, name, value, connection):
        if name == 'BreachDuration':
            self.breach_duration = value
        elif name == 'TriggerName':
            self.name = value
        elif name == 'Period':
            self.period = value
        elif name == 'CreatedTime':
            self.created_time = value
        elif name == 'Statistic':
            self.statistic = value
        elif name == 'Unit':
            self.unit = value
        elif name == 'Namespace':
            self.namespace = value
        elif name == 'AutoScalingGroupName':
            self.autoscale_group_name = value
        elif name == 'MeasureName':
            self.measure_name = value
        else:
            # Preserve any unrecognized element as a plain attribute.
            setattr(self, name, value)

    def update(self):
        """ Write out differences to trigger. """
        self.connection.create_trigger(self)

    def delete(self):
        """ Delete this trigger. """
        params = {
            'TriggerName' : self.name,
            'AutoScalingGroupName' : self.autoscale_group_name,
        }
        req = self.connection.get_object('DeleteTrigger', params,
                                         Request)
        self.connection.last_request = req
        return req
+
diff --git a/boto/ec2/blockdevicemapping.py b/boto/ec2/blockdevicemapping.py
new file mode 100644
index 0000000..efbc38b
--- /dev/null
+++ b/boto/ec2/blockdevicemapping.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
class BlockDeviceType(object):
    """
    Describes a single EC2 block device: either an ephemeral (instance
    store) device or an EBS volume attachment.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.ephemeral_name = None
        self.no_device = False
        self.volume_id = None
        self.snapshot_id = None
        self.status = None
        self.attach_time = None
        self.delete_on_termination = False
        self.size = None

    def startElement(self, name, attrs, connection):
        # Nothing nested to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Elements that need conversion from the raw XML string.
        if name == 'NoDevice':
            self.no_device = (value == 'true')
        elif name == 'deleteOnTermination':
            self.delete_on_termination = (value == 'true')
        elif name == 'volumeSize':
            self.size = int(value)
        else:
            # Remaining elements are stored verbatim, renamed where the
            # XML tag differs from our attribute name.
            attr = {'volumeId': 'volume_id',
                    'virtualName': 'ephemeral_name',
                    'snapshotId': 'snapshot_id',
                    'status': 'status',
                    'attachTime': 'attach_time'}.get(name, name)
            setattr(self, attr, value)

# for backwards compatibility
EBSBlockDeviceType = BlockDeviceType
+
class BlockDeviceMapping(dict):
    """
    A dict of device name -> BlockDeviceType that also knows how to
    populate itself from a SAX parse and how to serialize itself into
    EC2 query parameters.
    """

    def __init__(self, connection=None):
        dict.__init__(self)
        self.connection = connection
        self.current_name = None
        self.current_value = None

    def startElement(self, name, attrs, connection):
        if name != 'ebs':
            return None
        # Delegate the nested <ebs> element to a fresh BlockDeviceType.
        self.current_value = BlockDeviceType(self)
        return self.current_value

    def endElement(self, name, value, connection):
        if name in ('device', 'deviceName'):
            self.current_name = value
        elif name == 'item':
            # An item is complete: record the accumulated device entry.
            self[self.current_name] = self.current_value

    def build_list_params(self, params, prefix=''):
        """Serialize the mapping into EC2 BlockDeviceMapping.N.* params."""
        for index, dev_name in enumerate(self, 1):
            pre = '%sBlockDeviceMapping.%d' % (prefix, index)
            params['%s.DeviceName' % pre] = dev_name
            dev = self[dev_name]
            if dev.ephemeral_name:
                # Ephemeral devices carry no EBS sub-parameters.
                params['%s.VirtualName' % pre] = dev.ephemeral_name
                continue
            if dev.no_device:
                params['%s.Ebs.NoDevice' % pre] = 'true'
            if dev.snapshot_id:
                params['%s.Ebs.SnapshotId' % pre] = dev.snapshot_id
            if dev.size:
                params['%s.Ebs.VolumeSize' % pre] = dev.size
            if dev.delete_on_termination:
                params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
            else:
                params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
diff --git a/boto/ec2/bundleinstance.py b/boto/ec2/bundleinstance.py
new file mode 100644
index 0000000..9651992
--- /dev/null
+++ b/boto/ec2/bundleinstance.py
@@ -0,0 +1,78 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Bundle Task
+"""
+
+from boto.ec2.ec2object import EC2Object
+
class BundleInstanceTask(EC2Object):
    """Represents an EC2 Bundle Task."""

    # XML element tag -> attribute that receives the element's text.
    _XML_FIELDS = {
        'bundleId': 'id',
        'instanceId': 'instance_id',
        'progress': 'progress',
        'startTime': 'start_time',
        'state': 'state',
        'bucket': 'bucket',
        'prefix': 'prefix',
        'uploadPolicy': 'upload_policy',
        'uploadPolicySignature': 'upload_policy_signature',
        'updateTime': 'update_time',
        'code': 'code',
        'message': 'message',
    }

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.id = None
        self.instance_id = None
        self.progress = None
        self.start_time = None
        self.state = None
        self.bucket = None
        self.prefix = None
        self.upload_policy = None
        self.upload_policy_signature = None
        self.update_time = None
        self.code = None
        self.message = None

    def __repr__(self):
        return 'BundleInstanceTask:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements of interest.
        return None

    def endElement(self, name, value, connection):
        # Known tags land on their mapped attribute; anything else is
        # stored verbatim under the tag name.
        setattr(self, self._XML_FIELDS.get(name, name), value)
+
diff --git a/boto/ec2/buyreservation.py b/boto/ec2/buyreservation.py
new file mode 100644
index 0000000..fcd8a77
--- /dev/null
+++ b/boto/ec2/buyreservation.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto.ec2
+from boto.sdb.db.property import StringProperty, IntegerProperty
+from boto.manage import propget
+
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
                 'c1.medium', 'c1.xlarge', 'm2.xlarge',
                 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge',
                 't1.micro']

class BuyReservation(object):
    """
    Interactive helper that collects the parameters needed to purchase
    an EC2 reserved-instance offering, prompting only for values the
    caller has not already supplied in the params dict.
    """

    def get_region(self, params):
        if not params.get('region'):
            prop = StringProperty(name='region', verbose_name='EC2 Region',
                                  choices=boto.ec2.regions)
            params['region'] = propget.get(prop, choices=boto.ec2.regions)

    def get_instance_type(self, params):
        if not params.get('instance_type'):
            prop = StringProperty(name='instance_type', verbose_name='Instance Type',
                                  choices=InstanceTypes)
            params['instance_type'] = propget.get(prop)

    def get_quantity(self, params):
        if not params.get('quantity'):
            prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
            params['quantity'] = propget.get(prop)

    def get_zone(self, params):
        if not params.get('zone'):
            prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
                                  choices=self.ec2.get_all_zones)
            params['zone'] = propget.get(prop)

    def get(self, params):
        """Fill in all purchase parameters, prompting where necessary."""
        self.get_region(params)
        # The zone choices require a live connection to the chosen region.
        self.ec2 = params['region'].connect()
        self.get_instance_type(params)
        self.get_zone(params)
        self.get_quantity(params)
+
+if __name__ == "__main__":
+ obj = BuyReservation()
+ params = {}
+ obj.get(params)
+ offerings = obj.ec2.get_all_reserved_instances_offerings(instance_type=params['instance_type'],
+ availability_zone=params['zone'].name)
+ print '\nThe following Reserved Instances Offerings are available:\n'
+ for offering in offerings:
+ offering.describe()
+ prop = StringProperty(name='offering', verbose_name='Offering',
+ choices=offerings)
+ offering = propget.get(prop)
+ print '\nYou have chosen this offering:'
+ offering.describe()
+ unit_price = float(offering.fixed_price)
+ total_price = unit_price * params['quantity']
+ print '!!! You are about to purchase %d of these offerings for a total of $%.2f !!!' % (params['quantity'], total_price)
+ answer = raw_input('Are you sure you want to do this? If so, enter YES: ')
+ if answer.strip().lower() == 'yes':
+ offering.purchase(params['quantity'])
+ else:
+ print 'Purchase cancelled'
diff --git a/boto/ec2/cloudwatch/__init__.py b/boto/ec2/cloudwatch/__init__.py
new file mode 100644
index 0000000..a02baa3
--- /dev/null
+++ b/boto/ec2/cloudwatch/__init__.py
@@ -0,0 +1,502 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+This module provides an interface to the Elastic Compute Cloud (EC2)
+CloudWatch service from AWS.
+
+The 5 Minute How-To Guide
+-------------------------
+First, make sure you have something to monitor. You can either create a
+LoadBalancer or enable monitoring on an existing EC2 instance. To enable
+monitoring, you can either call the monitor_instance method on the
+EC2Connection object or call the monitor method on the Instance object.
+
+It takes a while for the monitoring data to start accumulating but once
+it does, you can do this:
+
+>>> import boto
+>>> c = boto.connect_cloudwatch()
+>>> metrics = c.list_metrics()
+>>> metrics
+[Metric:NetworkIn,
+ Metric:NetworkOut,
+ Metric:NetworkOut(InstanceType,m1.small),
+ Metric:NetworkIn(InstanceId,i-e573e68c),
+ Metric:CPUUtilization(InstanceId,i-e573e68c),
+ Metric:DiskWriteBytes(InstanceType,m1.small),
+ Metric:DiskWriteBytes(ImageId,ami-a1ffb63),
+ Metric:NetworkOut(ImageId,ami-a1ffb63),
+ Metric:DiskWriteOps(InstanceType,m1.small),
+ Metric:DiskReadBytes(InstanceType,m1.small),
+ Metric:DiskReadOps(ImageId,ami-a1ffb63),
+ Metric:CPUUtilization(InstanceType,m1.small),
+ Metric:NetworkIn(ImageId,ami-a1ffb63),
+ Metric:DiskReadOps(InstanceType,m1.small),
+ Metric:DiskReadBytes,
+ Metric:CPUUtilization,
+ Metric:DiskWriteBytes(InstanceId,i-e573e68c),
+ Metric:DiskWriteOps(InstanceId,i-e573e68c),
+ Metric:DiskWriteOps,
+ Metric:DiskReadOps,
+ Metric:CPUUtilization(ImageId,ami-a1ffb63),
+ Metric:DiskReadOps(InstanceId,i-e573e68c),
+ Metric:NetworkOut(InstanceId,i-e573e68c),
+ Metric:DiskReadBytes(ImageId,ami-a1ffb63),
+ Metric:DiskReadBytes(InstanceId,i-e573e68c),
+ Metric:DiskWriteBytes,
+ Metric:NetworkIn(InstanceType,m1.small),
+ Metric:DiskWriteOps(ImageId,ami-a1ffb63)]
+
+The list_metrics call will return a list of all of the available metrics
+that you can query against. Each entry in the list is a Metric object.
+As you can see from the list above, some of the metrics are generic metrics
+and some have Dimensions associated with them (e.g. InstanceType=m1.small).
+The Dimension can be used to refine your query. So, for example, I could
+query the metric Metric:CPUUtilization which would create the desired statistic
+by aggregating cpu utilization data across all sources of information available
+or I could refine that by querying the metric
+Metric:CPUUtilization(InstanceId,i-e573e68c) which would use only the data
+associated with the instance identified by the instance ID i-e573e68c.
+
+Because for this example, I'm only monitoring a single instance, the set
+of metrics available to me are fairly limited. If I was monitoring many
+instances, using many different instance types and AMI's and also several
+load balancers, the list of available metrics would grow considerably.
+
+Once you have the list of available metrics, you can actually
+query the CloudWatch system for that metric. Let's choose the CPU utilization
+metric for our instance.
+
+>>> m = metrics[5]
+>>> m
+Metric:CPUUtilization(InstanceId,i-e573e68c)
+
+The Metric object has a query method that lets us actually perform
+the query against the collected data in CloudWatch. To call that,
+we need a start time and end time to control the time span of data
+that we are interested in. For this example, let's say we want the
+data for the previous hour:
+
+>>> import datetime
+>>> end = datetime.datetime.now()
+>>> start = end - datetime.timedelta(hours=1)
+
+We also need to supply the Statistic that we want reported and
+the Units to use for the results. The Statistic can be one of these
+values:
+
+['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
+
+And Units must be one of the following:
+
+['Seconds', 'Percent', 'Bytes', 'Bits', 'Count',
+'Bytes/Second', 'Bits/Second', 'Count/Second']
+
+The query method also takes an optional parameter, period. This
+parameter controls the granularity (in seconds) of the data returned.
+The smallest period is 60 seconds and the value must be a multiple
+of 60 seconds. So, let's ask for the average as a percent:
+
+>>> datapoints = m.query(start, end, 'Average', 'Percent')
+>>> len(datapoints)
+60
+
+Our period was 60 seconds and our duration was one hour so
+we should get 60 data points back and we can see that we did.
+Each element in the datapoints list is a DataPoint object
+which is a simple subclass of a Python dict object. Each
+Datapoint object contains all of the information available
+about that particular data point.
+
+>>> d = datapoints[0]
+>>> d
+{u'Average': 0.0,
+ u'SampleCount': 1.0,
+ u'Timestamp': u'2009-05-21T19:55:00Z',
+ u'Unit': u'Percent'}
+
+My server obviously isn't very busy right now!
+"""
+try:
+ import simplejson as json
+except ImportError:
+ import json
+from boto.connection import AWSQueryConnection
+from boto.ec2.cloudwatch.metric import Metric
+from boto.ec2.cloudwatch.alarm import MetricAlarm, AlarmHistoryItem
+from boto.ec2.cloudwatch.datapoint import Datapoint
+from boto.regioninfo import RegionInfo
+import boto
+
# Endpoint hostnames for each region the CloudWatch service runs in.
RegionData = {
    'us-east-1' : 'monitoring.us-east-1.amazonaws.com',
    'us-west-1' : 'monitoring.us-west-1.amazonaws.com',
    'eu-west-1' : 'monitoring.eu-west-1.amazonaws.com',
    'ap-southeast-1' : 'monitoring.ap-southeast-1.amazonaws.com'}

def regions():
    """
    Get all available regions for the CloudWatch service.

    :rtype: list
    :return: A list of :class:`boto.RegionInfo` instances
    """
    return [RegionInfo(name=region_name,
                       endpoint=RegionData[region_name],
                       connection_cls=CloudWatchConnection)
            for region_name in RegionData]
+
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.ec2.cloudwatch.CloudWatchConnection`.

    :param str region_name: The name of the region to connect to.

    :rtype: :class:`boto.ec2.CloudWatchConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    matches = [r for r in regions() if r.name == region_name]
    if not matches:
        return None
    return matches[0].connect(**kw_params)
+
+
class CloudWatchConnection(AWSQueryConnection):
    """
    A connection to the Amazon CloudWatch monitoring service, providing
    metric queries and alarm management.
    """

    APIVersion = boto.config.get('Boto', 'cloudwatch_version', '2010-08-01')
    DefaultRegionName = boto.config.get('Boto', 'cloudwatch_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'cloudwatch_region_endpoint',
                                            'monitoring.amazonaws.com')


    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/'):
        """
        Init method to create a new connection to EC2 Monitoring Service.

        B{Note:} The host argument is overridden by the host specified in the
        boto configuration file.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region

        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path)

    def _required_auth_capability(self):
        # CloudWatch uses the same signing scheme as EC2.
        return ['ec2']

    def build_list_params(self, params, items, label):
        """
        Add each element of items to params, numbering keys from 1 using
        the %-style placeholder in label (e.g. 'AlarmNames.member.%s').
        A single string is treated as a one-element list.
        """
        if isinstance(items, str):
            items = [items]
        for i in range(1, len(items)+1):
            params[label % i] = items[i-1]

    def get_metric_statistics(self, period, start_time, end_time, metric_name,
                              namespace, statistics, dimensions=None, unit=None):
        """
        Get time-series data for one or more statistics of a given metric.

        :type period: int
        :param period: The granularity, in seconds, of the returned datapoints.
                       Must be 60 or a multiple of 60.

        :type start_time: datetime
        :param start_time: The time stamp to use for determining the first
                           datapoint to return.

        :type end_time: datetime
        :param end_time: The time stamp to use for determining the last
                         datapoint to return.

        :type metric_name: string
        :param metric_name: CPUUtilization|NetworkIO-in|NetworkIO-out|DiskIO-ALL-read|
                             DiskIO-ALL-write|DiskIO-ALL-read-bytes|DiskIO-ALL-write-bytes

        :type namespace: string
        :param namespace: The namespace of the metric.

        :type statistics: list
        :param statistics: The statistics to be returned for the metric.

        :type dimensions: dict
        :param dimensions: A dimension name -> value mapping used to refine
                           the query.

        :type unit: string
        :param unit: The unit for the metric.

        :rtype: list
        """
        params = {'Period' : period,
                  'MetricName' : metric_name,
                  'Namespace' : namespace,
                  'StartTime' : start_time.isoformat(),
                  'EndTime' : end_time.isoformat()}
        self.build_list_params(params, statistics, 'Statistics.member.%d')
        if dimensions:
            i = 1
            for name in dimensions:
                params['Dimensions.member.%d.Name' % i] = name
                params['Dimensions.member.%d.Value' % i] = dimensions[name]
                i += 1
        # Bug fix: the unit argument was previously accepted but silently
        # ignored; pass it through to the service like the other filters.
        if unit:
            params['Unit'] = unit
        return self.get_list('GetMetricStatistics', params, [('member', Datapoint)])

    def list_metrics(self, next_token=None):
        """
        Returns a list of the valid metrics for which there is recorded data available.

        :type next_token: string
        :param next_token: A maximum of 500 metrics will be returned at one time.
                           If more results are available, the ResultSet returned
                           will contain a non-Null next_token attribute. Passing
                           that token as a parameter to list_metrics will retrieve
                           the next page of metrics.
        """
        params = {}
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('ListMetrics', params, [('member', Metric)])

    def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None,
                        max_records=None, state_value=None, next_token=None):
        """
        Retrieves alarms with the specified names. If no name is specified, all
        alarms for the user are returned. Alarms can be retrieved by using only
        a prefix for the alarm name, the alarm state, or a prefix for any
        action.

        :type action_prefix: string
        :param action_prefix: The action name prefix.

        :type alarm_name_prefix: string
        :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified
                                  if this parameter is specified.

        :type alarm_names: list
        :param alarm_names: A list of alarm names to retrieve information for.

        :type max_records: int
        :param max_records: The maximum number of alarm descriptions to retrieve.

        :type state_value: string
        :param state_value: The state value to be used in matching alarms.

        :type next_token: string
        :param next_token: The token returned by a previous call to indicate that there is more data.

        :rtype: list
        """
        params = {}
        if action_prefix:
            params['ActionPrefix'] = action_prefix
        if alarm_name_prefix:
            params['AlarmNamePrefix'] = alarm_name_prefix
        elif alarm_names:
            self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
        if max_records:
            params['MaxRecords'] = max_records
        if next_token:
            params['NextToken'] = next_token
        if state_value:
            params['StateValue'] = state_value
        return self.get_list('DescribeAlarms', params, [('member', MetricAlarm)])

    def describe_alarm_history(self, alarm_name=None, start_date=None, end_date=None,
                               max_records=None, history_item_type=None, next_token=None):
        """
        Retrieves history for the specified alarm. Filter alarms by date range
        or item type. If an alarm name is not specified, Amazon CloudWatch
        returns histories for all of the owner's alarms.

        Amazon CloudWatch retains the history of deleted alarms for a period of
        six weeks. If an alarm has been deleted, its history can still be
        queried.

        :type alarm_name: string
        :param alarm_name: The name of the alarm.

        :type start_date: datetime
        :param start_date: The starting date to retrieve alarm history.

        :type end_date: datetime
        :param end_date: The ending date to retrieve alarm history.

        :type history_item_type: string
        :param history_item_type: The type of alarm histories to retrieve (ConfigurationUpdate | StateUpdate | Action)

        :type max_records: int
        :param max_records: The maximum number of alarm descriptions to retrieve.

        :type next_token: string
        :param next_token: The token returned by a previous call to indicate that there is more data.

        :rtype: list
        """
        params = {}
        if alarm_name:
            params['AlarmName'] = alarm_name
        if start_date:
            params['StartDate'] = start_date.isoformat()
        if end_date:
            params['EndDate'] = end_date.isoformat()
        if history_item_type:
            params['HistoryItemType'] = history_item_type
        if max_records:
            params['MaxRecords'] = max_records
        if next_token:
            params['NextToken'] = next_token
        return self.get_list('DescribeAlarmHistory', params, [('member', AlarmHistoryItem)])

    def describe_alarms_for_metric(self, metric_name, namespace, period=None, statistic=None, dimensions=None, unit=None):
        """
        Retrieves all alarms for a single metric. Specify a statistic, period,
        or unit to filter the set of alarms further.

        :type metric_name: string
        :param metric_name: The name of the metric

        :type namespace: string
        :param namespace: The namespace of the metric.

        :type period: int
        :param period: The period in seconds over which the statistic is applied.

        :type statistic: string
        :param statistic: The statistic for the metric.

        :type dimensions: list
        :param dimensions: A list of dimensions to filter against.

        :type unit: string
        :param unit: The unit for the metric.

        :rtype: list
        """
        params = {
            'MetricName' : metric_name,
            'Namespace' : namespace,
        }
        if period:
            params['Period'] = period
        if statistic:
            params['Statistic'] = statistic
        if dimensions:
            self.build_list_params(params, dimensions, 'Dimensions.member.%s')
        if unit:
            params['Unit'] = unit
        return self.get_list('DescribeAlarmsForMetric', params, [('member', MetricAlarm)])

    def put_metric_alarm(self, alarm):
        """
        Creates or updates an alarm and associates it with the specified Amazon
        CloudWatch metric. Optionally, this operation can associate one or more
        Amazon Simple Notification Service resources with the alarm.

        When this operation creates an alarm, the alarm state is immediately
        set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
        set appropriately. Any actions associated with the StateValue is then
        executed.

        When updating an existing alarm, its StateValue is left unchanged.

        :type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
        :param alarm: MetricAlarm object.
        """
        params = {
            'AlarmName' : alarm.name,
            'MetricName' : alarm.metric,
            'Namespace' : alarm.namespace,
            'Statistic' : alarm.statistic,
            'ComparisonOperator' : MetricAlarm._cmp_map[alarm.comparison],
            'Threshold' : alarm.threshold,
            'EvaluationPeriods' : alarm.evaluation_periods,
            'Period' : alarm.period,
        }
        if alarm.actions_enabled is not None:
            params['ActionsEnabled'] = alarm.actions_enabled
        if alarm.alarm_actions:
            self.build_list_params(params, alarm.alarm_actions, 'AlarmActions.member.%s')
        if alarm.description:
            params['AlarmDescription'] = alarm.description
        if alarm.dimensions:
            self.build_list_params(params, alarm.dimensions, 'Dimensions.member.%s')
        if alarm.insufficient_data_actions:
            self.build_list_params(params, alarm.insufficient_data_actions, 'InsufficientDataActions.member.%s')
        if alarm.ok_actions:
            self.build_list_params(params, alarm.ok_actions, 'OKActions.member.%s')
        if alarm.unit:
            params['Unit'] = alarm.unit
        alarm.connection = self
        return self.get_status('PutMetricAlarm', params)
    create_alarm = put_metric_alarm
    update_alarm = put_metric_alarm

    def delete_alarms(self, alarms):
        """
        Deletes all specified alarms. In the event of an error, no alarms are deleted.

        :type alarms: list
        :param alarms: List of alarm names.
        """
        params = {}
        self.build_list_params(params, alarms, 'AlarmNames.member.%s')
        return self.get_status('DeleteAlarms', params)

    def set_alarm_state(self, alarm_name, state_reason, state_value, state_reason_data=None):
        """
        Temporarily sets the state of an alarm. When the updated StateValue
        differs from the previous value, the action configured for the
        appropriate state is invoked. This is not a permanent change. The next
        periodic alarm check (in about a minute) will set the alarm to its
        actual state.

        :type alarm_name: string
        :param alarm_name: Descriptive name for alarm.

        :type state_reason: string
        :param state_reason: Human readable reason.

        :type state_value: string
        :param state_value: OK | ALARM | INSUFFICIENT_DATA

        :type state_reason_data: string
        :param state_reason_data: Reason string (will be jsonified).
        """
        params = {
            'AlarmName' : alarm_name,
            'StateReason' : state_reason,
            'StateValue' : state_value,
        }
        if state_reason_data:
            params['StateReasonData'] = json.dumps(state_reason_data)

        return self.get_status('SetAlarmState', params)

    def enable_alarm_actions(self, alarm_names):
        """
        Enables actions for the specified alarms.

        :type alarm_names: list
        :param alarm_names: List of alarm names.
        """
        params = {}
        self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
        return self.get_status('EnableAlarmActions', params)

    def disable_alarm_actions(self, alarm_names):
        """
        Disables actions for the specified alarms.

        :type alarm_names: list
        :param alarm_names: List of alarm names.
        """
        params = {}
        self.build_list_params(params, alarm_names, 'AlarmNames.member.%s')
        return self.get_status('DisableAlarmActions', params)
+
diff --git a/boto/ec2/cloudwatch/alarm.py b/boto/ec2/cloudwatch/alarm.py
new file mode 100644
index 0000000..81c0fc3
--- /dev/null
+++ b/boto/ec2/cloudwatch/alarm.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2010 Reza Lotun http://reza.lotun.name
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from datetime import datetime
+import json
+
+
+class MetricAlarm(object):
+
+ OK = 'OK'
+ ALARM = 'ALARM'
+ INSUFFICIENT_DATA = 'INSUFFICIENT_DATA'
+
+ _cmp_map = {
+ '>=' : 'GreaterThanOrEqualToThreshold',
+ '>' : 'GreaterThanThreshold',
+ '<' : 'LessThanThreshold',
+ '<=' : 'LessThanOrEqualToThreshold',
+ }
+ _rev_cmp_map = dict((v, k) for (k, v) in _cmp_map.iteritems())
+
+ def __init__(self, connection=None, name=None, metric=None,
+ namespace=None, statistic=None, comparison=None, threshold=None,
+ period=None, evaluation_periods=None):
+ """
+ Creates a new Alarm.
+
+ :type name: str
+ :param name: Name of alarm.
+
+ :type metric: str
+ :param metric: Name of alarm's associated metric.
+
+ :type namespace: str
+ :param namespace: The namespace for the alarm's metric.
+
+ :type statistic: str
+ :param statistic: The statistic to apply to the alarm's associated metric. Can
+ be one of 'SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum'
+
+ :type comparison: str
+ :param comparison: Comparison used to compare statistic with threshold. Can be
+ one of '>=', '>', '<', '<='
+
+ :type threshold: float
+ :param threshold: The value against which the specified statistic is compared.
+
+ :type period: int
+ :param period: The period in seconds over which teh specified statistic is applied.
+
+ :type evaluation_periods: int
+ :param evaluation_period: The number of periods over which data is compared to
+ the specified threshold
+ """
+ self.name = name
+ self.connection = connection
+ self.metric = metric
+ self.namespace = namespace
+ self.statistic = statistic
+ self.threshold = float(threshold) if threshold is not None else None
+ self.comparison = self._cmp_map.get(comparison)
+ self.period = int(period) if period is not None else None
+ self.evaluation_periods = int(evaluation_periods) if evaluation_periods is not None else None
+ self.actions_enabled = None
+ self.alarm_actions = []
+ self.alarm_arn = None
+ self.last_updated = None
+ self.description = ''
+ self.dimensions = []
+ self.insufficient_data_actions = []
+ self.ok_actions = []
+ self.state_reason = None
+ self.state_value = None
+ self.unit = None
+
+ def __repr__(self):
+ return 'MetricAlarm:%s[%s(%s) %s %s]' % (self.name, self.metric, self.statistic, self.comparison, self.threshold)
+
+ def startElement(self, name, attrs, connection):
+ return
+
+ def endElement(self, name, value, connection):
+ if name == 'ActionsEnabled':
+ self.actions_enabled = value
+ elif name == 'AlarmArn':
+ self.alarm_arn = value
+ elif name == 'AlarmConfigurationUpdatedTimestamp':
+ self.last_updated = value
+ elif name == 'AlarmDescription':
+ self.description = value
+ elif name == 'AlarmName':
+ self.name = value
+ elif name == 'ComparisonOperator':
+ setattr(self, 'comparison', self._rev_cmp_map[value])
+ elif name == 'EvaluationPeriods':
+ self.evaluation_periods = int(value)
+ elif name == 'MetricName':
+ self.metric = value
+ elif name == 'Namespace':
+ self.namespace = value
+ elif name == 'Period':
+ self.period = int(value)
+ elif name == 'StateReason':
+ self.state_reason = value
+ elif name == 'StateValue':
+ self.state_value = None
+ elif name == 'Statistic':
+ self.statistic = value
+ elif name == 'Threshold':
+ self.threshold = float(value)
+ elif name == 'Unit':
+ self.unit = value
+ else:
+ setattr(self, name, value)
+
+ def set_state(self, value, reason, data=None):
+ """ Temporarily sets the state of an alarm.
+
+ :type value: str
+ :param value: OK | ALARM | INSUFFICIENT_DATA
+
+ :type reason: str
+ :param reason: Reason alarm set (human readable).
+
+ :type data: str
+ :param data: Reason data (will be jsonified).
+ """
+ return self.connection.set_alarm_state(self.name, reason, value, data)
+
+ def update(self):
+ return self.connection.update_alarm(self)
+
+ def enable_actions(self):
+ return self.connection.enable_alarm_actions([self.name])
+
+ def disable_actions(self):
+ return self.connection.disable_alarm_actions([self.name])
+
+ def describe_history(self, start_date=None, end_date=None, max_records=None, history_item_type=None, next_token=None):
+ return self.connection.describe_alarm_history(self.name, start_date, end_date,
+ max_records, history_item_type, next_token)
+
+class AlarmHistoryItem(object):
+ def __init__(self, connection=None):
+ self.connection = connection
+
+ def __repr__(self):
+ return 'AlarmHistory:%s[%s at %s]' % (self.name, self.summary, self.timestamp)
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'AlarmName':
+ self.name = value
+ elif name == 'HistoryData':
+ self.data = json.loads(value)
+ elif name == 'HistoryItemType':
+ self.tem_type = value
+ elif name == 'HistorySummary':
+ self.summary = value
+ elif name == 'Timestamp':
+ self.timestamp = datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
+
diff --git a/boto/ec2/cloudwatch/datapoint.py b/boto/ec2/cloudwatch/datapoint.py
new file mode 100644
index 0000000..d4350ce
--- /dev/null
+++ b/boto/ec2/cloudwatch/datapoint.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from datetime import datetime
+
+class Datapoint(dict):
+
+ def __init__(self, connection=None):
+ dict.__init__(self)
+ self.connection = connection
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name in ['Average', 'Maximum', 'Minimum', 'Sum', 'SampleCount']:
+ self[name] = float(value)
+ elif name == 'Timestamp':
+ self[name] = datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')
+ elif name != 'member':
+ self[name] = value
+
diff --git a/boto/ec2/cloudwatch/metric.py b/boto/ec2/cloudwatch/metric.py
new file mode 100644
index 0000000..cd8c4bc
--- /dev/null
+++ b/boto/ec2/cloudwatch/metric.py
@@ -0,0 +1,80 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+class Dimensions(dict):
+    """
+    Parses metric dimensions from a CloudWatch response into a dict of
+    dimension name -> dimension value.
+    """
+
+    def startElement(self, name, attrs, connection):
+        pass
+
+    def endElement(self, name, value, connection):
+        # Relies on the response emitting each <Name> element before its
+        # paired <Value>: the name is stashed, then consumed by the Value
+        # branch on the next call.
+        if name == 'Name':
+            self._name = value
+        elif name == 'Value':
+            self[self._name] = value
+        elif name != 'Dimensions' and name != 'member':
+            self[name] = value
+
+class Metric(object):
+
+ Statistics = ['Minimum', 'Maximum', 'Sum', 'Average', 'SampleCount']
+ Units = ['Seconds', 'Percent', 'Bytes', 'Bits', 'Count',
+ 'Bytes/Second', 'Bits/Second', 'Count/Second']
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.name = None
+ self.namespace = None
+ self.dimensions = None
+
+ def __repr__(self):
+ s = 'Metric:%s' % self.name
+ if self.dimensions:
+ for name,value in self.dimensions.items():
+ s += '(%s,%s)' % (name, value)
+ return s
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Dimensions':
+ self.dimensions = Dimensions()
+ return self.dimensions
+
+ def endElement(self, name, value, connection):
+ if name == 'MetricName':
+ self.name = value
+ elif name == 'Namespace':
+ self.namespace = value
+ else:
+ setattr(self, name, value)
+
+ def query(self, start_time, end_time, statistic, unit=None, period=60):
+ return self.connection.get_metric_statistics(period, start_time, end_time,
+ self.name, self.namespace, [statistic],
+ self.dimensions, unit)
+
+ def describe_alarms(self, period=None, statistic=None, dimensions=None, unit=None):
+ return self.connection.describe_alarms_for_metric(self.name,
+ self.namespace,
+ period,
+ statistic,
+ dimensions,
+ unit)
+
diff --git a/boto/ec2/connection.py b/boto/ec2/connection.py
new file mode 100644
index 0000000..89d1d4e
--- /dev/null
+++ b/boto/ec2/connection.py
@@ -0,0 +1,2287 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EC2 service.
+"""
+
+import base64
+import warnings
+from datetime import datetime
+from datetime import timedelta
+import boto
+from boto.connection import AWSQueryConnection
+from boto.resultset import ResultSet
+from boto.ec2.image import Image, ImageAttribute
+from boto.ec2.instance import Reservation, Instance, ConsoleOutput, InstanceAttribute
+from boto.ec2.keypair import KeyPair
+from boto.ec2.address import Address
+from boto.ec2.volume import Volume
+from boto.ec2.snapshot import Snapshot
+from boto.ec2.snapshot import SnapshotAttribute
+from boto.ec2.zone import Zone
+from boto.ec2.securitygroup import SecurityGroup
+from boto.ec2.regioninfo import RegionInfo
+from boto.ec2.instanceinfo import InstanceInfo
+from boto.ec2.reservedinstance import ReservedInstancesOffering, ReservedInstance
+from boto.ec2.spotinstancerequest import SpotInstanceRequest
+from boto.ec2.spotpricehistory import SpotPriceHistory
+from boto.ec2.spotdatafeedsubscription import SpotDatafeedSubscription
+from boto.ec2.bundleinstance import BundleInstanceTask
+from boto.ec2.placementgroup import PlacementGroup
+from boto.ec2.tag import Tag
+from boto.exception import EC2ResponseError
+
+#boto.set_stream_logger('ec2')
+
+class EC2Connection(AWSQueryConnection):
+
+ APIVersion = boto.config.get('Boto', 'ec2_version', '2010-08-31')
+ DefaultRegionName = boto.config.get('Boto', 'ec2_region_name', 'us-east-1')
+ DefaultRegionEndpoint = boto.config.get('Boto', 'ec2_region_endpoint',
+ 'ec2.amazonaws.com')
+ ResponseError = EC2ResponseError
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, host=None, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/'):
+        """
+        Init method to create a new connection to EC2.
+
+        B{Note:} The host argument is overridden by the host specified in the
+        boto configuration file.
+        """
+        # Fall back to the class-level default region when the caller does
+        # not supply one explicitly.
+        if not region:
+            region = RegionInfo(self, self.DefaultRegionName,
+                                self.DefaultRegionEndpoint)
+        self.region = region
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy, proxy_port,
+                                    proxy_user, proxy_pass,
+                                    self.region.endpoint, debug,
+                                    https_connection_factory, path)
+
+    def _required_auth_capability(self):
+        """Return the signing capability names required by this connection."""
+        return ['ec2']
+
+ def get_params(self):
+ """
+ Returns a dictionary containing the value of of all of the keyword
+ arguments passed when constructing this connection.
+ """
+ param_names = ['aws_access_key_id', 'aws_secret_access_key', 'is_secure',
+ 'port', 'proxy', 'proxy_port', 'proxy_user', 'proxy_pass',
+ 'debug', 'https_connection_factory']
+ params = {}
+ for name in param_names:
+ params[name] = getattr(self, name)
+ return params
+
+ def build_filter_params(self, params, filters):
+ i = 1
+ for name in filters:
+ aws_name = name.replace('_', '-')
+ params['Filter.%d.Name' % i] = aws_name
+ value = filters[name]
+ if not isinstance(value, list):
+ value = [value]
+ j = 1
+ for v in value:
+ params['Filter.%d.Value.%d' % (i,j)] = v
+ j += 1
+ i += 1
+
+ # Image methods
+
+    def get_all_images(self, image_ids=None, owners=None,
+                       executable_by=None, filters=None):
+        """
+        Retrieve all the EC2 images available on your account.
+
+        :type image_ids: list
+        :param image_ids: A list of strings with the image IDs wanted
+
+        :type owners: list
+        :param owners: A list of owner IDs
+
+        :type executable_by: list
+        :param executable_by: Returns AMIs for which the specified
+                              user ID has explicit launch permissions
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned. Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value. The set of allowable filter
+                        names/values is dependent on the request
+                        being performed. Check the EC2 API guide
+                        for details.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.image.Image`
+        """
+        params = {}
+        if image_ids:
+            self.build_list_params(params, image_ids, 'ImageId')
+        if owners:
+            self.build_list_params(params, owners, 'Owner')
+        if executable_by:
+            self.build_list_params(params, executable_by, 'ExecutableBy')
+        # Filters are expanded into Filter.N.Name / Filter.N.Value.M params.
+        if filters:
+            self.build_filter_params(params, filters)
+        return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
+
+ def get_all_kernels(self, kernel_ids=None, owners=None):
+ """
+ Retrieve all the EC2 kernels available on your account.
+ Constructs a filter to allow the processing to happen server side.
+
+ :type kernel_ids: list
+ :param kernel_ids: A list of strings with the image IDs wanted
+
+ :type owners: list
+ :param owners: A list of owner IDs
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.image.Image`
+ """
+ params = {}
+ if kernel_ids:
+ self.build_list_params(params, kernel_ids, 'ImageId')
+ if owners:
+ self.build_list_params(params, owners, 'Owner')
+ filter = {'image-type' : 'kernel'}
+ self.build_filter_params(params, filter)
+ return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
+
+ def get_all_ramdisks(self, ramdisk_ids=None, owners=None):
+ """
+ Retrieve all the EC2 ramdisks available on your account.
+ Constructs a filter to allow the processing to happen server side.
+
+ :type ramdisk_ids: list
+ :param ramdisk_ids: A list of strings with the image IDs wanted
+
+ :type owners: list
+ :param owners: A list of owner IDs
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.image.Image`
+ """
+ params = {}
+ if ramdisk_ids:
+ self.build_list_params(params, ramdisk_ids, 'ImageId')
+ if owners:
+ self.build_list_params(params, owners, 'Owner')
+ filter = {'image-type' : 'ramdisk'}
+ self.build_filter_params(params, filter)
+ return self.get_list('DescribeImages', params, [('item', Image)], verb='POST')
+
+ def get_image(self, image_id):
+ """
+ Shortcut method to retrieve a specific image (AMI).
+
+ :type image_id: string
+ :param image_id: the ID of the Image to retrieve
+
+ :rtype: :class:`boto.ec2.image.Image`
+ :return: The EC2 Image specified or None if the image is not found
+ """
+ try:
+ return self.get_all_images(image_ids=[image_id])[0]
+ except IndexError: # None of those images available
+ return None
+
+    def register_image(self, name=None, description=None, image_location=None,
+                       architecture=None, kernel_id=None, ramdisk_id=None,
+                       root_device_name=None, block_device_map=None):
+        """
+        Register an image.
+
+        :type name: string
+        :param name: The name of the AMI. Valid only for EBS-based images.
+
+        :type description: string
+        :param description: The description of the AMI.
+
+        :type image_location: string
+        :param image_location: Full path to your AMI manifest in Amazon S3 storage.
+                               Only used for S3-based AMI's.
+
+        :type architecture: string
+        :param architecture: The architecture of the AMI. Valid choices are:
+                             i386 | x86_64
+
+        :type kernel_id: string
+        :param kernel_id: The ID of the kernel with which to launch the instances
+
+        :type ramdisk_id: string
+        :param ramdisk_id: The ID of the RAM disk with which to launch the instances
+
+        :type root_device_name: string
+        :param root_device_name: The root device name (e.g. /dev/sdh)
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+                                 describing the EBS volumes associated
+                                 with the Image.
+
+        :rtype: string
+        :return: The new image id
+        """
+        params = {}
+        if name:
+            params['Name'] = name
+        if description:
+            params['Description'] = description
+        if architecture:
+            params['Architecture'] = architecture
+        if kernel_id:
+            params['KernelId'] = kernel_id
+        if ramdisk_id:
+            params['RamdiskId'] = ramdisk_id
+        if image_location:
+            params['ImageLocation'] = image_location
+        if root_device_name:
+            params['RootDeviceName'] = root_device_name
+        if block_device_map:
+            block_device_map.build_list_params(params)
+        rs = self.get_object('RegisterImage', params, ResultSet, verb='POST')
+        # The new image id comes back as the 'imageId' element of the result.
+        image_id = getattr(rs, 'imageId', None)
+        return image_id
+
+ def deregister_image(self, image_id):
+ """
+ Unregister an AMI.
+
+ :type image_id: string
+ :param image_id: the ID of the Image to unregister
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.get_status('DeregisterImage', {'ImageId':image_id}, verb='POST')
+
+    def create_image(self, instance_id, name, description=None, no_reboot=False):
+        """
+        Will create an AMI from the instance in the running or stopped
+        state.
+
+        :type instance_id: string
+        :param instance_id: the ID of the instance to image.
+
+        :type name: string
+        :param name: The name of the new image
+
+        :type description: string
+        :param description: An optional human-readable string describing
+                            the contents and purpose of the AMI.
+
+        :type no_reboot: bool
+        :param no_reboot: An optional flag indicating that the bundling process
+                          should not attempt to shutdown the instance before
+                          bundling. If this flag is True, the responsibility
+                          of maintaining file system integrity is left to the
+                          owner of the instance.
+
+        :rtype: string
+        :return: The new image id
+        """
+        params = {'InstanceId' : instance_id,
+                  'Name' : name}
+        if description:
+            params['Description'] = description
+        # The API expects the literal string 'true', not a Python bool.
+        if no_reboot:
+            params['NoReboot'] = 'true'
+        img = self.get_object('CreateImage', params, Image, verb='POST')
+        return img.id
+
+ # ImageAttribute methods
+
+    def get_image_attribute(self, image_id, attribute='launchPermission'):
+        """
+        Gets an attribute from an image.
+
+        :type image_id: string
+        :param image_id: The Amazon image id for which you want info about
+
+        :type attribute: string
+        :param attribute: The attribute you need information about.
+                          Valid choices are:
+                          * launchPermission
+                          * productCodes
+                          * blockDeviceMapping
+
+        :rtype: :class:`boto.ec2.image.ImageAttribute`
+        :return: An ImageAttribute object representing the value of the
+                 attribute requested
+        """
+        params = {'ImageId' : image_id,
+                  'Attribute' : attribute}
+        return self.get_object('DescribeImageAttribute', params, ImageAttribute, verb='POST')
+
+    def modify_image_attribute(self, image_id, attribute='launchPermission',
+                               operation='add', user_ids=None, groups=None,
+                               product_codes=None):
+        """
+        Changes an attribute of an image.
+
+        :type image_id: string
+        :param image_id: The image id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change
+
+        :type operation: string
+        :param operation: Either add or remove (this is required for changing
+                          launchPermissions)
+
+        :type user_ids: list
+        :param user_ids: The Amazon IDs of users to add/remove attributes
+
+        :type groups: list
+        :param groups: The groups to add/remove attributes
+
+        :type product_codes: list
+        :param product_codes: Amazon DevPay product code. Currently only one
+                              product code can be associated with an AMI. Once
+                              set, the product code cannot be changed or reset.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'ImageId' : image_id,
+                  'Attribute' : attribute,
+                  'OperationType' : operation}
+        if user_ids:
+            self.build_list_params(params, user_ids, 'UserId')
+        if groups:
+            self.build_list_params(params, groups, 'UserGroup')
+        if product_codes:
+            self.build_list_params(params, product_codes, 'ProductCode')
+        return self.get_status('ModifyImageAttribute', params, verb='POST')
+
+ def reset_image_attribute(self, image_id, attribute='launchPermission'):
+ """
+ Resets an attribute of an AMI to its default value.
+
+ :type image_id: string
+ :param image_id: ID of the AMI for which an attribute will be described
+
+ :type attribute: string
+ :param attribute: The attribute to reset
+
+ :rtype: bool
+ :return: Whether the operation succeeded or not
+ """
+ params = {'ImageId' : image_id,
+ 'Attribute' : attribute}
+ return self.get_status('ResetImageAttribute', params, verb='POST')
+
+ # Instance methods
+
+    def get_all_instances(self, instance_ids=None, filters=None):
+        """
+        Retrieve all the instances associated with your account.
+
+        :type instance_ids: list
+        :param instance_ids: A list of strings of instance IDs
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned. Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value. The set of allowable filter
+                        names/values is dependent on the request
+                        being performed. Check the EC2 API guide
+                        for details.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.instance.Reservation`
+        """
+        params = {}
+        if instance_ids:
+            self.build_list_params(params, instance_ids, 'InstanceId')
+        if filters:
+            self.build_filter_params(params, filters)
+        # Note: instances are returned grouped by Reservation, not flat.
+        return self.get_list('DescribeInstances', params,
+                             [('item', Reservation)], verb='POST')
+
+    def run_instances(self, image_id, min_count=1, max_count=1,
+                      key_name=None, security_groups=None,
+                      user_data=None, addressing_type=None,
+                      instance_type='m1.small', placement=None,
+                      kernel_id=None, ramdisk_id=None,
+                      monitoring_enabled=False, subnet_id=None,
+                      block_device_map=None,
+                      disable_api_termination=False,
+                      instance_initiated_shutdown_behavior=None,
+                      private_ip_address=None,
+                      placement_group=None, client_token=None):
+        """
+        Runs an image on EC2.
+
+        :type image_id: string
+        :param image_id: The ID of the image to run
+
+        :type min_count: int
+        :param min_count: The minimum number of instances to launch
+
+        :type max_count: int
+        :param max_count: The maximum number of instances to launch
+
+        :type key_name: string
+        :param key_name: The name of the key pair with which to launch instances
+
+        :type security_groups: list of strings
+        :param security_groups: The names of the security groups with which to
+                                associate instances
+
+        :type user_data: string
+        :param user_data: The user data passed to the launched instances
+
+        :type instance_type: string
+        :param instance_type: The type of instance to run:
+
+                              * m1.small
+                              * m1.large
+                              * m1.xlarge
+                              * c1.medium
+                              * c1.xlarge
+                              * m2.xlarge
+                              * m2.2xlarge
+                              * m2.4xlarge
+                              * cc1.4xlarge
+                              * t1.micro
+
+        :type placement: string
+        :param placement: The availability zone in which to launch the instances
+
+        :type kernel_id: string
+        :param kernel_id: The ID of the kernel with which to launch the
+                          instances
+
+        :type ramdisk_id: string
+        :param ramdisk_id: The ID of the RAM disk with which to launch the
+                           instances
+
+        :type monitoring_enabled: bool
+        :param monitoring_enabled: Enable CloudWatch monitoring on the instance.
+
+        :type subnet_id: string
+        :param subnet_id: The subnet ID within which to launch the instances
+                          for VPC.
+
+        :type private_ip_address: string
+        :param private_ip_address: If you're using VPC, you can optionally use
+                                   this parameter to assign the instance a
+                                   specific available IP address from the
+                                   subnet (e.g., 10.0.0.25).
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+                                 describing the EBS volumes associated
+                                 with the Image.
+
+        :type disable_api_termination: bool
+        :param disable_api_termination: If True, the instances will be locked
+                                        and will not be able to be terminated
+                                        via the API.
+
+        :type instance_initiated_shutdown_behavior: string
+        :param instance_initiated_shutdown_behavior: Specifies whether the
+                                                     instance's EBS volumes are
+                                                     stopped (i.e. detached) or
+                                                     terminated (i.e. deleted)
+                                                     when the instance is
+                                                     shutdown by the
+                                                     owner. Valid values are:
+
+                                                     * stop
+                                                     * terminate
+
+        :type placement_group: string
+        :param placement_group: If specified, this is the name of the placement
+                                group in which the instance(s) will be launched.
+
+        :type client_token: string
+        :param client_token: Unique, case-sensitive identifier you provide
+                             to ensure idempotency of the request.
+                             Maximum 64 ASCII characters
+
+        :rtype: Reservation
+        :return: The :class:`boto.ec2.instance.Reservation` associated with
+                 the request for machines
+        """
+        params = {'ImageId':image_id,
+                  'MinCount':min_count,
+                  'MaxCount': max_count}
+        if key_name:
+            params['KeyName'] = key_name
+        if security_groups:
+            # Accept either SecurityGroup objects or plain group names.
+            l = []
+            for group in security_groups:
+                if isinstance(group, SecurityGroup):
+                    l.append(group.name)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l, 'SecurityGroup')
+        if user_data:
+            # The API requires user data to be base64 encoded.
+            params['UserData'] = base64.b64encode(user_data)
+        if addressing_type:
+            params['AddressingType'] = addressing_type
+        if instance_type:
+            params['InstanceType'] = instance_type
+        if placement:
+            params['Placement.AvailabilityZone'] = placement
+        if placement_group:
+            params['Placement.GroupName'] = placement_group
+        if kernel_id:
+            params['KernelId'] = kernel_id
+        if ramdisk_id:
+            params['RamdiskId'] = ramdisk_id
+        if monitoring_enabled:
+            params['Monitoring.Enabled'] = 'true'
+        if subnet_id:
+            params['SubnetId'] = subnet_id
+        if private_ip_address:
+            params['PrivateIpAddress'] = private_ip_address
+        if block_device_map:
+            block_device_map.build_list_params(params)
+        if disable_api_termination:
+            params['DisableApiTermination'] = 'true'
+        if instance_initiated_shutdown_behavior:
+            val = instance_initiated_shutdown_behavior
+            params['InstanceInitiatedShutdownBehavior'] = val
+        if client_token:
+            params['ClientToken'] = client_token
+        return self.get_object('RunInstances', params, Reservation, verb='POST')
+
+ def terminate_instances(self, instance_ids=None):
+ """
+ Terminate the instances specified
+
+ :type instance_ids: list
+ :param instance_ids: A list of strings of the Instance IDs to terminate
+
+ :rtype: list
+ :return: A list of the instances terminated
+ """
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ return self.get_list('TerminateInstances', params, [('item', Instance)], verb='POST')
+
+ def stop_instances(self, instance_ids=None, force=False):
+ """
+ Stop the instances specified
+
+ :type instance_ids: list
+ :param instance_ids: A list of strings of the Instance IDs to stop
+
+ :type force: bool
+ :param force: Forces the instance to stop
+
+ :rtype: list
+ :return: A list of the instances stopped
+ """
+ params = {}
+ if force:
+ params['Force'] = 'true'
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ return self.get_list('StopInstances', params, [('item', Instance)], verb='POST')
+
+ def start_instances(self, instance_ids=None):
+ """
+ Start the instances specified
+
+ :type instance_ids: list
+ :param instance_ids: A list of strings of the Instance IDs to start
+
+ :rtype: list
+ :return: A list of the instances started
+ """
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ return self.get_list('StartInstances', params, [('item', Instance)], verb='POST')
+
+ def get_console_output(self, instance_id):
+ """
+ Retrieves the console output for the specified instance.
+
+ :type instance_id: string
+ :param instance_id: The instance ID of a running instance on the cloud.
+
+ :rtype: :class:`boto.ec2.instance.ConsoleOutput`
+ :return: The console output as a ConsoleOutput object
+ """
+ params = {}
+ self.build_list_params(params, [instance_id], 'InstanceId')
+ return self.get_object('GetConsoleOutput', params, ConsoleOutput, verb='POST')
+
+ def reboot_instances(self, instance_ids=None):
+ """
+ Reboot the specified instances.
+
+ :type instance_ids: list
+ :param instance_ids: The instances to terminate and reboot
+ """
+ params = {}
+ if instance_ids:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ return self.get_status('RebootInstances', params)
+
+    def confirm_product_instance(self, product_code, instance_id):
+        """
+        Confirm a DevPay product code against the given instance.
+
+        :type product_code: string
+        :param product_code: The DevPay product code to confirm.
+
+        :type instance_id: string
+        :param instance_id: The ID of the instance to check.
+
+        :rtype: tuple
+        :return: A (status, ownerId) tuple taken from the service response.
+        """
+        params = {'ProductCode' : product_code,
+                  'InstanceId' : instance_id}
+        rs = self.get_object('ConfirmProductInstance', params, ResultSet, verb='POST')
+        return (rs.status, rs.ownerId)
+
+ # InstanceAttribute methods
+
+    def get_instance_attribute(self, instance_id, attribute):
+        """
+        Gets an attribute from an instance.
+
+        :type instance_id: string
+        :param instance_id: The Amazon id of the instance
+
+        :type attribute: string
+        :param attribute: The attribute you need information about
+                          Valid choices are:
+
+                          * instanceType
+                          * kernel
+                          * ramdisk
+                          * userData
+                          * disableApiTermination
+                          * instanceInitiatedShutdownBehavior
+                          * rootDeviceName
+                          * blockDeviceMapping
+
+        :rtype: :class:`boto.ec2.image.InstanceAttribute`
+        :return: An InstanceAttribute object representing the value of the
+                 attribute requested
+        """
+        params = {'InstanceId' : instance_id}
+        if attribute:
+            params['Attribute'] = attribute
+        return self.get_object('DescribeInstanceAttribute', params,
+                               InstanceAttribute, verb='POST')
+
+ def modify_instance_attribute(self, instance_id, attribute, value):
+ """
+ Changes an attribute of an instance
+
+ :type instance_id: string
+ :param instance_id: The instance id you wish to change
+
+ :type attribute: string
+ :param attribute: The attribute you wish to change.
+
+ * AttributeName - Expected value (default)
+ * instanceType - A valid instance type (m1.small)
+ * kernel - Kernel ID (None)
+ * ramdisk - Ramdisk ID (None)
+ * userData - Base64 encoded String (None)
+ * disableApiTermination - Boolean (true)
+ * instanceInitiatedShutdownBehavior - stop|terminate
+ * rootDeviceName - device name (None)
+
+ :type value: string
+ :param value: The new value for the attribute
+
+ :rtype: bool
+ :return: Whether the operation succeeded or not
+ """
+ # Allow a bool to be passed in for value of disableApiTermination
+ if attribute == 'disableApiTermination':
+ if isinstance(value, bool):
+ if value:
+ value = 'true'
+ else:
+ value = 'false'
+ params = {'InstanceId' : instance_id,
+ 'Attribute' : attribute,
+ 'Value' : value}
+ return self.get_status('ModifyInstanceAttribute', params, verb='POST')
+
+ def reset_instance_attribute(self, instance_id, attribute):
+ """
+ Resets an attribute of an instance to its default value.
+
+ :type instance_id: string
+ :param instance_id: ID of the instance
+
+ :type attribute: string
+ :param attribute: The attribute to reset. Valid values are:
+ kernel|ramdisk
+
+ :rtype: bool
+ :return: Whether the operation succeeded or not
+ """
+ params = {'InstanceId' : instance_id,
+ 'Attribute' : attribute}
+ return self.get_status('ResetInstanceAttribute', params, verb='POST')
+
+ # Spot Instances
+
+ def get_all_spot_instance_requests(self, request_ids=None,
+ filters=None):
+ """
+ Retrieve all the spot instances requests associated with your account.
+
+ :type request_ids: list
+ :param request_ids: A list of strings of spot instance request IDs
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of
+ :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+ """
+ params = {}
+ if request_ids:
+ self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeSpotInstanceRequests', params,
+ [('item', SpotInstanceRequest)], verb='POST')
+
+    def get_spot_price_history(self, start_time=None, end_time=None,
+                               instance_type=None, product_description=None):
+        """
+        Retrieve the recent history of spot instances pricing.
+
+        :type start_time: str
+        :param start_time: An indication of how far back to provide price
+                           changes for. An ISO8601 DateTime string.
+
+        :type end_time: str
+        :param end_time: An indication of how far forward to provide price
+                         changes for. An ISO8601 DateTime string.
+
+        :type instance_type: str
+        :param instance_type: Filter responses to a particular instance type.
+
+        :type product_description: str
+        :param product_description: Filter responses to a particular platform.
+                                    Valid values are currently: Linux
+
+        :rtype: list
+        :return: A list of tuples containing price and timestamp.
+        """
+        params = {}
+        if start_time:
+            params['StartTime'] = start_time
+        if end_time:
+            params['EndTime'] = end_time
+        if instance_type:
+            params['InstanceType'] = instance_type
+        if product_description:
+            params['ProductDescription'] = product_description
+        return self.get_list('DescribeSpotPriceHistory', params,
+                             [('item', SpotPriceHistory)], verb='POST')
+
+    def request_spot_instances(self, price, image_id, count=1, type='one-time',
+                               valid_from=None, valid_until=None,
+                               launch_group=None, availability_zone_group=None,
+                               key_name=None, security_groups=None,
+                               user_data=None, addressing_type=None,
+                               instance_type='m1.small', placement=None,
+                               kernel_id=None, ramdisk_id=None,
+                               monitoring_enabled=False, subnet_id=None,
+                               block_device_map=None):
+        """
+        Request instances on the spot market at a particular price.
+
+        :type price: str
+        :param price: The maximum price of your bid
+
+        :type image_id: string
+        :param image_id: The ID of the image to run
+
+        :type count: int
+        :param count: The number of instances requested
+
+        :type type: str
+        :param type: Type of request. Can be 'one-time' or 'persistent'.
+                     Default is one-time.
+
+        :type valid_from: str
+        :param valid_from: Start date of the request. An ISO8601 time string.
+
+        :type valid_until: str
+        :param valid_until: End date of the request. An ISO8601 time string.
+
+        :type launch_group: str
+        :param launch_group: If supplied, all requests will be fulfilled
+                             as a group.
+
+        :type availability_zone_group: str
+        :param availability_zone_group: If supplied, all requests will be
+                                        fulfilled within a single
+                                        availability zone.
+
+        :type key_name: string
+        :param key_name: The name of the key pair with which to launch instances
+
+        :type security_groups: list of strings
+        :param security_groups: The names of the security groups with which to
+                                associate instances
+
+        :type user_data: string
+        :param user_data: The user data passed to the launched instances
+                          (base64-encoded automatically before sending)
+
+        :type addressing_type: string
+        :param addressing_type: If supplied, passed through as the launch
+                                specification's AddressingType parameter
+
+        :type instance_type: string
+        :param instance_type: The type of instance to run:
+
+                              * m1.small
+                              * m1.large
+                              * m1.xlarge
+                              * c1.medium
+                              * c1.xlarge
+                              * m2.xlarge
+                              * m2.2xlarge
+                              * m2.4xlarge
+                              * cc1.4xlarge
+                              * t1.micro
+
+        :type placement: string
+        :param placement: The availability zone in which to launch the instances
+
+        :type kernel_id: string
+        :param kernel_id: The ID of the kernel with which to launch the
+                          instances
+
+        :type ramdisk_id: string
+        :param ramdisk_id: The ID of the RAM disk with which to launch the
+                           instances
+
+        :type monitoring_enabled: bool
+        :param monitoring_enabled: Enable CloudWatch monitoring on the instance.
+
+        :type subnet_id: string
+        :param subnet_id: The subnet ID within which to launch the instances
+                          for VPC.
+
+        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
+        :param block_device_map: A BlockDeviceMapping data structure
+                                 describing the EBS volumes associated
+                                 with the Image.
+
+        :rtype: list
+        :return: A list of the
+                 :class:`boto.ec2.spotinstancerequest.SpotInstanceRequest`
+                 objects associated with the request for machines
+        """
+        params = {'LaunchSpecification.ImageId':image_id,
+                  'Type' : type,
+                  'SpotPrice' : price}
+        if count:
+            params['InstanceCount'] = count
+        if valid_from:
+            params['ValidFrom'] = valid_from
+        if valid_until:
+            params['ValidUntil'] = valid_until
+        if launch_group:
+            params['LaunchGroup'] = launch_group
+        if availability_zone_group:
+            params['AvailabilityZoneGroup'] = availability_zone_group
+        if key_name:
+            params['LaunchSpecification.KeyName'] = key_name
+        if security_groups:
+            l = []
+            for group in security_groups:
+                # Accept either SecurityGroup objects or plain group names.
+                if isinstance(group, SecurityGroup):
+                    l.append(group.name)
+                else:
+                    l.append(group)
+            self.build_list_params(params, l,
+                                   'LaunchSpecification.SecurityGroup')
+        if user_data:
+            params['LaunchSpecification.UserData'] = base64.b64encode(user_data)
+        if addressing_type:
+            params['LaunchSpecification.AddressingType'] = addressing_type
+        if instance_type:
+            params['LaunchSpecification.InstanceType'] = instance_type
+        if placement:
+            params['LaunchSpecification.Placement.AvailabilityZone'] = placement
+        if kernel_id:
+            params['LaunchSpecification.KernelId'] = kernel_id
+        if ramdisk_id:
+            params['LaunchSpecification.RamdiskId'] = ramdisk_id
+        if monitoring_enabled:
+            params['LaunchSpecification.Monitoring.Enabled'] = 'true'
+        if subnet_id:
+            params['LaunchSpecification.SubnetId'] = subnet_id
+        if block_device_map:
+            block_device_map.build_list_params(params, 'LaunchSpecification.')
+        return self.get_list('RequestSpotInstances', params,
+                             [('item', SpotInstanceRequest)],
+                             verb='POST')
+
+
+ def cancel_spot_instance_requests(self, request_ids):
+ """
+ Cancel the specified Spot Instance Requests.
+
+ :type request_ids: list
+ :param request_ids: A list of strings of the Request IDs to terminate
+
+ :rtype: list
+ :return: A list of the instances terminated
+ """
+ params = {}
+ if request_ids:
+ self.build_list_params(params, request_ids, 'SpotInstanceRequestId')
+ return self.get_list('CancelSpotInstanceRequests', params,
+ [('item', Instance)], verb='POST')
+
+ def get_spot_datafeed_subscription(self):
+ """
+ Return the current spot instance data feed subscription
+ associated with this account, if any.
+
+ :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
+ :return: The datafeed subscription object or None
+ """
+ return self.get_object('DescribeSpotDatafeedSubscription',
+ None, SpotDatafeedSubscription, verb='POST')
+
+ def create_spot_datafeed_subscription(self, bucket, prefix):
+ """
+ Create a spot instance datafeed subscription for this account.
+
+ :type bucket: str or unicode
+ :param bucket: The name of the bucket where spot instance data
+ will be written. The account issuing this request
+ must have FULL_CONTROL access to the bucket
+ specified in the request.
+
+ :type prefix: str or unicode
+ :param prefix: An optional prefix that will be pre-pended to all
+ data files written to the bucket.
+
+ :rtype: :class:`boto.ec2.spotdatafeedsubscription.SpotDatafeedSubscription`
+ :return: The datafeed subscription object or None
+ """
+ params = {'Bucket' : bucket}
+ if prefix:
+ params['Prefix'] = prefix
+ return self.get_object('CreateSpotDatafeedSubscription',
+ params, SpotDatafeedSubscription, verb='POST')
+
+ def delete_spot_datafeed_subscription(self):
+ """
+ Delete the current spot instance data feed subscription
+ associated with this account
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.get_status('DeleteSpotDatafeedSubscription', None, verb='POST')
+
+ # Zone methods
+
+ def get_all_zones(self, zones=None, filters=None):
+ """
+ Get all Availability Zones associated with the current region.
+
+ :type zones: list
+ :param zones: Optional list of zones. If this list is present,
+ only the Zones associated with these zone names
+ will be returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list of :class:`boto.ec2.zone.Zone`
+ :return: The requested Zone objects
+ """
+ params = {}
+ if zones:
+ self.build_list_params(params, zones, 'ZoneName')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeAvailabilityZones', params, [('item', Zone)], verb='POST')
+
+ # Address methods
+
+ def get_all_addresses(self, addresses=None, filters=None):
+ """
+ Get all EIP's associated with the current credentials.
+
+ :type addresses: list
+ :param addresses: Optional list of addresses. If this list is present,
+ only the Addresses associated with these addresses
+ will be returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list of :class:`boto.ec2.address.Address`
+ :return: The requested Address objects
+ """
+ params = {}
+ if addresses:
+ self.build_list_params(params, addresses, 'PublicIp')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeAddresses', params, [('item', Address)], verb='POST')
+
+ def allocate_address(self):
+ """
+ Allocate a new Elastic IP address and associate it with your account.
+
+ :rtype: :class:`boto.ec2.address.Address`
+ :return: The newly allocated Address
+ """
+ return self.get_object('AllocateAddress', {}, Address, verb='POST')
+
+ def associate_address(self, instance_id, public_ip):
+ """
+ Associate an Elastic IP address with a currently running instance.
+
+ :type instance_id: string
+ :param instance_id: The ID of the instance
+
+ :type public_ip: string
+ :param public_ip: The public IP address
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'InstanceId' : instance_id, 'PublicIp' : public_ip}
+ return self.get_status('AssociateAddress', params, verb='POST')
+
+ def disassociate_address(self, public_ip):
+ """
+ Disassociate an Elastic IP address from a currently running instance.
+
+ :type public_ip: string
+ :param public_ip: The public IP address
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'PublicIp' : public_ip}
+ return self.get_status('DisassociateAddress', params, verb='POST')
+
+ def release_address(self, public_ip):
+ """
+ Free up an Elastic IP address
+
+ :type public_ip: string
+ :param public_ip: The public IP address
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'PublicIp' : public_ip}
+ return self.get_status('ReleaseAddress', params, verb='POST')
+
+ # Volume methods
+
+ def get_all_volumes(self, volume_ids=None, filters=None):
+ """
+ Get all Volumes associated with the current credentials.
+
+ :type volume_ids: list
+ :param volume_ids: Optional list of volume ids. If this list is present,
+ only the volumes associated with these volume ids
+ will be returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list of :class:`boto.ec2.volume.Volume`
+ :return: The requested Volume objects
+ """
+ params = {}
+ if volume_ids:
+ self.build_list_params(params, volume_ids, 'VolumeId')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeVolumes', params, [('item', Volume)], verb='POST')
+
+    def create_volume(self, size, zone, snapshot=None):
+        """
+        Create a new EBS Volume.
+
+        :type size: int
+        :param size: The size of the new volume, in GiB
+
+        :type zone: string or :class:`boto.ec2.zone.Zone`
+        :param zone: The availability zone in which the Volume will be created.
+
+        :type snapshot: string or :class:`boto.ec2.snapshot.Snapshot`
+        :param snapshot: The snapshot from which the new Volume will be created.
+
+        :rtype: :class:`boto.ec2.volume.Volume`
+        :return: The newly created Volume
+        """
+        # zone and snapshot may be passed as objects; reduce both to IDs.
+        if isinstance(zone, Zone):
+            zone = zone.name
+        params = {'AvailabilityZone' : zone}
+        if size:
+            params['Size'] = size
+        if snapshot:
+            if isinstance(snapshot, Snapshot):
+                snapshot = snapshot.id
+            params['SnapshotId'] = snapshot
+        return self.get_object('CreateVolume', params, Volume, verb='POST')
+
+    def delete_volume(self, volume_id):
+        """
+        Delete an EBS volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be deleted.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId': volume_id}
+        return self.get_status('DeleteVolume', params, verb='POST')
+
+    def attach_volume(self, volume_id, instance_id, device):
+        """
+        Attach an EBS volume to an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be attached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance to which it will
+                            be attached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+                       volume will be exposed (e.g. /dev/sdh)
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'InstanceId' : instance_id,
+                  'VolumeId' : volume_id,
+                  'Device' : device}
+        return self.get_status('AttachVolume', params, verb='POST')
+
+    def detach_volume(self, volume_id, instance_id=None,
+                      device=None, force=False):
+        """
+        Detach an EBS volume from an EC2 instance.
+
+        :type volume_id: str
+        :param volume_id: The ID of the EBS volume to be detached.
+
+        :type instance_id: str
+        :param instance_id: The ID of the EC2 instance from which it will
+                            be detached.
+
+        :type device: str
+        :param device: The device on the instance through which the
+                       volume is exposed (e.g. /dev/sdh)
+
+        :type force: bool
+        :param force: Forces detachment if the previous detachment attempt did
+                      not occur cleanly. This option can lead to data loss or
+                      a corrupted file system. Use this option only as a last
+                      resort to detach a volume from a failed instance. The
+                      instance will not have an opportunity to flush file system
+                      caches nor file system meta data. If you use this option,
+                      you must perform file system check and repair procedures.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'VolumeId' : volume_id}
+        if instance_id:
+            params['InstanceId'] = instance_id
+        if device:
+            params['Device'] = device
+        if force:
+            params['Force'] = 'true'
+        return self.get_status('DetachVolume', params, verb='POST')
+
+ # Snapshot methods
+
+ def get_all_snapshots(self, snapshot_ids=None,
+ owner=None, restorable_by=None,
+ filters=None):
+ """
+ Get all EBS Snapshots associated with the current credentials.
+
+ :type snapshot_ids: list
+ :param snapshot_ids: Optional list of snapshot ids. If this list is
+ present, only the Snapshots associated with
+ these snapshot ids will be returned.
+
+ :type owner: str
+ :param owner: If present, only the snapshots owned by the specified user
+ will be returned. Valid values are:
+
+ * self
+ * amazon
+ * AWS Account ID
+
+ :type restorable_by: str
+ :param restorable_by: If present, only the snapshots that are restorable
+ by the specified account id will be returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list of :class:`boto.ec2.snapshot.Snapshot`
+ :return: The requested Snapshot objects
+ """
+ params = {}
+ if snapshot_ids:
+ self.build_list_params(params, snapshot_ids, 'SnapshotId')
+ if owner:
+ params['Owner'] = owner
+ if restorable_by:
+ params['RestorableBy'] = restorable_by
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeSnapshots', params, [('item', Snapshot)], verb='POST')
+
+    def create_snapshot(self, volume_id, description=None):
+        """
+        Create a snapshot of an existing EBS Volume.
+
+        :type volume_id: str
+        :param volume_id: The ID of the volume to be snapshot'ed
+
+        :type description: str
+        :param description: A description of the snapshot.
+                            Limited to 255 characters.
+
+        :rtype: :class:`boto.ec2.snapshot.Snapshot`
+        :return: The new Snapshot object.  If the source volume has a
+                 'Name' tag, the same tag is applied to the snapshot.
+        """
+        params = {'VolumeId' : volume_id}
+        if description:
+            # The API limits descriptions to 255 characters; truncate.
+            params['Description'] = description[0:255]
+        snapshot = self.get_object('CreateSnapshot', params, Snapshot, verb='POST')
+        # Copy the volume's 'Name' tag (if any) onto the new snapshot so
+        # functions such as trim_snapshots can group snapshots by volume.
+        volume = self.get_all_volumes([volume_id])[0]
+        volume_name = volume.tags.get('Name')
+        if volume_name:
+            snapshot.add_tag('Name', volume_name)
+        return snapshot
+
+    def delete_snapshot(self, snapshot_id):
+        """
+        Delete an EBS snapshot.
+
+        :type snapshot_id: str
+        :param snapshot_id: The ID of the snapshot to be deleted
+
+        :rtype: bool
+        :return: True if successful
+        """
+        params = {'SnapshotId': snapshot_id}
+        return self.get_status('DeleteSnapshot', params, verb='POST')
+
+ def trim_snapshots(self, hourly_backups = 8, daily_backups = 7, weekly_backups = 4):
+ """
+ Trim excess snapshots, based on when they were taken. More current snapshots are
+ retained, with the number retained decreasing as you move back in time.
+
+ If ebs volumes have a 'Name' tag with a value, their snapshots will be assigned the same
+ tag when they are created. The values of the 'Name' tags for snapshots are used by this
+ function to group snapshots taken from the same volume (or from a series of like-named
+ volumes over time) for trimming.
+
+ For every group of like-named snapshots, this function retains the newest and oldest
+ snapshots, as well as, by default, the first snapshots taken in each of the last eight
+ hours, the first snapshots taken in each of the last seven days, the first snapshots
+ taken in the last 4 weeks (counting Midnight Sunday morning as the start of the week),
+ and the first snapshot from the first Sunday of each month forever.
+
+ :type hourly_backups: int
+ :param hourly_backups: How many recent hourly backups should be saved.
+
+ :type daily_backups: int
+ :param daily_backups: How many recent daily backups should be saved.
+
+ :type weekly_backups: int
+ :param weekly_backups: How many recent weekly backups should be saved.
+ """
+
+ # This function first builds up an ordered list of target times that snapshots should be saved for
+ # (last 8 hours, last 7 days, etc.). Then a map of snapshots is constructed, with the keys being
+ # the snapshot / volume names and the values being arrays of chornologically sorted snapshots.
+ # Finally, for each array in the map, we go through the snapshot array and the target time array
+ # in an interleaved fashion, deleting snapshots whose start_times don't immediately follow a
+ # target time (we delete a snapshot if there's another snapshot that was made closer to the
+ # preceding target time).
+
+ now = datetime.utcnow() # work with UTC time, which is what the snapshot start time is reported in
+ last_hour = datetime(now.year, now.month, now.day, now.hour)
+ last_midnight = datetime(now.year, now.month, now.day)
+ last_sunday = datetime(now.year, now.month, now.day) - timedelta(days = (now.weekday() + 1) % 7)
+ start_of_month = datetime(now.year, now.month, 1)
+
+ target_backup_times = []
+
+ oldest_snapshot_date = datetime(2007, 1, 1) # there are no snapshots older than 1/1/2007
+
+ for hour in range(0, hourly_backups):
+ target_backup_times.append(last_hour - timedelta(hours = hour))
+
+ for day in range(0, daily_backups):
+ target_backup_times.append(last_midnight - timedelta(days = day))
+
+ for week in range(0, weekly_backups):
+ target_backup_times.append(last_sunday - timedelta(weeks = week))
+
+ one_day = timedelta(days = 1)
+ while start_of_month > oldest_snapshot_date:
+ # append the start of the month to the list of snapshot dates to save:
+ target_backup_times.append(start_of_month)
+ # there's no timedelta setting for one month, so instead:
+ # decrement the day by one, so we go to the final day of the previous month...
+ start_of_month -= one_day
+ # ... and then go to the first day of that previous month:
+ start_of_month = datetime(start_of_month.year, start_of_month.month, 1)
+
+ temp = []
+
+ for t in target_backup_times:
+ if temp.__contains__(t) == False:
+ temp.append(t)
+
+ target_backup_times = temp
+ target_backup_times.reverse() # make the oldest date first
+
+ # get all the snapshots, sort them by date and time, and organize them into one array for each volume:
+ all_snapshots = self.get_all_snapshots(owner = 'self')
+ all_snapshots.sort(cmp = lambda x, y: cmp(x.start_time, y.start_time)) # oldest first
+ snaps_for_each_volume = {}
+ for snap in all_snapshots:
+ # the snapshot name and the volume name are the same. The snapshot name is set from the volume
+ # name at the time the snapshot is taken
+ volume_name = snap.tags.get('Name')
+ if volume_name:
+ # only examine snapshots that have a volume name
+ snaps_for_volume = snaps_for_each_volume.get(volume_name)
+ if not snaps_for_volume:
+ snaps_for_volume = []
+ snaps_for_each_volume[volume_name] = snaps_for_volume
+ snaps_for_volume.append(snap)
+
+ # Do a running comparison of snapshot dates to desired time periods, keeping the oldest snapshot in each
+ # time period and deleting the rest:
+ for volume_name in snaps_for_each_volume:
+ snaps = snaps_for_each_volume[volume_name]
+ snaps = snaps[:-1] # never delete the newest snapshot, so remove it from consideration
+ time_period_number = 0
+ snap_found_for_this_time_period = False
+ for snap in snaps:
+ check_this_snap = True
+ while check_this_snap and time_period_number < target_backup_times.__len__():
+ snap_date = datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
+ if snap_date < target_backup_times[time_period_number]:
+ # the snap date is before the cutoff date. Figure out if it's the first snap in this
+ # date range and act accordingly (since both date the date ranges and the snapshots
+ # are sorted chronologically, we know this snapshot isn't in an earlier date range):
+ if snap_found_for_this_time_period == True:
+ if not snap.tags.get('preserve_snapshot'):
+ # as long as the snapshot wasn't marked with the 'preserve_snapshot' tag, delete it:
+ self.delete_snapshot(snap.id)
+ boto.log.info('Trimmed snapshot %s (%s)' % (snap.tags['Name'], snap.start_time))
+ # go on and look at the next snapshot, leaving the time period alone
+ else:
+ # this was the first snapshot found for this time period. Leave it alone and look at the
+ # next snapshot:
+ snap_found_for_this_time_period = True
+ check_this_snap = False
+ else:
+ # the snap is after the cutoff date. Check it against the next cutoff date
+ time_period_number += 1
+ snap_found_for_this_time_period = False
+
+
+ def get_snapshot_attribute(self, snapshot_id,
+ attribute='createVolumePermission'):
+ """
+ Get information about an attribute of a snapshot. Only one attribute
+ can be specified per call.
+
+ :type snapshot_id: str
+ :param snapshot_id: The ID of the snapshot.
+
+ :type attribute: str
+ :param attribute: The requested attribute. Valid values are:
+
+ * createVolumePermission
+
+ :rtype: list of :class:`boto.ec2.snapshotattribute.SnapshotAttribute`
+ :return: The requested Snapshot attribute
+ """
+ params = {'Attribute' : attribute}
+ if snapshot_id:
+ params['SnapshotId'] = snapshot_id
+ return self.get_object('DescribeSnapshotAttribute', params,
+ SnapshotAttribute, verb='POST')
+
+    def modify_snapshot_attribute(self, snapshot_id,
+                                  attribute='createVolumePermission',
+                                  operation='add', user_ids=None, groups=None):
+        """
+        Changes an attribute of a snapshot.
+
+        :type snapshot_id: string
+        :param snapshot_id: The snapshot id you wish to change
+
+        :type attribute: string
+        :param attribute: The attribute you wish to change. Valid values are:
+                          createVolumePermission
+
+        :type operation: string
+        :param operation: Either add or remove (this is required for changing
+                          snapshot permissions)
+
+        :type user_ids: list
+        :param user_ids: The Amazon IDs of users to add/remove attributes
+
+        :type groups: list
+        :param groups: The groups to add/remove attributes. The only valid
+                       value at this time is 'all'.
+
+        :rtype: bool
+        :return: Whether the operation succeeded or not
+        """
+        params = {'SnapshotId' : snapshot_id,
+                  'Attribute' : attribute,
+                  'OperationType' : operation}
+        if user_ids:
+            self.build_list_params(params, user_ids, 'UserId')
+        if groups:
+            self.build_list_params(params, groups, 'UserGroup')
+        return self.get_status('ModifySnapshotAttribute', params, verb='POST')
+
+ def reset_snapshot_attribute(self, snapshot_id,
+ attribute='createVolumePermission'):
+ """
+ Resets an attribute of a snapshot to its default value.
+
+ :type snapshot_id: string
+ :param snapshot_id: ID of the snapshot
+
+ :type attribute: string
+ :param attribute: The attribute to reset
+
+ :rtype: bool
+ :return: Whether the operation succeeded or not
+ """
+ params = {'SnapshotId' : snapshot_id,
+ 'Attribute' : attribute}
+ return self.get_status('ResetSnapshotAttribute', params, verb='POST')
+
+ # Keypair methods
+
+ def get_all_key_pairs(self, keynames=None, filters=None):
+ """
+ Get all key pairs associated with your account.
+
+ :type keynames: list
+ :param keynames: A list of the names of keypairs to retrieve.
+ If not provided, all key pairs will be returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.keypair.KeyPair`
+ """
+ params = {}
+ if keynames:
+ self.build_list_params(params, keynames, 'KeyName')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeKeyPairs', params, [('item', KeyPair)], verb='POST')
+
+    def get_key_pair(self, keyname):
+        """
+        Convenience method to retrieve a specific keypair (KeyPair).
+
+        :type keyname: string
+        :param keyname: The name of the keypair to retrieve
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The KeyPair specified or None if it is not found
+        """
+        try:
+            return self.get_all_key_pairs(keynames=[keyname])[0]
+        except IndexError: # None of those key pairs available
+            return None
+
+    def create_key_pair(self, key_name):
+        """
+        Create a new key pair for your account.
+        This will create the key pair within the region you
+        are currently connected to.
+
+        :type key_name: string
+        :param key_name: The name of the new keypair
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
+                 The material attribute of the new KeyPair object
+                 will contain the unencrypted PEM encoded RSA private key.
+        """
+        params = {'KeyName':key_name}
+        return self.get_object('CreateKeyPair', params, KeyPair, verb='POST')
+
+ def delete_key_pair(self, key_name):
+ """
+ Delete a key pair from your account.
+
+ :type key_name: string
+ :param key_name: The name of the keypair to delete
+ """
+ params = {'KeyName':key_name}
+ return self.get_status('DeleteKeyPair', params, verb='POST')
+
+    def import_key_pair(self, key_name, public_key_material):
+        """
+        Imports the public key from an RSA key pair that you created
+        with a third-party tool.
+
+        Supported formats:
+
+        * OpenSSH public key format (e.g., the format
+          in ~/.ssh/authorized_keys)
+
+        * Base64 encoded DER format
+
+        * SSH public key file format as specified in RFC4716
+
+        DSA keys are not supported. Make sure your key generator is
+        set up to create RSA keys.
+
+        Supported lengths: 1024, 2048, and 4096.
+
+        :type key_name: string
+        :param key_name: The name of the new keypair
+
+        :type public_key_material: string
+        :param public_key_material: The public key. You must base64 encode
+                                    the public key material before sending
+                                    it to AWS.
+
+        :rtype: :class:`boto.ec2.keypair.KeyPair`
+        :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
+                 The material attribute of the new KeyPair object
+                 will contain the unencrypted PEM encoded RSA private key.
+        """
+        params = {'KeyName' : key_name,
+                  'PublicKeyMaterial' : public_key_material}
+        return self.get_object('ImportKeyPair', params, KeyPair, verb='POST')
+
+ # SecurityGroup methods
+
+ def get_all_security_groups(self, groupnames=None, filters=None):
+ """
+ Get all security groups associated with your account in a region.
+
+ :type groupnames: list
+ :param groupnames: A list of the names of security groups to retrieve.
+ If not provided, all security groups will be
+ returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.securitygroup.SecurityGroup`
+ """
+ params = {}
+ if groupnames:
+ self.build_list_params(params, groupnames, 'GroupName')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeSecurityGroups', params,
+ [('item', SecurityGroup)], verb='POST')
+
+    def create_security_group(self, name, description):
+        """
+        Create a new security group for your account.
+        This will create the security group within the region you
+        are currently connected to.
+
+        :type name: string
+        :param name: The name of the new security group
+
+        :type description: string
+        :param description: The description of the new security group
+
+        :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :return: The newly created
+                 :class:`boto.ec2.securitygroup.SecurityGroup`.
+        """
+        params = {'GroupName':name, 'GroupDescription':description}
+        group = self.get_object('CreateSecurityGroup', params, SecurityGroup, verb='POST')
+        # The CreateSecurityGroup response does not appear to echo the name
+        # and description back, so record them on the object locally.
+        group.name = name
+        group.description = description
+        return group
+
+    def delete_security_group(self, name):
+        """
+        Delete a security group from your account.
+
+        :type name: string
+        :param name: The name of the security group to delete.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName':name}
+        return self.get_status('DeleteSecurityGroup', params, verb='POST')
+
+ def _authorize_deprecated(self, group_name, src_security_group_name=None,
+ src_security_group_owner_id=None):
+ """
+ This method is called only when someone tries to authorize a group
+ without specifying a from_port or to_port. Until recently, that was
+ the only way to do group authorization but the EC2 API has been
+ changed to now require a from_port and to_port when specifying a
+ group. This is a much better approach but I don't want to break
+ existing boto applications that depend on the old behavior, hence
+ this kludge.
+
+ :type group_name: string
+ :param group_name: The name of the security group you are adding
+ the rule to.
+
+ :type src_security_group_name: string
+ :param src_security_group_name: The name of the security group you are
+ granting access to.
+
+ :type src_security_group_owner_id: string
+ :param src_security_group_owner_id: The ID of the owner of the security
+ group you are granting access to.
+
+ :rtype: bool
+ :return: True if successful.
+ """
+ warnings.warn('FromPort and ToPort now required for group authorization',
+ DeprecationWarning)
+ params = {'GroupName':group_name}
+ if src_security_group_name:
+ params['SourceSecurityGroupName'] = src_security_group_name
+ if src_security_group_owner_id:
+ params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+ return self.get_status('AuthorizeSecurityGroupIngress', params, verb='POST')
+
+ def authorize_security_group(self, group_name, src_security_group_name=None,
+ src_security_group_owner_id=None,
+ ip_protocol=None, from_port=None, to_port=None,
+ cidr_ip=None):
+ """
+ Add a new rule to an existing security group.
+ You need to pass in either src_security_group_name and
+ src_security_group_owner_id OR ip_protocol, from_port, to_port,
+ and cidr_ip. In other words, either you are authorizing another
+ group or you are authorizing some ip-based rule.
+
+ :type group_name: string
+ :param group_name: The name of the security group you are adding
+ the rule to.
+
+ :type src_security_group_name: string
+ :param src_security_group_name: The name of the security group you are
+ granting access to.
+
+ :type src_security_group_owner_id: string
+ :param src_security_group_owner_id: The ID of the owner of the security
+ group you are granting access to.
+
+ :type ip_protocol: string
+ :param ip_protocol: Either tcp | udp | icmp
+
+ :type from_port: int
+ :param from_port: The beginning port number you are enabling
+
+ :type to_port: int
+ :param to_port: The ending port number you are enabling
+
+ :type cidr_ip: string
+ :param cidr_ip: The CIDR block you are providing access to.
+ See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
+
+ :rtype: bool
+ :return: True if successful.
+ """
+ if src_security_group_name:
+ if from_port is None and to_port is None and ip_protocol is None:
+ return self._authorize_deprecated(group_name,
+ src_security_group_name,
+ src_security_group_owner_id)
+ params = {'GroupName':group_name}
+ if src_security_group_name:
+ params['IpPermissions.1.Groups.1.GroupName'] = src_security_group_name
+ if src_security_group_owner_id:
+ params['IpPermissions.1.Groups.1.UserId'] = src_security_group_owner_id
+ if ip_protocol:
+ params['IpPermissions.1.IpProtocol'] = ip_protocol
+ if from_port:
+ params['IpPermissions.1.FromPort'] = from_port
+ if to_port:
+ params['IpPermissions.1.ToPort'] = to_port
+ if cidr_ip:
+ params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+ return self.get_status('AuthorizeSecurityGroupIngress', params, verb='POST')
+
+ def _revoke_deprecated(self, group_name, src_security_group_name=None,
+ src_security_group_owner_id=None):
+ """
+ This method is called only when someone tries to revoke a group
+ without specifying a from_port or to_port. Until recently, that was
+ the only way to do group revocation but the EC2 API has been
+ changed to now require a from_port and to_port when specifying a
+ group. This is a much better approach but I don't want to break
+ existing boto applications that depend on the old behavior, hence
+ this kludge.
+
+ :type group_name: string
+ :param group_name: The name of the security group you are adding
+ the rule to.
+
+ :type src_security_group_name: string
+ :param src_security_group_name: The name of the security group you are
+ granting access to.
+
+ :type src_security_group_owner_id: string
+ :param src_security_group_owner_id: The ID of the owner of the security
+ group you are granting access to.
+
+ :rtype: bool
+ :return: True if successful.
+ """
+ warnings.warn('FromPort and ToPort now required for group authorization',
+ DeprecationWarning)
+ params = {'GroupName':group_name}
+ if src_security_group_name:
+ params['SourceSecurityGroupName'] = src_security_group_name
+ if src_security_group_owner_id:
+ params['SourceSecurityGroupOwnerId'] = src_security_group_owner_id
+ return self.get_status('RevokeSecurityGroupIngress', params, verb='POST')
+
+ def revoke_security_group(self, group_name, src_security_group_name=None,
+ src_security_group_owner_id=None,
+ ip_protocol=None, from_port=None, to_port=None,
+ cidr_ip=None):
+ """
+ Remove an existing rule from an existing security group.
+ You need to pass in either src_security_group_name and
+ src_security_group_owner_id OR ip_protocol, from_port, to_port,
+ and cidr_ip. In other words, either you are revoking another
+ group or you are revoking some ip-based rule.
+
+ :type group_name: string
+ :param group_name: The name of the security group you are removing
+ the rule from.
+
+ :type src_security_group_name: string
+ :param src_security_group_name: The name of the security group you are
+ revoking access to.
+
+ :type src_security_group_owner_id: string
+ :param src_security_group_owner_id: The ID of the owner of the security
+ group you are revoking access to.
+
+ :type ip_protocol: string
+ :param ip_protocol: Either tcp | udp | icmp
+
+ :type from_port: int
+ :param from_port: The beginning port number you are disabling
+
+ :type to_port: int
+ :param to_port: The ending port number you are disabling
+
+ :type cidr_ip: string
+ :param cidr_ip: The CIDR block you are revoking access to.
+ See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
+
+ :rtype: bool
+ :return: True if successful.
+ """
+ if src_security_group_name:
+ if from_port is None and to_port is None and ip_protocol is None:
+ return self._revoke_deprecated(group_name,
+ src_security_group_name,
+ src_security_group_owner_id)
+ params = {'GroupName':group_name}
+ if src_security_group_name:
+ params['IpPermissions.1.Groups.1.GroupName'] = src_security_group_name
+ if src_security_group_owner_id:
+ params['IpPermissions.1.Groups.1.UserId'] = src_security_group_owner_id
+ if ip_protocol:
+ params['IpPermissions.1.IpProtocol'] = ip_protocol
+ if from_port:
+ params['IpPermissions.1.FromPort'] = from_port
+ if to_port:
+ params['IpPermissions.1.ToPort'] = to_port
+ if cidr_ip:
+ params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
+ return self.get_status('RevokeSecurityGroupIngress', params, verb='POST')
+
+ #
+ # Regions
+ #
+
+ def get_all_regions(self, region_names=None, filters=None):
+ """
+ Get all available regions for the EC2 service.
+
+ :type region_names: list of str
+ :param region_names: Names of regions to limit output
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
+ """
+ params = {}
+ if region_names:
+ self.build_list_params(params, region_names, 'RegionName')
+ if filters:
+ self.build_filter_params(params, filters)
+ regions = self.get_list('DescribeRegions', params, [('item', RegionInfo)], verb='POST')
+ for region in regions:
+ region.connection_cls = EC2Connection
+ return regions
+
+ #
+ # Reservation methods
+ #
+
+ def get_all_reserved_instances_offerings(self, reserved_instances_id=None,
+ instance_type=None,
+ availability_zone=None,
+ product_description=None,
+ filters=None):
+ """
+ Describes Reserved Instance offerings that are available for purchase.
+
+ :type reserved_instances_id: str
+ :param reserved_instances_id: Displays Reserved Instances with the
+ specified offering IDs.
+
+ :type instance_type: str
+ :param instance_type: Displays Reserved Instances of the specified
+ instance type.
+
+ :type availability_zone: str
+ :param availability_zone: Displays Reserved Instances within the
+ specified Availability Zone.
+
+ :type product_description: str
+ :param product_description: Displays Reserved Instances with the
+ specified product description.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstancesOffering`
+ """
+ params = {}
+ if reserved_instances_id:
+ params['ReservedInstancesId'] = reserved_instances_id
+ if instance_type:
+ params['InstanceType'] = instance_type
+ if availability_zone:
+ params['AvailabilityZone'] = availability_zone
+ if product_description:
+ params['ProductDescription'] = product_description
+ if filters:
+ self.build_filter_params(params, filters)
+
+ return self.get_list('DescribeReservedInstancesOfferings',
+ params, [('item', ReservedInstancesOffering)], verb='POST')
+
+    def get_all_reserved_instances(self, reserved_instances_id=None,
+                                   filters=None):
+        """
+        Describes the Reserved Instances that you have purchased.
+
+        :type reserved_instances_id: list
+        :param reserved_instances_id: A list of the reserved instance ids that
+                                      will be returned. If not provided, all
+                                      reserved instances will be returned.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :rtype: list
+        :return: A list of :class:`boto.ec2.reservedinstance.ReservedInstance`
+        """
+        params = {}
+        if reserved_instances_id:
+            self.build_list_params(params, reserved_instances_id,
+                                   'ReservedInstancesId')
+        if filters:
+            self.build_filter_params(params, filters)
+        return self.get_list('DescribeReservedInstances',
+                             params, [('item', ReservedInstance)], verb='POST')
+
+ def purchase_reserved_instance_offering(self, reserved_instances_offering_id,
+ instance_count=1):
+ """
+ Purchase a Reserved Instance for use with your account.
+ ** CAUTION **
+ This request can result in large amounts of money being charged to your
+ AWS account. Use with caution!
+
+ :type reserved_instances_offering_id: string
+ :param reserved_instances_offering_id: The offering ID of the Reserved
+ Instance to purchase
+
+ :type instance_count: int
+ :param instance_count: The number of Reserved Instances to purchase.
+ Default value is 1.
+
+ :rtype: :class:`boto.ec2.reservedinstance.ReservedInstance`
+ :return: The newly created Reserved Instance
+ """
+ params = {'ReservedInstancesOfferingId' : reserved_instances_offering_id,
+ 'InstanceCount' : instance_count}
+ return self.get_object('PurchaseReservedInstancesOffering', params,
+ ReservedInstance, verb='POST')
+
+ #
+ # Monitoring
+ #
+
+ def monitor_instance(self, instance_id):
+ """
+ Enable CloudWatch monitoring for the supplied instance.
+
+ :type instance_id: string
+ :param instance_id: The instance id
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
+ """
+ params = {'InstanceId' : instance_id}
+ return self.get_list('MonitorInstances', params,
+ [('item', InstanceInfo)], verb='POST')
+
+ def unmonitor_instance(self, instance_id):
+ """
+ Disable CloudWatch monitoring for the supplied instance.
+
+ :type instance_id: string
+ :param instance_id: The instance id
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.instanceinfo.InstanceInfo`
+ """
+ params = {'InstanceId' : instance_id}
+ return self.get_list('UnmonitorInstances', params,
+ [('item', InstanceInfo)], verb='POST')
+
+ #
+ # Bundle Windows Instances
+ #
+
+    def bundle_instance(self, instance_id,
+                        s3_bucket,
+                        s3_prefix,
+                        s3_upload_policy):
+        """
+        Bundle Windows instance.
+
+        :type instance_id: string
+        :param instance_id: The instance id
+
+        :type s3_bucket: string
+        :param s3_bucket: The bucket in which the AMI should be stored.
+
+        :type s3_prefix: string
+        :param s3_prefix: The beginning of the file name for the AMI.
+
+        :type s3_upload_policy: string
+        :param s3_upload_policy: Base64 encoded policy that specifies condition
+                                 and permissions for Amazon EC2 to upload the
+                                 user's image into Amazon S3.
+
+        :return: A BundleInstanceTask object describing the bundling request.
+        """
+
+        params = {'InstanceId' : instance_id,
+                  'Storage.S3.Bucket' : s3_bucket,
+                  'Storage.S3.Prefix' : s3_prefix,
+                  'Storage.S3.UploadPolicy' : s3_upload_policy}
+        # The upload policy must be signed with S3-style credentials, so an
+        # auth handler capable of S3 signing is fetched rather than using
+        # the EC2 query signature of this connection.
+        s3auth = boto.auth.get_auth_handler(None, boto.config, self.provider, ['s3'])
+        params['Storage.S3.AWSAccessKeyId'] = self.aws_access_key_id
+        signature = s3auth.sign_string(s3_upload_policy)
+        params['Storage.S3.UploadPolicySignature'] = signature
+        return self.get_object('BundleInstance', params, BundleInstanceTask, verb='POST')
+
+    def get_all_bundle_tasks(self, bundle_ids=None, filters=None):
+        """
+        Retrieve current bundling tasks. If no bundle id is specified, all
+        tasks are retrieved.
+
+        :type bundle_ids: list
+        :param bundle_ids: A list of strings containing identifiers for
+                           previously created bundling tasks.
+
+        :type filters: dict
+        :param filters: Optional filters that can be used to limit
+                        the results returned.  Filters are provided
+                        in the form of a dictionary consisting of
+                        filter names as the key and filter values
+                        as the value.  The set of allowable filter
+                        names/values is dependent on the request
+                        being performed.  Check the EC2 API guide
+                        for details.
+
+        :rtype: list
+        :return: A list of BundleInstanceTask objects, one per task.
+        """
+
+        params = {}
+        if bundle_ids:
+            self.build_list_params(params, bundle_ids, 'BundleId')
+        if filters:
+            self.build_filter_params(params, filters)
+        return self.get_list('DescribeBundleTasks', params,
+                             [('item', BundleInstanceTask)], verb='POST')
+
+ def cancel_bundle_task(self, bundle_id):
+ """
+ Cancel a previously submitted bundle task
+
+ :type bundle_id: string
+ :param bundle_id: The identifier of the bundle task to cancel.
+ """
+
+ params = {'BundleId' : bundle_id}
+ return self.get_object('CancelBundleTask', params, BundleInstanceTask, verb='POST')
+
+ def get_password_data(self, instance_id):
+ """
+ Get encrypted administrator password for a Windows instance.
+
+ :type instance_id: string
+ :param instance_id: The identifier of the instance to retrieve the
+ password for.
+ """
+
+ params = {'InstanceId' : instance_id}
+ rs = self.get_object('GetPasswordData', params, ResultSet, verb='POST')
+ return rs.passwordData
+
+ #
+ # Cluster Placement Groups
+ #
+
+ def get_all_placement_groups(self, groupnames=None, filters=None):
+ """
+ Get all placement groups associated with your account in a region.
+
+ :type groupnames: list
+ :param groupnames: A list of the names of placement groups to retrieve.
+ If not provided, all placement groups will be
+ returned.
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.placementgroup.PlacementGroup`
+ """
+ params = {}
+ if groupnames:
+ self.build_list_params(params, groupnames, 'GroupName')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribePlacementGroups', params,
+ [('item', PlacementGroup)], verb='POST')
+
+ def create_placement_group(self, name, strategy='cluster'):
+ """
+ Create a new placement group for your account.
+ This will create the placement group within the region you
+ are currently connected to.
+
+ :type name: string
+ :param name: The name of the new placement group
+
+ :type strategy: string
+ :param strategy: The placement strategy of the new placement group.
+ Currently, the only acceptable value is "cluster".
+
+ :rtype: :class:`boto.ec2.placementgroup.PlacementGroup`
+ :return: The newly created :class:`boto.ec2.keypair.KeyPair`.
+ """
+ params = {'GroupName':name, 'Strategy':strategy}
+ group = self.get_status('CreatePlacementGroup', params, verb='POST')
+ return group
+
+    def delete_placement_group(self, name):
+        """
+        Delete a placement group from your account.
+
+        :type name: string
+        :param name: The name of the placement group to delete.
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        params = {'GroupName':name}
+        return self.get_status('DeletePlacementGroup', params, verb='POST')
+
+ # Tag methods
+
+ def build_tag_param_list(self, params, tags):
+ keys = tags.keys()
+ keys.sort()
+ i = 1
+ for key in keys:
+ value = tags[key]
+ params['Tag.%d.Key'%i] = key
+ if value is not None:
+ params['Tag.%d.Value'%i] = value
+ i += 1
+
+ def get_all_tags(self, tags=None, filters=None):
+ """
+ Retrieve all the metadata tags associated with your account.
+
+ :type tags: list
+ :param tags: A list of mumble
+
+ :type filters: dict
+ :param filters: Optional filters that can be used to limit
+ the results returned. Filters are provided
+ in the form of a dictionary consisting of
+ filter names as the key and filter values
+ as the value. The set of allowable filter
+ names/values is dependent on the request
+ being performed. Check the EC2 API guide
+ for details.
+
+ :rtype: dict
+ :return: A dictionary containing metadata tags
+ """
+ params = {}
+ if tags:
+ self.build_list_params(params, instance_ids, 'InstanceId')
+ if filters:
+ self.build_filter_params(params, filters)
+ return self.get_list('DescribeTags', params, [('item', Tag)], verb='POST')
+
+ def create_tags(self, resource_ids, tags):
+ """
+ Create new metadata tags for the specified resource ids.
+
+ :type resource_ids: list
+ :param resource_ids: List of strings
+
+ :type tags: dict
+ :param tags: A dictionary containing the name/value pairs
+
+ """
+ params = {}
+ self.build_list_params(params, resource_ids, 'ResourceId')
+ self.build_tag_param_list(params, tags)
+ return self.get_status('CreateTags', params, verb='POST')
+
+ def delete_tags(self, resource_ids, tags):
+ """
+ Delete metadata tags for the specified resource ids.
+
+ :type resource_ids: list
+ :param resource_ids: List of strings
+
+ :type tags: dict or list
+ :param tags: Either a dictionary containing name/value pairs
+ or a list containing just tag names.
+ If you pass in a dictionary, the values must
+ match the actual tag values or the tag will
+ not be deleted.
+
+ """
+ if isinstance(tags, list):
+ tags = {}.fromkeys(tags, None)
+ params = {}
+ self.build_list_params(params, resource_ids, 'ResourceId')
+ self.build_tag_param_list(params, tags)
+ return self.get_status('DeleteTags', params, verb='POST')
+
diff --git a/boto/ec2/ec2object.py b/boto/ec2/ec2object.py
new file mode 100644
index 0000000..6e37596
--- /dev/null
+++ b/boto/ec2/ec2object.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Object
+"""
+from boto.ec2.tag import TagSet
+
+class EC2Object(object):
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ if self.connection and hasattr(self.connection, 'region'):
+ self.region = connection.region
+ else:
+ self.region = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ setattr(self, name, value)
+
+
+class TaggedEC2Object(EC2Object):
+    """
+    Any EC2 resource that can be tagged should be represented
+    by a Python object that subclasses this class. This class
+    has the mechanism in place to handle the tagSet element in
+    the Describe* responses. If tags are found, it will create
+    a TagSet object and allow it to parse and collect the tags
+    into a dict that is stored in the "tags" attribute of the
+    object.
+    """
+
+    def __init__(self, connection=None):
+        EC2Object.__init__(self, connection)
+        # Always start with an empty TagSet so ``tags`` is usable even when
+        # the response contains no tagSet element.
+        self.tags = TagSet()
+
+    def startElement(self, name, attrs, connection):
+        # Hand the tagSet sub-tree to the TagSet object for parsing;
+        # everything else falls through to default handling.
+        if name == 'tagSet':
+            return self.tags
+        else:
+            return None
+
+    def add_tag(self, key, value=None):
+        """
+        Add a tag to this object. Tag's are stored by AWS and can be used
+        to organize and filter resources. Adding a tag involves a round-trip
+        to the EC2 service.
+
+        :type key: str
+        :param key: The key or name of the tag being stored.
+
+        :type value: str
+        :param value: An optional value that can be stored with the tag.
+        """
+        # Return status is ignored here; presumably a failed request raises
+        # from the connection layer -- TODO confirm.
+        status = self.connection.create_tags([self.id], {key : value})
+        if self.tags is None:
+            # Defensive: parsing may have left ``tags`` set to None.
+            self.tags = TagSet()
+        self.tags[key] = value
+
+    def remove_tag(self, key, value=None):
+        """
+        Remove a tag from this object. Removing a tag involves a round-trip
+        to the EC2 service.
+
+        :type key: str
+        :param key: The key or name of the tag being stored.
+
+        :type value: str
+        :param value: An optional value that can be stored with the tag.
+                      If a value is provided, it must match the value
+                      currently stored in EC2. If not, the tag will not
+                      be removed.
+        """
+        # When a value is supplied, send a dict so the service can verify it;
+        # a bare list of keys deletes the tag unconditionally.
+        if value:
+            tags = {key : value}
+        else:
+            tags = [key]
+        status = self.connection.delete_tags([self.id], tags)
+        if key in self.tags:
+            del self.tags[key]
diff --git a/boto/ec2/elb/__init__.py b/boto/ec2/elb/__init__.py
new file mode 100644
index 0000000..f4061d3
--- /dev/null
+++ b/boto/ec2/elb/__init__.py
@@ -0,0 +1,427 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+This module provides an interface to the Elastic Compute Cloud (EC2)
+load balancing service from AWS.
+"""
+from boto.connection import AWSQueryConnection
+from boto.ec2.instanceinfo import InstanceInfo
+from boto.ec2.elb.loadbalancer import LoadBalancer
+from boto.ec2.elb.instancestate import InstanceState
+from boto.ec2.elb.healthcheck import HealthCheck
+from boto.regioninfo import RegionInfo
+import boto
+
# Region name -> hostname of the ELB API endpoint in that region.
RegionData = {
    'us-east-1' : 'elasticloadbalancing.us-east-1.amazonaws.com',
    'us-west-1' : 'elasticloadbalancing.us-west-1.amazonaws.com',
    'eu-west-1' : 'elasticloadbalancing.eu-west-1.amazonaws.com',
    'ap-southeast-1' : 'elasticloadbalancing.ap-southeast-1.amazonaws.com'}
+
def regions():
    """
    Get all available regions for the Elastic Load Balancing service.

    (The previous docstring incorrectly said "SDB service".)

    :rtype: list
    :return: A list of :class:`boto.regioninfo.RegionInfo` instances
    """
    return [RegionInfo(name=region_name,
                       endpoint=endpoint,
                       connection_cls=ELBConnection)
            for region_name, endpoint in RegionData.items()]
+
def connect_to_region(region_name, **kw_params):
    """
    Given a valid region name, return a
    :class:`boto.ec2.elb.ELBConnection` for that region.

    :param str region_name: The name of the region to connect to.

    :rtype: :class:`boto.ec2.elb.ELBConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
             name is given
    """
    matches = [region for region in regions() if region.name == region_name]
    if not matches:
        return None
    return matches[0].connect(**kw_params)
+
class ELBConnection(AWSQueryConnection):
    """
    A connection to the Elastic Load Balancing service.

    Method parameters map directly onto the ELB Query API; most calls
    return parsed result objects from the :mod:`boto.ec2.elb` submodules.
    """

    APIVersion = boto.config.get('Boto', 'elb_version', '2010-07-01')
    DefaultRegionName = boto.config.get('Boto', 'elb_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'elb_region_endpoint',
                                            'elasticloadbalancing.amazonaws.com')

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=False, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/'):
        """
        Init method to create a new connection to EC2 Load Balancing Service.

        B{Note:} The region argument is overridden by the region specified in
        the boto configuration file.
        """
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path)

    def _required_auth_capability(self):
        # ELB uses the same request-signing scheme as EC2.
        return ['ec2']

    def build_list_params(self, params, items, label):
        """
        Expand ``items`` into indexed query parameters ``label % 1``,
        ``label % 2``, ...  A bare string is treated as a one-element list.
        """
        if isinstance(items, str):
            items = [items]
        for index, item in enumerate(items):
            params[label % (index + 1)] = item

    def _build_listener_params(self, params, listeners):
        """
        Expand (lb_port, instance_port, protocol[, ssl_certificate_id])
        tuples into Listeners.member.N.* query parameters.  The fourth
        element is required for, and only read for, HTTPS listeners.
        Shared by create_load_balancer and create_load_balancer_listeners,
        which previously duplicated this loop.
        """
        for index, listener in enumerate(listeners):
            prefix = 'Listeners.member.%d' % (index + 1)
            params[prefix + '.LoadBalancerPort'] = listener[0]
            params[prefix + '.InstancePort'] = listener[1]
            params[prefix + '.Protocol'] = listener[2]
            if listener[2] == 'HTTPS':
                params[prefix + '.SSLCertificateId'] = listener[3]

    def get_all_load_balancers(self, load_balancer_names=None):
        """
        Retrieve all load balancers associated with your account.

        :type load_balancer_names: list
        :param load_balancer_names: An optional list of load balancer names

        :rtype: list
        :return: A list of :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
        """
        params = {}
        if load_balancer_names:
            self.build_list_params(params, load_balancer_names,
                                   'LoadBalancerNames.member.%d')
        return self.get_list('DescribeLoadBalancers', params,
                             [('member', LoadBalancer)])

    def create_load_balancer(self, name, zones, listeners):
        """
        Create a new load balancer for your account.

        :type name: string
        :param name: The mnemonic name associated with the new load balancer

        :type zones: List of strings
        :param zones: The names of the availability zone(s) to add.

        :type listeners: List of tuples
        :param listeners: Each tuple contains three or four values,
                          (LoadBalancerPortNumber, InstancePortNumber,
                          Protocol, [SSLCertificateId]) where
                          LoadBalancerPortNumber and InstancePortNumber are
                          integer values between 1 and 65535, Protocol is a
                          string containing either 'TCP', 'HTTP' or 'HTTPS';
                          SSLCertificateId is the ARN of an AWS IAM
                          certificate, and must be specified when doing HTTPS.

        :rtype: :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
        :return: The newly created
                 :class:`boto.ec2.elb.loadbalancer.LoadBalancer`
        """
        params = {'LoadBalancerName': name}
        self._build_listener_params(params, listeners)
        self.build_list_params(params, zones, 'AvailabilityZones.member.%d')
        load_balancer = self.get_object('CreateLoadBalancer', params,
                                        LoadBalancer)
        # The Create response only carries the DNS name; fill in what we
        # already know so the returned object is immediately usable.
        load_balancer.name = name
        load_balancer.listeners = listeners
        load_balancer.availability_zones = zones
        return load_balancer

    def create_load_balancer_listeners(self, name, listeners):
        """
        Creates a Listener (or group of listeners) for an existing
        Load Balancer.

        :type name: string
        :param name: The name of the load balancer to create the listeners for

        :type listeners: List of tuples
        :param listeners: Each tuple contains three or four values,
                          (LoadBalancerPortNumber, InstancePortNumber,
                          Protocol, [SSLCertificateId]) -- same shape as in
                          :meth:`create_load_balancer`.  SSLCertificateId is
                          the ARN of an AWS IAM certificate and must be
                          specified when doing HTTPS.

        :return: The status of the request
        """
        params = {'LoadBalancerName': name}
        self._build_listener_params(params, listeners)
        return self.get_status('CreateLoadBalancerListeners', params)

    def delete_load_balancer(self, name):
        """
        Delete a Load Balancer from your account.

        :type name: string
        :param name: The name of the Load Balancer to delete
        """
        params = {'LoadBalancerName': name}
        return self.get_status('DeleteLoadBalancer', params)

    def delete_load_balancer_listeners(self, name, ports):
        """
        Deletes a load balancer listener (or group of listeners).

        :type name: string
        :param name: The name of the load balancer the listeners belong to

        :type ports: List int
        :param ports: Each int represents the port on the ELB to be removed

        :return: The status of the request
        """
        params = {'LoadBalancerName': name}
        self.build_list_params(params, ports, 'LoadBalancerPorts.member.%d')
        return self.get_status('DeleteLoadBalancerListeners', params)

    def enable_availability_zones(self, load_balancer_name, zones_to_add):
        """
        Add availability zones to an existing Load Balancer.
        All zones must be in the same region as the Load Balancer.
        Adding zones that are already registered with the Load Balancer
        has no effect.

        :type load_balancer_name: string
        :param load_balancer_name: The name of the Load Balancer

        :type zones_to_add: List of strings
        :param zones_to_add: The name of the zone(s) to add.

        :rtype: List of strings
        :return: An updated list of zones for this Load Balancer.
        """
        params = {'LoadBalancerName': load_balancer_name}
        self.build_list_params(params, zones_to_add,
                               'AvailabilityZones.member.%d')
        return self.get_list('EnableAvailabilityZonesForLoadBalancer',
                             params, None)

    def disable_availability_zones(self, load_balancer_name, zones_to_remove):
        """
        Remove availability zones from an existing Load Balancer.
        All zones must be in the same region as the Load Balancer.
        Removing zones that are not registered with the Load Balancer
        has no effect.
        You cannot remove all zones from a Load Balancer.

        :type load_balancer_name: string
        :param load_balancer_name: The name of the Load Balancer

        :type zones_to_remove: List of strings
        :param zones_to_remove: The name of the zone(s) to remove.

        :rtype: List of strings
        :return: An updated list of zones for this Load Balancer.
        """
        params = {'LoadBalancerName': load_balancer_name}
        self.build_list_params(params, zones_to_remove,
                               'AvailabilityZones.member.%d')
        return self.get_list('DisableAvailabilityZonesForLoadBalancer',
                             params, None)

    def register_instances(self, load_balancer_name, instances):
        """
        Add new Instances to an existing Load Balancer.

        :type load_balancer_name: string
        :param load_balancer_name: The name of the Load Balancer

        :type instances: List of strings
        :param instances: The instance ID's of the EC2 instances to add.

        :rtype: List of :class:`boto.ec2.instanceinfo.InstanceInfo`
        :return: An updated list of instances for this Load Balancer.
        """
        params = {'LoadBalancerName': load_balancer_name}
        self.build_list_params(params, instances,
                               'Instances.member.%d.InstanceId')
        return self.get_list('RegisterInstancesWithLoadBalancer', params,
                             [('member', InstanceInfo)])

    def deregister_instances(self, load_balancer_name, instances):
        """
        Remove Instances from an existing Load Balancer.

        :type load_balancer_name: string
        :param load_balancer_name: The name of the Load Balancer

        :type instances: List of strings
        :param instances: The instance ID's of the EC2 instances to remove.

        :rtype: List of :class:`boto.ec2.instanceinfo.InstanceInfo`
        :return: An updated list of instances for this Load Balancer.
        """
        params = {'LoadBalancerName': load_balancer_name}
        self.build_list_params(params, instances,
                               'Instances.member.%d.InstanceId')
        return self.get_list('DeregisterInstancesFromLoadBalancer', params,
                             [('member', InstanceInfo)])

    def describe_instance_health(self, load_balancer_name, instances=None):
        """
        Get current state of all Instances registered to a Load Balancer.

        :type load_balancer_name: string
        :param load_balancer_name: The name of the Load Balancer

        :type instances: List of strings
        :param instances: The instance ID's of the EC2 instances
                          to return status for.  If not provided,
                          the state of all instances will be returned.

        :rtype: List of :class:`boto.ec2.elb.instancestate.InstanceState`
        :return: list of state info for instances in this Load Balancer.
        """
        params = {'LoadBalancerName': load_balancer_name}
        if instances:
            self.build_list_params(params, instances,
                                   'Instances.member.%d.InstanceId')
        return self.get_list('DescribeInstanceHealth', params,
                             [('member', InstanceState)])

    def configure_health_check(self, name, health_check):
        """
        Define a health check for the EndPoints.

        :type name: string
        :param name: The mnemonic name associated with the load balancer

        :type health_check: :class:`boto.ec2.elb.healthcheck.HealthCheck`
        :param health_check: A HealthCheck object populated with the desired
                             values.

        :rtype: :class:`boto.ec2.elb.healthcheck.HealthCheck`
        :return: The updated :class:`boto.ec2.elb.healthcheck.HealthCheck`
        """
        params = {'LoadBalancerName': name,
                  'HealthCheck.Timeout': health_check.timeout,
                  'HealthCheck.Target': health_check.target,
                  'HealthCheck.Interval': health_check.interval,
                  'HealthCheck.UnhealthyThreshold': health_check.unhealthy_threshold,
                  'HealthCheck.HealthyThreshold': health_check.healthy_threshold}
        return self.get_object('ConfigureHealthCheck', params, HealthCheck)

    def set_lb_listener_SSL_certificate(self, lb_name, lb_port,
                                        ssl_certificate_id):
        """
        Sets the certificate that terminates the specified listener's SSL
        connections.  The specified certificate replaces any prior
        certificate that was used on the same LoadBalancer and port.
        """
        params = {'LoadBalancerName': lb_name,
                  'LoadBalancerPort': lb_port,
                  'SSLCertificateId': ssl_certificate_id}
        return self.get_status('SetLoadBalancerListenerSSLCertificate', params)

    def create_app_cookie_stickiness_policy(self, name, lb_name, policy_name):
        """
        Generates a stickiness policy with sticky session lifetimes that
        follow that of an application-generated cookie.  This policy can
        only be associated with HTTP listeners.

        The lifetime of the special Elastic Load Balancing cookie follows
        the lifetime of the application-generated cookie specified in the
        policy configuration; the load balancer only inserts a new
        stickiness cookie when the application response includes a new
        application cookie.  If the application cookie is explicitly
        removed or expires, the session stops being sticky until a new
        application cookie is issued.
        """
        params = {'CookieName': name,
                  'LoadBalancerName': lb_name,
                  'PolicyName': policy_name}
        return self.get_status('CreateAppCookieStickinessPolicy', params)

    def create_lb_cookie_stickiness_policy(self, cookie_expiration_period,
                                           lb_name, policy_name):
        """
        Generates a stickiness policy with sticky session lifetimes
        controlled by the lifetime of the browser (user-agent) or a
        specified expiration period.  This policy can only be associated
        with HTTP listeners.

        The load balancer uses a special cookie to track the backend
        server for each request: if the cookie is present, the request is
        sent to the server it names; otherwise a server is chosen by the
        normal load balancing algorithm and the cookie is inserted into
        the response.  Cookie validity is based on the expiration time
        given in the policy configuration.
        """
        params = {'CookieExpirationPeriod': cookie_expiration_period,
                  'LoadBalancerName': lb_name,
                  'PolicyName': policy_name}
        return self.get_status('CreateLBCookieStickinessPolicy', params)

    def delete_lb_policy(self, lb_name, policy_name):
        """
        Deletes a policy from the LoadBalancer.  The specified policy must
        not be enabled for any listeners.
        """
        params = {'LoadBalancerName': lb_name,
                  'PolicyName': policy_name}
        return self.get_status('DeleteLoadBalancerPolicy', params)

    def set_lb_policies_of_listener(self, lb_name, lb_port, policies):
        """
        Associates, updates, or disables a policy with a listener on the
        load balancer.  Currently only zero (0) or one (1) policy can be
        associated with a listener.
        """
        params = {'LoadBalancerName': lb_name,
                  'LoadBalancerPort': lb_port}
        self.build_list_params(params, policies, 'PolicyNames.member.%d')
        return self.get_status('SetLoadBalancerPoliciesOfListener', params)
+
+
diff --git a/boto/ec2/elb/healthcheck.py b/boto/ec2/elb/healthcheck.py
new file mode 100644
index 0000000..5a3edbc
--- /dev/null
+++ b/boto/ec2/elb/healthcheck.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class HealthCheck(object):
    """
    Represents an EC2 Access Point (load balancer) health check
    configuration, acting as both a parse target for the XML response
    and a value holder for ConfigureHealthCheck requests.
    """

    def __init__(self, access_point=None, interval=30, target=None,
                 healthy_threshold=3, timeout=5, unhealthy_threshold=5):
        self.access_point = access_point
        self.interval = interval
        self.target = target
        self.healthy_threshold = healthy_threshold
        self.timeout = timeout
        self.unhealthy_threshold = unhealthy_threshold

    def __repr__(self):
        return 'HealthCheck:%s' % self.target

    def startElement(self, name, attrs, connection):
        # No nested elements inside a HealthCheck response block.
        return None

    def endElement(self, name, value, connection):
        # Numeric response fields arrive as strings and are converted here.
        int_fields = {'Interval': 'interval',
                      'HealthyThreshold': 'healthy_threshold',
                      'Timeout': 'timeout',
                      'UnhealthyThreshold': 'unhealthy_threshold'}
        if name in int_fields:
            setattr(self, int_fields[name], int(value))
        elif name == 'Target':
            self.target = value
        else:
            # Unknown elements are kept verbatim under their XML name.
            setattr(self, name, value)

    def update(self):
        """Push these settings to the service and refresh the local values
        from the configuration it returns.  No-op without an access point."""
        if not self.access_point:
            return
        # NOTE(review): ``self.connection`` is never set in __init__; update()
        # assumes a caller has attached it externally -- confirm.
        new_hc = self.connection.configure_health_check(self.access_point,
                                                        self)
        for attr in ('interval', 'target', 'healthy_threshold',
                     'unhealthy_threshold', 'timeout'):
            setattr(self, attr, getattr(new_hc, attr))
+
+
diff --git a/boto/ec2/elb/instancestate.py b/boto/ec2/elb/instancestate.py
new file mode 100644
index 0000000..4a9b0d4
--- /dev/null
+++ b/boto/ec2/elb/instancestate.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class InstanceState(object):
    """
    Represents the health state of one EC2 instance registered with a
    Load Balancer, as reported by DescribeInstanceHealth.
    """

    def __init__(self, load_balancer=None, description=None,
                 state=None, instance_id=None, reason_code=None):
        self.load_balancer = load_balancer
        self.description = description
        self.state = state
        self.instance_id = instance_id
        self.reason_code = reason_code

    def __repr__(self):
        return 'InstanceState:(%s,%s)' % (self.instance_id, self.state)

    def startElement(self, name, attrs, connection):
        # InstanceState has no nested response elements.
        return None

    def endElement(self, name, value, connection):
        # Known response elements map to snake_case attributes; anything
        # else is stored verbatim under its XML name.
        field_map = {'Description': 'description',
                     'State': 'state',
                     'InstanceId': 'instance_id',
                     'ReasonCode': 'reason_code'}
        setattr(self, field_map.get(name, name), value)
+
+
+
diff --git a/boto/ec2/elb/listelement.py b/boto/ec2/elb/listelement.py
new file mode 100644
index 0000000..5be4599
--- /dev/null
+++ b/boto/ec2/elb/listelement.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class ListElement(list):
    """A ``list`` subclass usable as a SAX parse target: every <member>
    element encountered is appended; all other elements are ignored."""

    def startElement(self, name, attrs, connection):
        # No nested structure to delegate to.
        pass

    def endElement(self, name, value, connection):
        if name != 'member':
            return
        self.append(value)
+
+
diff --git a/boto/ec2/elb/listener.py b/boto/ec2/elb/listener.py
new file mode 100644
index 0000000..a8807c0
--- /dev/null
+++ b/boto/ec2/elb/listener.py
@@ -0,0 +1,71 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Listener(object):
    """
    Represents an EC2 Load Balancer Listener tuple:
    (LoadBalancerPort, InstancePort, Protocol[, SSLCertificateId]).
    """

    def __init__(self, load_balancer=None, load_balancer_port=0,
                 instance_port=0, protocol='', ssl_certificate_id=None):
        self.load_balancer = load_balancer
        self.load_balancer_port = load_balancer_port
        self.instance_port = instance_port
        self.protocol = protocol
        self.ssl_certificate_id = ssl_certificate_id

    def __repr__(self):
        r = "(%d, %d, '%s'" % (self.load_balancer_port, self.instance_port,
                               self.protocol)
        if self.ssl_certificate_id:
            r += ', %s' % (self.ssl_certificate_id)
        r += ')'
        return r

    def startElement(self, name, attrs, connection):
        # Listener has no nested response elements.
        return None

    def endElement(self, name, value, connection):
        if name == 'LoadBalancerPort':
            self.load_balancer_port = int(value)
        elif name == 'InstancePort':
            self.instance_port = int(value)
        elif name == 'Protocol':
            self.protocol = value
        elif name == 'SSLCertificateId':
            self.ssl_certificate_id = value
        else:
            setattr(self, name, value)

    def get_tuple(self):
        """Return the 3-tuple form used by the create/delete listener calls."""
        return self.load_balancer_port, self.instance_port, self.protocol

    def __getitem__(self, key):
        """Tuple-style indexed access; index 3 is the SSL certificate ARN."""
        if key == 0:
            return self.load_balancer_port
        if key == 1:
            return self.instance_port
        if key == 2:
            return self.protocol
        # Bug fix: HTTPS listeners are documented as four-tuples and
        # ELBConnection.create_load_balancer reads listener[3]; previously
        # this always raised KeyError even when a certificate was set.
        if key == 3 and self.ssl_certificate_id is not None:
            return self.ssl_certificate_id
        raise KeyError
+
+
+
+
diff --git a/boto/ec2/elb/loadbalancer.py b/boto/ec2/elb/loadbalancer.py
new file mode 100644
index 0000000..9759952
--- /dev/null
+++ b/boto/ec2/elb/loadbalancer.py
@@ -0,0 +1,182 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.ec2.elb.healthcheck import HealthCheck
+from boto.ec2.elb.listener import Listener
+from boto.ec2.elb.listelement import ListElement
+from boto.ec2.elb.policies import Policies
+from boto.ec2.instanceinfo import InstanceInfo
+from boto.resultset import ResultSet
+
class LoadBalancer(object):
    """
    Represents an EC2 Elastic Load Balancer: a parse target for the
    DescribeLoadBalancers response and a convenience wrapper around the
    per-balancer ELBConnection operations.
    """

    def __init__(self, connection=None, name=None, endpoints=None):
        # NOTE(review): ``endpoints`` is accepted but never used here --
        # kept in the signature for backward compatibility; confirm intent.
        self.connection = connection
        self.name = name
        self.listeners = None
        self.health_check = None
        self.policies = None
        self.dns_name = None
        self.created_time = None
        self.instances = None
        self.availability_zones = ListElement()

    def __repr__(self):
        return 'LoadBalancer:%s' % self.name

    def startElement(self, name, attrs, connection):
        """Create and return the parse target for a nested response element."""
        if name == 'HealthCheck':
            self.health_check = HealthCheck(self)
            return self.health_check
        if name == 'ListenerDescriptions':
            self.listeners = ResultSet([('member', Listener)])
            return self.listeners
        if name == 'AvailabilityZones':
            return self.availability_zones
        if name == 'Instances':
            self.instances = ResultSet([('member', InstanceInfo)])
            return self.instances
        if name == 'Policies':
            self.policies = Policies(self)
            return self.policies
        return None

    def endElement(self, name, value, connection):
        if name == 'InstanceId':
            self.instances.append(value)
            return
        # Known scalar fields map to snake_case attributes; anything else
        # is kept verbatim under its XML name.
        renamed = {'LoadBalancerName': 'name',
                   'DNSName': 'dns_name',
                   'CreatedTime': 'created_time'}
        setattr(self, renamed.get(name, name), value)

    def enable_zones(self, zones):
        """
        Enable availability zones for this Load Balancer.
        All zones must be in the same region as the Load Balancer.

        :type zones: string or List of strings
        :param zones: The name of the zone(s) to add.
        """
        if isinstance(zones, str) or isinstance(zones, unicode):
            zones = [zones]
        self.availability_zones = self.connection.enable_availability_zones(
            self.name, zones)

    def disable_zones(self, zones):
        """
        Disable availability zones for this Load Balancer.

        :type zones: string or List of strings
        :param zones: The name of the zone(s) to remove.
        """
        if isinstance(zones, str) or isinstance(zones, unicode):
            zones = [zones]
        self.availability_zones = self.connection.disable_availability_zones(
            self.name, zones)

    def register_instances(self, instances):
        """
        Add instances to this Load Balancer.  All instances must be in the
        same region; re-registering an instance has no effect.

        :type instances: string or List of instance ids
        :param instances: The instance id(s) to add.
        """
        if isinstance(instances, str) or isinstance(instances, unicode):
            instances = [instances]
        self.instances = self.connection.register_instances(self.name,
                                                            instances)

    def deregister_instances(self, instances):
        """
        Remove instances from this Load Balancer.  Removing instances that
        are not registered has no effect.

        :type instances: string or List of instance ids
        :param instances: The instance id(s) to remove.
        """
        if isinstance(instances, str) or isinstance(instances, unicode):
            instances = [instances]
        self.instances = self.connection.deregister_instances(self.name,
                                                              instances)

    def delete(self):
        """Delete this load balancer."""
        return self.connection.delete_load_balancer(self.name)

    def configure_health_check(self, health_check):
        """Install ``health_check`` on this balancer via the service."""
        return self.connection.configure_health_check(self.name, health_check)

    def get_instance_health(self, instances=None):
        """Return state info for registered instances."""
        return self.connection.describe_instance_health(self.name, instances)

    def create_listeners(self, listeners):
        return self.connection.create_load_balancer_listeners(self.name,
                                                              listeners)

    def create_listener(self, inPort, outPort=None, proto="tcp"):
        # Single-listener convenience wrapper; outPort defaults to inPort.
        if outPort is None:
            outPort = inPort
        return self.create_listeners([(inPort, outPort, proto)])

    def delete_listeners(self, listeners):
        return self.connection.delete_load_balancer_listeners(self.name,
                                                              listeners)

    def delete_listener(self, inPort, outPort=None, proto="tcp"):
        if outPort is None:
            outPort = inPort
        return self.delete_listeners([(inPort, outPort, proto)])

    def delete_policy(self, policy_name):
        """
        Deletes a policy from the LoadBalancer.  The specified policy must
        not be enabled for any listeners.
        """
        return self.connection.delete_lb_policy(self.name, policy_name)

    def set_policies_of_listener(self, lb_port, policies):
        return self.connection.set_lb_policies_of_listener(self.name,
                                                           lb_port, policies)

    def create_cookie_stickiness_policy(self, cookie_expiration_period,
                                        policy_name):
        return self.connection.create_lb_cookie_stickiness_policy(
            cookie_expiration_period, self.name, policy_name)

    def create_app_cookie_stickiness_policy(self, name, policy_name):
        return self.connection.create_app_cookie_stickiness_policy(
            name, self.name, policy_name)

    def set_listener_SSL_certificate(self, lb_port, ssl_certificate_id):
        return self.connection.set_lb_listener_SSL_certificate(
            self.name, lb_port, ssl_certificate_id)
+
diff --git a/boto/ec2/elb/policies.py b/boto/ec2/elb/policies.py
new file mode 100644
index 0000000..428ce72
--- /dev/null
+++ b/boto/ec2/elb/policies.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2010 Reza Lotun http://reza.lotun.name
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.resultset import ResultSet
+
+
class AppCookieStickinessPolicy(object):
    """
    Stickiness policy keyed on an application-generated cookie.
    Attributes are filled in by boto's SAX parser via endElement.
    """

    def __init__(self, connection=None):
        # Set by endElement as the response XML is walked.
        self.cookie_name = None
        self.policy_name = None

    def __repr__(self):
        return 'AppCookieStickiness(%s, %s)' % (self.policy_name,
                                                self.cookie_name)

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'PolicyName':
            self.policy_name = value
        elif name == 'CookieName':
            self.cookie_name = value
+
+
class LBCookieStickinessPolicy(object):
    """
    Stickiness policy keyed on a load-balancer-generated cookie with an
    expiration period.  Attributes are filled in by the SAX parser.
    """

    def __init__(self, connection=None):
        # Set by endElement as the response XML is walked.
        self.policy_name = None
        self.cookie_expiration_period = None

    def __repr__(self):
        return 'LBCookieStickiness(%s, %s)' % (self.policy_name,
                                               self.cookie_expiration_period)

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        if name == 'PolicyName':
            self.policy_name = value
        elif name == 'CookieExpirationPeriod':
            self.cookie_expiration_period = value
+
+
class Policies(object):
    """
    ELB Policies: container for the stickiness policies attached to a
    load balancer, populated by boto's SAX parser.
    """
    def __init__(self, connection=None):
        self.connection = connection
        self.app_cookie_stickiness_policies = None
        self.lb_cookie_stickiness_policies = None

    def __repr__(self):
        return 'Policies(AppCookieStickiness%s, LBCookieStickiness%s)' % (self.app_cookie_stickiness_policies,
                                                                          self.lb_cookie_stickiness_policies)

    def startElement(self, name, attrs, connection):
        # BUG FIX: the ResultSet must be *returned* so the XML handler
        # delegates the nested <member> elements to it.  Previously the
        # sets were created but never returned, so the policy lists were
        # always left empty after parsing.
        if name == 'AppCookieStickinessPolicies':
            rs = ResultSet([('member', AppCookieStickinessPolicy)])
            self.app_cookie_stickiness_policies = rs
            return rs
        elif name == 'LBCookieStickinessPolicies':
            rs = ResultSet([('member', LBCookieStickinessPolicy)])
            self.lb_cookie_stickiness_policies = rs
            return rs

    def endElement(self, name, value, connection):
        return
+
diff --git a/boto/ec2/image.py b/boto/ec2/image.py
new file mode 100644
index 0000000..a85fba0
--- /dev/null
+++ b/boto/ec2/image.py
@@ -0,0 +1,322 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.ec2.ec2object import EC2Object, TaggedEC2Object
+from boto.ec2.blockdevicemapping import BlockDeviceMapping
+
class ProductCodes(list):
    """A list of product code strings, filled in by the SAX parser."""

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        # Only <productCode> elements contribute entries.
        if name != 'productCode':
            return
        self.append(value)
+
class Image(TaggedEC2Object):
    """
    Represents an EC2 Image (AMI).

    Instances of this class are normally created by boto's SAX XML
    handler while parsing a DescribeImages response; startElement and
    endElement are the parser callbacks that populate the attributes.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None                  # imageId
        self.location = None            # imageLocation (bucket/manifest path)
        self.state = None               # imageState
        self.ownerId = None # for backwards compatibility
        self.owner_id = None            # imageOwnerId
        self.owner_alias = None         # imageOwnerAlias
        self.is_public = False          # isPublic
        self.architecture = None
        self.platform = None
        self.type = None                # imageType
        self.kernel_id = None
        self.ramdisk_id = None
        self.name = None
        self.description = None
        self.product_codes = ProductCodes()
        self.block_device_mapping = None
        self.root_device_type = None
        self.root_device_name = None
        self.virtualization_type = None
        self.hypervisor = None
        self.instance_lifecycle = None

    def __repr__(self):
        return 'Image:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Let the tag-handling base class claim the element first.
        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
        if retval is not None:
            return retval
        # Delegate nested collections to their own parse objects.
        if name == 'blockDeviceMapping':
            self.block_device_mapping = BlockDeviceMapping()
            return self.block_device_mapping
        elif name == 'productCodes':
            return self.product_codes
        else:
            return None

    def endElement(self, name, value, connection):
        """SAX callback: map a closed XML element onto an attribute."""
        if name == 'imageId':
            self.id = value
        elif name == 'imageLocation':
            self.location = value
        elif name == 'imageState':
            self.state = value
        elif name == 'imageOwnerId':
            self.ownerId = value # for backwards compatibility
            self.owner_id = value
        elif name == 'isPublic':
            # The service sends the literal strings 'true'/'false';
            # anything else is treated as a malformed response.
            if value == 'false':
                self.is_public = False
            elif value == 'true':
                self.is_public = True
            else:
                raise Exception(
                    'Unexpected value of isPublic %s for image %s'%(
                        value,
                        self.id
                    )
                )
        elif name == 'architecture':
            self.architecture = value
        elif name == 'imageType':
            self.type = value
        elif name == 'kernelId':
            self.kernel_id = value
        elif name == 'ramdiskId':
            self.ramdisk_id = value
        elif name == 'imageOwnerAlias':
            self.owner_alias = value
        elif name == 'platform':
            self.platform = value
        elif name == 'name':
            self.name = value
        elif name == 'description':
            self.description = value
        elif name == 'rootDeviceType':
            self.root_device_type = value
        elif name == 'rootDeviceName':
            self.root_device_name = value
        elif name == 'virtualizationType':
            self.virtualization_type = value
        elif name == 'hypervisor':
            self.hypervisor = value
        elif name == 'instanceLifecycle':
            self.instance_lifecycle = value
        else:
            # Unrecognized elements become plain attributes.
            setattr(self, name, value)

    def _update(self, updated):
        # Copy every attribute from the freshly fetched Image object.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False):
        """
        Update the image's state information by making a call to fetch
        the current image attributes from the service.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         image the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_images([self.id])
        if len(rs) > 0:
            img = rs[0]
            if img.id == self.id:
                self._update(img)
        elif validate:
            raise ValueError('%s is not a valid Image ID' % self.id)
        return self.state

    def run(self, min_count=1, max_count=1, key_name=None,
            security_groups=None, user_data=None,
            addressing_type=None, instance_type='m1.small', placement=None,
            kernel_id=None, ramdisk_id=None,
            monitoring_enabled=False, subnet_id=None,
            block_device_map=None,
            disable_api_termination=False,
            instance_initiated_shutdown_behavior=None,
            private_ip_address=None,
            placement_group=None):
        """
        Runs this instance.

        :type min_count: int
        :param min_count: The minimum number of instances to start

        :type max_count: int
        :param max_count: The maximum number of instances to start

        :type key_name: string
        :param key_name: The name of the keypair to run this instance with.

        :type security_groups:
        :param security_groups:

        :type user_data:
        :param user_data:

        :type addressing_type:
        :param addressing_type:

        :type instance_type: string
        :param instance_type: The type of instance to run. Current choices are:
                              m1.small | m1.large | m1.xlarge | c1.medium |
                              c1.xlarge | m2.xlarge | m2.2xlarge |
                              m2.4xlarge | cc1.4xlarge

        :type placement: string
        :param placement: The availability zone in which to launch the instances

        :type kernel_id: string
        :param kernel_id: The ID of the kernel with which to launch the instances

        :type ramdisk_id: string
        :param ramdisk_id: The ID of the RAM disk with which to launch the instances

        :type monitoring_enabled: bool
        :param monitoring_enabled: Enable CloudWatch monitoring on the instance.

        :type subnet_id: string
        :param subnet_id: The subnet ID within which to launch the instances for VPC.

        :type private_ip_address: string
        :param private_ip_address: If you're using VPC, you can optionally use
                                   this parameter to assign the instance a
                                   specific available IP address from the
                                   subnet (e.g., 10.0.0.25).

        :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
        :param block_device_map: A BlockDeviceMapping data structure
                                 describing the EBS volumes associated
                                 with the Image.

        :type disable_api_termination: bool
        :param disable_api_termination: If True, the instances will be locked
                                        and will not be able to be terminated
                                        via the API.

        :type instance_initiated_shutdown_behavior: string
        :param instance_initiated_shutdown_behavior: Specifies whether the instance's
                                                     EBS volumes are stopped (i.e. detached)
                                                     or terminated (i.e. deleted) when
                                                     the instance is shutdown by the
                                                     owner. Valid values are:
                                                     stop | terminate

        :type placement_group: string
        :param placement_group: If specified, this is the name of the placement
                                group in which the instance(s) will be launched.

        :rtype: Reservation
        :return: The :class:`boto.ec2.instance.Reservation` associated with the request for machines
        """
        # Thin pass-through to the connection; argument order must match
        # EC2Connection.run_instances.
        return self.connection.run_instances(self.id, min_count, max_count,
                                             key_name, security_groups,
                                             user_data, addressing_type,
                                             instance_type, placement,
                                             kernel_id, ramdisk_id,
                                             monitoring_enabled, subnet_id,
                                             block_device_map, disable_api_termination,
                                             instance_initiated_shutdown_behavior,
                                             private_ip_address,
                                             placement_group)

    def deregister(self):
        """Deregister this image (AMI) from the service."""
        return self.connection.deregister_image(self.id)

    def get_launch_permissions(self):
        """Return the launchPermission attribute dict for this image."""
        img_attrs = self.connection.get_image_attribute(self.id,
                                                        'launchPermission')
        return img_attrs.attrs

    def set_launch_permissions(self, user_ids=None, group_names=None):
        """Grant launch permission to the given users and/or groups."""
        return self.connection.modify_image_attribute(self.id,
                                                      'launchPermission',
                                                      'add',
                                                      user_ids,
                                                      group_names)

    def remove_launch_permissions(self, user_ids=None, group_names=None):
        """Revoke launch permission from the given users and/or groups."""
        return self.connection.modify_image_attribute(self.id,
                                                      'launchPermission',
                                                      'remove',
                                                      user_ids,
                                                      group_names)

    def reset_launch_attributes(self):
        """Reset launch permissions to the image's default."""
        return self.connection.reset_image_attribute(self.id,
                                                     'launchPermission')

    def get_kernel(self):
        """Return the kernel ID attribute of this image."""
        img_attrs =self.connection.get_image_attribute(self.id, 'kernel')
        return img_attrs.kernel

    def get_ramdisk(self):
        """Return the ramdisk ID attribute of this image."""
        img_attrs = self.connection.get_image_attribute(self.id, 'ramdisk')
        return img_attrs.ramdisk
+
class ImageAttribute:
    """
    Holds the parsed response to a DescribeImageAttribute call.

    Multi-valued fields (groups, user ids, product codes) accumulate in
    the ``attrs`` dict; scalar fields become plain attributes.
    """

    def __init__(self, parent=None):
        self.name = None        # normalized attribute name, e.g. 'launch_permission'
        self.kernel = None
        self.ramdisk = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        if name == 'blockDeviceMapping':
            # Delegate the nested mapping to its own parse object.
            self.attrs['block_device_mapping'] = BlockDeviceMapping()
            return self.attrs['block_device_mapping']
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'launchPermission':
            self.name = 'launch_permission'
        elif name == 'group':
            # setdefault replaces the Python-2-only dict.has_key() idiom
            # (removed in Python 3) with a single lookup.
            self.attrs.setdefault('groups', []).append(value)
        elif name == 'userId':
            self.attrs.setdefault('user_ids', []).append(value)
        elif name == 'productCode':
            self.attrs.setdefault('product_codes', []).append(value)
        elif name == 'imageId':
            self.image_id = value
        elif name == 'kernel':
            self.kernel = value
        elif name == 'ramdisk':
            self.ramdisk = value
        else:
            setattr(self, name, value)
diff --git a/boto/ec2/instance.py b/boto/ec2/instance.py
new file mode 100644
index 0000000..9e8aacf
--- /dev/null
+++ b/boto/ec2/instance.py
@@ -0,0 +1,394 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Instance
+"""
+import boto
+from boto.ec2.ec2object import EC2Object, TaggedEC2Object
+from boto.resultset import ResultSet
+from boto.ec2.address import Address
+from boto.ec2.blockdevicemapping import BlockDeviceMapping
+from boto.ec2.image import ProductCodes
+import base64
+
class Reservation(EC2Object):
    """
    Represents an EC2 reservation: the set of instances started by a
    single RunInstances request, plus the owner and security groups.
    Populated by boto's SAX XML handler via startElement/endElement.
    """

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.id = None          # reservationId
        self.owner_id = None    # ownerId
        self.groups = []        # Group objects (security groups)
        self.instances = []     # Instance objects in this reservation

    def __repr__(self):
        return 'Reservation:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Hand nested collections to fresh ResultSets so the parser
        # builds Instance/Group items directly into them.
        if name == 'instancesSet':
            self.instances = ResultSet([('item', Instance)])
            return self.instances
        elif name == 'groupSet':
            self.groups = ResultSet([('item', Group)])
            return self.groups
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'reservationId':
            self.id = value
        elif name == 'ownerId':
            self.owner_id = value
        else:
            # Unrecognized elements become plain attributes.
            setattr(self, name, value)

    def stop_all(self):
        # Stops (does not terminate) every instance in the reservation.
        for instance in self.instances:
            instance.stop()
+
class Instance(TaggedEC2Object):
    """
    Represents an EC2 Instance.

    Instances are normally created by boto's SAX XML handler while
    parsing a DescribeInstances response; startElement/endElement are
    the parser callbacks that populate the attributes.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.dns_name = None
        self.public_dns_name = None
        self.private_dns_name = None
        self.state = None
        self.state_code = None
        self.key_name = None
        self.shutdown_state = None
        self.previous_state = None
        self.instance_type = None
        # NOTE: instance_class was previously initialized twice; the
        # duplicate assignment has been removed.
        self.instance_class = None
        self.launch_time = None
        self.image_id = None
        self.placement = None
        self.kernel = None
        self.ramdisk = None
        self.product_codes = ProductCodes()
        self.ami_launch_index = None
        self.monitored = False
        self.spot_instance_request_id = None
        self.subnet_id = None
        self.vpc_id = None
        self.private_ip_address = None
        self.ip_address = None
        self.requester_id = None
        # True while the parser is inside a <monitoring> element; used
        # to disambiguate the nested <state> element.
        self._in_monitoring_element = False
        self.persistent = False
        self.root_device_name = None
        self.root_device_type = None
        self.block_device_mapping = None
        self.state_reason = None
        self.group_name = None
        self.client_token = None

    def __repr__(self):
        return 'Instance:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Let the tag-handling base class claim the element first.
        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'monitoring':
            self._in_monitoring_element = True
        elif name == 'blockDeviceMapping':
            self.block_device_mapping = BlockDeviceMapping()
            return self.block_device_mapping
        elif name == 'productCodes':
            return self.product_codes
        elif name == 'stateReason':
            self.state_reason = StateReason()
            return self.state_reason
        return None

    def endElement(self, name, value, connection):
        """SAX callback: map a closed XML element onto an attribute."""
        if name == 'instanceId':
            self.id = value
        elif name == 'imageId':
            self.image_id = value
        elif name == 'dnsName' or name == 'publicDnsName':
            self.dns_name = value           # backwards compatibility
            self.public_dns_name = value
        elif name == 'privateDnsName':
            self.private_dns_name = value
        elif name == 'keyName':
            self.key_name = value
        elif name == 'amiLaunchIndex':
            self.ami_launch_index = value
        elif name == 'shutdownState':
            self.shutdown_state = value
        elif name == 'previousState':
            self.previous_state = value
        elif name == 'name':
            self.state = value
        elif name == 'code':
            try:
                self.state_code = int(value)
            except ValueError:
                boto.log.warning('Error converting code (%s) to int' % value)
                self.state_code = value
        elif name == 'instanceType':
            self.instance_type = value
        elif name == 'instanceClass':
            self.instance_class = value
        elif name == 'rootDeviceName':
            self.root_device_name = value
        elif name == 'rootDeviceType':
            self.root_device_type = value
        elif name == 'launchTime':
            self.launch_time = value
        elif name == 'availabilityZone':
            self.placement = value
        elif name == 'placement':
            pass
        elif name == 'kernelId':
            self.kernel = value
        elif name == 'ramdiskId':
            self.ramdisk = value
        elif name == 'state':
            # <state> only carries monitoring info when nested inside
            # <monitoring>; outside it the instance state arrives via
            # the <name>/<code> elements handled above.
            if self._in_monitoring_element:
                if value == 'enabled':
                    self.monitored = True
                self._in_monitoring_element = False
        # BUG FIX: a second, unreachable 'instanceClass' branch that
        # duplicated the one above has been removed.
        elif name == 'spotInstanceRequestId':
            self.spot_instance_request_id = value
        elif name == 'subnetId':
            self.subnet_id = value
        elif name == 'vpcId':
            self.vpc_id = value
        elif name == 'privateIpAddress':
            self.private_ip_address = value
        elif name == 'ipAddress':
            self.ip_address = value
        elif name == 'requesterId':
            self.requester_id = value
        elif name == 'persistent':
            if value == 'true':
                self.persistent = True
            else:
                self.persistent = False
        elif name == 'groupName':
            # NOTE(review): group_name is only recorded while inside a
            # <monitoring> element, which looks suspicious (a placement
            # group name would normally arrive inside <placement>) —
            # behavior preserved; confirm against the API response shape.
            if self._in_monitoring_element:
                self.group_name = value
        elif name == 'clientToken':
            self.client_token = value
        else:
            setattr(self, name, value)

    def _update(self, updated):
        # Copy every attribute from the freshly fetched Instance object.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False):
        """
        Update the instance's state information by making a call to fetch
        the current instance attributes from the service.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         instance the update method returns quietly. If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_instances([self.id])
        if len(rs) > 0:
            r = rs[0]
            for i in r.instances:
                if i.id == self.id:
                    self._update(i)
        elif validate:
            raise ValueError('%s is not a valid Instance ID' % self.id)
        return self.state

    def terminate(self):
        """
        Terminate the instance
        """
        rs = self.connection.terminate_instances([self.id])
        self._update(rs[0])

    def stop(self, force=False):
        """
        Stop the instance.  Updates this object's state in place.

        :type force: bool
        :param force: Forces the instance to stop.
            NOTE(review): force is accepted but not currently forwarded
            to the service call below — confirm the connection method's
            signature before wiring it through.
        """
        rs = self.connection.stop_instances([self.id])
        self._update(rs[0])

    def start(self):
        """
        Start the instance.
        """
        rs = self.connection.start_instances([self.id])
        self._update(rs[0])

    def reboot(self):
        """Request a reboot of this instance."""
        return self.connection.reboot_instances([self.id])

    def get_console_output(self):
        """
        Retrieves the console output for the instance.

        :rtype: :class:`boto.ec2.instance.ConsoleOutput`
        :return: The console output as a ConsoleOutput object
        """
        return self.connection.get_console_output(self.id)

    def confirm_product(self, product_code):
        """Confirm that this instance is associated with *product_code*."""
        return self.connection.confirm_product_instance(self.id, product_code)

    def use_ip(self, ip_address):
        """Associate an elastic IP (Address object or string) with this instance."""
        if isinstance(ip_address, Address):
            ip_address = ip_address.public_ip
        return self.connection.associate_address(self.id, ip_address)

    def monitor(self):
        """Enable CloudWatch monitoring for this instance."""
        return self.connection.monitor_instance(self.id)

    def unmonitor(self):
        """Disable CloudWatch monitoring for this instance."""
        return self.connection.unmonitor_instance(self.id)

    def get_attribute(self, attribute):
        """
        Gets an attribute from this instance.

        :type attribute: string
        :param attribute: The attribute you need information about
                          Valid choices are:
                          instanceType|kernel|ramdisk|userData|
                          disableApiTermination|
                          instanceInitiatedShutdownBehavior|
                          rootDeviceName|blockDeviceMapping

        :rtype: :class:`boto.ec2.image.InstanceAttribute`
        :return: An InstanceAttribute object representing the value of the
                 attribute requested
        """
        return self.connection.get_instance_attribute(self.id, attribute)

    def modify_attribute(self, attribute, value):
        """
        Changes an attribute of this instance

        :type attribute: string
        :param attribute: The attribute you wish to change.
                          AttributeName - Expected value (default)
                          instanceType - A valid instance type (m1.small)
                          kernel - Kernel ID (None)
                          ramdisk - Ramdisk ID (None)
                          userData - Base64 encoded String (None)
                          disableApiTermination - Boolean (true)
                          instanceInitiatedShutdownBehavior - stop|terminate
                          rootDeviceName - device name (None)

        :type value: string
        :param value: The new value for the attribute

        :rtype: bool
        :return: Whether the operation succeeded or not
        """
        return self.connection.modify_instance_attribute(self.id, attribute,
                                                         value)

    def reset_attribute(self, attribute):
        """
        Resets an attribute of this instance to its default value.

        :type attribute: string
        :param attribute: The attribute to reset. Valid values are:
                          kernel|ramdisk

        :rtype: bool
        :return: Whether the operation succeeded or not
        """
        return self.connection.reset_instance_attribute(self.id, attribute)
+
class Group:
    """Security group reference parsed out of a groupSet element."""

    def __init__(self, parent=None):
        self.id = None  # groupId

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name != 'groupId':
            # Unrecognized elements become plain attributes.
            setattr(self, name, value)
        else:
            self.id = value
+
class ConsoleOutput:
    """Parsed GetConsoleOutput response; output is base64-decoded."""

    def __init__(self, parent=None):
        self.parent = parent
        self.instance_id = None
        self.timestamp = None
        self.comment = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'output':
            # The service delivers the console text base64-encoded.
            self.output = base64.b64decode(value)
        elif name == 'instanceId':
            self.instance_id = value
        else:
            setattr(self, name, value)
+
class InstanceAttribute(dict):
    """
    Parsed DescribeInstanceAttribute response.  The service emits a
    <value> element before the element naming the attribute, so the
    value is buffered and stored when the name element closes.
    """

    def __init__(self, parent=None):
        dict.__init__(self)
        self._current_value = None  # buffered <value> awaiting its key

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name != 'value':
            self[name] = self._current_value
        else:
            self._current_value = value
+
class StateReason(dict):
    """Parsed stateReason element; stores its sub-elements as dict keys."""

    def __init__(self, parent=None):
        dict.__init__(self)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # The enclosing element itself contributes no entry.
        if name == 'stateReason':
            return
        self[name] = value
+
diff --git a/boto/ec2/instanceinfo.py b/boto/ec2/instanceinfo.py
new file mode 100644
index 0000000..6efbaed
--- /dev/null
+++ b/boto/ec2/instanceinfo.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class InstanceInfo(object):
    """
    Represents an EC2 Instance status response from CloudWatch.
    Holds just the instance id and its state string.
    """

    def __init__(self, connection=None, id=None, state=None):
        self.connection = connection
        self.id = id
        self.state = state

    def __repr__(self):
        return 'InstanceInfo:%s' % self.id

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # The service uses both capitalizations of the id element.
        if name in ('instanceId', 'InstanceId'):
            self.id = value
        elif name == 'state':
            self.state = value
        else:
            setattr(self, name, value)
+
+
+
diff --git a/boto/ec2/keypair.py b/boto/ec2/keypair.py
new file mode 100644
index 0000000..d08e5ce
--- /dev/null
+++ b/boto/ec2/keypair.py
@@ -0,0 +1,111 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Keypair
+"""
+
+import os
+from boto.ec2.ec2object import EC2Object
+from boto.exception import BotoClientError
+
class KeyPair(EC2Object):
    """
    Represents an EC2 key pair.  The private key material is only
    present on a freshly created key pair.
    """

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.name = None          # keyName
        self.fingerprint = None   # keyFingerprint
        self.material = None      # keyMaterial (PEM private key, create only)

    def __repr__(self):
        return 'KeyPair:%s' % self.name

    def endElement(self, name, value, connection):
        if name == 'keyName':
            self.name = value
        elif name == 'keyFingerprint':
            self.fingerprint = value
        elif name == 'keyMaterial':
            self.material = value
        else:
            setattr(self, name, value)

    def delete(self):
        """
        Delete the KeyPair.

        :rtype: bool
        :return: True if successful, otherwise False.
        """
        return self.connection.delete_key_pair(self.name)

    def save(self, directory_path):
        """
        Save the material (the unencrypted PEM encoded RSA private key)
        of a newly created KeyPair to a local file.

        :type directory_path: string
        :param directory_path: The fully qualified path to the directory
                               in which the keypair will be saved.  The
                               keypair file will be named using the name
                               of the keypair as the base name and .pem
                               for the file extension.  If a file of that
                               name already exists in the directory, an
                               exception will be raised and the old file
                               will not be overwritten.

        :rtype: bool
        :return: True if successful.
        """
        if self.material:
            file_path = os.path.join(directory_path, '%s.pem' % self.name)
            if os.path.exists(file_path):
                raise BotoClientError('%s already exists, it will not be overwritten' % file_path)
            fp = open(file_path, 'wb')
            try:
                # SECURITY FIX: restrict the private key to owner-only
                # access before the material is written; ssh refuses
                # world-readable keys and other users must not read it.
                os.chmod(file_path, 0o600)
                fp.write(self.material)
            finally:
                fp.close()
            return True
        else:
            raise BotoClientError('KeyPair contains no material')

    def copy_to_region(self, region):
        """
        Create a new key pair of the same name in another region.
        Note that the new key pair will use a different ssh
        cert than this key pair.  After doing the copy,
        you will need to save the material associated with the
        new key pair (use the save method) to a local file.

        :type region: :class:`boto.ec2.regioninfo.RegionInfo`
        :param region: The region to which this security group will be copied.

        :rtype: :class:`boto.ec2.keypair.KeyPair`
        :return: The new key pair
        """
        if region.name == self.region:
            raise BotoClientError('Unable to copy to the same Region')
        conn_params = self.connection.get_params()
        rconn = region.connect(**conn_params)
        kp = rconn.create_key_pair(self.name)
        return kp
+
+
+
diff --git a/boto/ec2/launchspecification.py b/boto/ec2/launchspecification.py
new file mode 100644
index 0000000..a574a38
--- /dev/null
+++ b/boto/ec2/launchspecification.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a launch specification for Spot instances.
+"""
+
+from boto.ec2.ec2object import EC2Object
+from boto.resultset import ResultSet
+from boto.ec2.blockdevicemapping import BlockDeviceMapping
+from boto.ec2.instance import Group
+
class GroupList(list):
    """A list of security group ids, filled in by the SAX parser."""

    def startElement(self, name, attrs, connection):
        pass

    def endElement(self, name, value, connection):
        # Only <groupId> elements contribute entries.
        if name != 'groupId':
            return
        self.append(value)
+
class LaunchSpecification(EC2Object):
    """
    Represents the launch specification of a Spot instance request,
    populated by boto's SAX XML handler.
    """

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.key_name = None
        self.instance_type = None
        self.image_id = None
        self.groups = []                # security groups (Group objects)
        self.placement = None           # availability zone
        self.kernel = None
        self.ramdisk = None
        self.monitored = False
        self.subnet_id = None
        # True while the parser is inside a <monitoring> element; used
        # to disambiguate the nested <state> element.
        self._in_monitoring_element = False
        self.block_device_mapping = None

    def __repr__(self):
        return 'LaunchSpecification(%s)' % self.image_id

    def startElement(self, name, attrs, connection):
        # Delegate nested collections to their own parse objects.
        if name == 'groupSet':
            self.groups = ResultSet([('item', Group)])
            return self.groups
        elif name == 'monitoring':
            # No return here: the flag is set and parsing continues on
            # this object (falls through to an implicit None).
            self._in_monitoring_element = True
        elif name == 'blockDeviceMapping':
            self.block_device_mapping = BlockDeviceMapping()
            return self.block_device_mapping
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'imageId':
            self.image_id = value
        elif name == 'keyName':
            self.key_name = value
        elif name == 'instanceType':
            self.instance_type = value
        elif name == 'availabilityZone':
            self.placement = value
        elif name == 'placement':
            pass
        elif name == 'kernelId':
            self.kernel = value
        elif name == 'ramdiskId':
            self.ramdisk = value
        elif name == 'subnetId':
            self.subnet_id = value
        elif name == 'state':
            # <state> only carries monitoring info when nested inside
            # <monitoring>.
            if self._in_monitoring_element:
                if value == 'enabled':
                    self.monitored = True
                self._in_monitoring_element = False
        else:
            # Unrecognized elements become plain attributes.
            setattr(self, name, value)
+
+
diff --git a/boto/ec2/placementgroup.py b/boto/ec2/placementgroup.py
new file mode 100644
index 0000000..e1bbea6
--- /dev/null
+++ b/boto/ec2/placementgroup.py
@@ -0,0 +1,51 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+"""
+Represents an EC2 Placement Group
+"""
+from boto.ec2.ec2object import EC2Object
+from boto.exception import BotoClientError
+
+class PlacementGroup(EC2Object):
+    """
+    Represents an EC2 Placement Group, populated from the response XML
+    via the SAX callbacks.
+    """
+
+    def __init__(self, connection=None, name=None, strategy=None, state=None):
+        EC2Object.__init__(self, connection)
+        self.name = name          # groupName
+        self.strategy = strategy  # placement strategy string reported by EC2
+        self.state = state
+
+    def __repr__(self):
+        return 'PlacementGroup:%s' % self.name
+
+    def endElement(self, name, value, connection):
+        if name == 'groupName':
+            self.name = value
+        elif name == 'strategy':
+            self.strategy = value
+        elif name == 'state':
+            self.state = value
+        else:
+            # Keep any unrecognized response field as a raw attribute.
+            setattr(self, name, value)
+
+    def delete(self):
+        # Deletes the group on EC2 (not just this local object).
+        return self.connection.delete_placement_group(self.name)
+
+
diff --git a/boto/ec2/regioninfo.py b/boto/ec2/regioninfo.py
new file mode 100644
index 0000000..0b37b0e
--- /dev/null
+++ b/boto/ec2/regioninfo.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.regioninfo import RegionInfo
+
+class EC2RegionInfo(RegionInfo):
+    """
+    Represents an EC2 Region
+    """
+
+    def __init__(self, connection=None, name=None, endpoint=None):
+        # Imported inside the method, presumably to avoid a circular
+        # import between this module and boto.ec2.connection -- confirm.
+        from boto.ec2.connection import EC2Connection
+        # EC2Connection is passed as the connection class this region
+        # should use for RegionInfo.connect().
+        RegionInfo.__init__(self, connection, name, endpoint,
+                            EC2Connection)
diff --git a/boto/ec2/reservedinstance.py b/boto/ec2/reservedinstance.py
new file mode 100644
index 0000000..1d35c1d
--- /dev/null
+++ b/boto/ec2/reservedinstance.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.ec2.ec2object import EC2Object
+
+class ReservedInstancesOffering(EC2Object):
+    """
+    A reserved instance offering (DescribeReservedInstancesOfferings),
+    populated from the response XML via the SAX callbacks.
+    """
+
+    def __init__(self, connection=None, id=None, instance_type=None,
+                 availability_zone=None, duration=None, fixed_price=None,
+                 usage_price=None, description=None):
+        EC2Object.__init__(self, connection)
+        self.id = id
+        self.instance_type = instance_type
+        self.availability_zone = availability_zone
+        self.duration = duration
+        self.fixed_price = fixed_price
+        self.usage_price = usage_price
+        self.description = description
+
+    def __repr__(self):
+        return 'ReservedInstanceOffering:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'reservedInstancesOfferingId':
+            self.id = value
+        elif name == 'instanceType':
+            self.instance_type = value
+        elif name == 'availabilityZone':
+            self.availability_zone = value
+        elif name == 'duration':
+            self.duration = value
+        elif name == 'fixedPrice':
+            self.fixed_price = value
+        elif name == 'usagePrice':
+            self.usage_price = value
+        elif name == 'productDescription':
+            self.description = value
+        else:
+            # Unknown fields are kept verbatim as attributes.
+            setattr(self, name, value)
+
+    def describe(self):
+        # Dumps a human-readable summary to stdout.  Python 2 print
+        # statements -- this file predates print_function.
+        print 'ID=%s' % self.id
+        print '\tInstance Type=%s' % self.instance_type
+        print '\tZone=%s' % self.availability_zone
+        print '\tDuration=%s' % self.duration
+        print '\tFixed Price=%s' % self.fixed_price
+        print '\tUsage Price=%s' % self.usage_price
+        print '\tDescription=%s' % self.description
+
+    def purchase(self, instance_count=1):
+        # Issues the purchase call through the connection and returns
+        # whatever the connection method returns.
+        return self.connection.purchase_reserved_instance_offering(self.id, instance_count)
+
class ReservedInstance(ReservedInstancesOffering):
    """
    An active/purchased reserved instance (DescribeReservedInstances),
    extending the offering fields with instance_count and state.
    """

    def __init__(self, connection=None, id=None, instance_type=None,
                 availability_zone=None, duration=None, fixed_price=None,
                 usage_price=None, description=None,
                 instance_count=None, state=None):
        ReservedInstancesOffering.__init__(self, connection, id, instance_type,
                                           availability_zone, duration,
                                           fixed_price, usage_price,
                                           description)
        self.instance_count = instance_count
        self.state = state

    def __repr__(self):
        return 'ReservedInstance:%s' % self.id

    def endElement(self, name, value, connection):
        # BUG FIX: the second branch was `if` instead of `elif`, so a
        # 'reservedInstancesId' element also fell through to the second
        # chain's `else` and invoked the parent handler, which setattr'd a
        # spurious 'reservedInstancesId' attribute.  One elif chain now.
        if name == 'reservedInstancesId':
            self.id = value
        elif name == 'instanceCount':
            self.instance_count = int(value)
        elif name == 'state':
            self.state = value
        else:
            # Everything else is handled by the offering base class.
            ReservedInstancesOffering.endElement(self, name, value, connection)
diff --git a/boto/ec2/securitygroup.py b/boto/ec2/securitygroup.py
new file mode 100644
index 0000000..24e08c3
--- /dev/null
+++ b/boto/ec2/securitygroup.py
@@ -0,0 +1,286 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Security Group
+"""
+from boto.ec2.ec2object import EC2Object
+from boto.exception import BotoClientError
+
+class SecurityGroup(EC2Object):
+    """
+    Represents an EC2 security group, populated from the response XML
+    via the SAX callbacks.  ``rules`` is a list of IPPermissions
+    objects, each of which carries a list of grants.
+    """
+
+    def __init__(self, connection=None, owner_id=None,
+                 name=None, description=None):
+        EC2Object.__init__(self, connection)
+        self.owner_id = owner_id
+        self.name = name
+        self.description = description
+        self.rules = []
+
+    def __repr__(self):
+        return 'SecurityGroup:%s' % self.name
+
+    def startElement(self, name, attrs, connection):
+        # Each <item> starts a new rule; delegate its sub-elements to the
+        # IPPermissions object.
+        if name == 'item':
+            self.rules.append(IPPermissions(self))
+            return self.rules[-1]
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ownerId':
+            self.owner_id = value
+        elif name == 'groupName':
+            self.name = value
+        elif name == 'groupDescription':
+            self.description = value
+        elif name == 'ipRanges':
+            # Container element; individual ranges arrive via the rules.
+            pass
+        elif name == 'return':
+            # <return> is the boolean result of a mutating API call;
+            # record it as self.status.
+            if value == 'false':
+                self.status = False
+            elif value == 'true':
+                self.status = True
+            else:
+                raise Exception(
+                    'Unexpected value of status %s for group %s'%(
+                        value,
+                        self.name
+                    )
+                )
+        else:
+            setattr(self, name, value)
+
+    def delete(self):
+        # Deletes the group on EC2, not just this local object.
+        return self.connection.delete_security_group(self.name)
+
+    def add_rule(self, ip_protocol, from_port, to_port,
+                 src_group_name, src_group_owner_id, cidr_ip):
+        """
+        Add a rule to the SecurityGroup object. Note that this method
+        only changes the local version of the object. No information
+        is sent to EC2.
+        """
+        rule = IPPermissions(self)
+        rule.ip_protocol = ip_protocol
+        rule.from_port = from_port
+        rule.to_port = to_port
+        self.rules.append(rule)
+        rule.add_grant(src_group_name, src_group_owner_id, cidr_ip)
+
+    def remove_rule(self, ip_protocol, from_port, to_port,
+                    src_group_name, src_group_owner_id, cidr_ip):
+        """
+        Remove a rule to the SecurityGroup object. Note that this method
+        only changes the local version of the object. No information
+        is sent to EC2.
+        """
+        target_rule = None
+        # Find the matching rule, then the matching grant inside it.
+        for rule in self.rules:
+            if rule.ip_protocol == ip_protocol:
+                if rule.from_port == from_port:
+                    if rule.to_port == to_port:
+                        target_rule = rule
+                        target_grant = None
+                        for grant in rule.grants:
+                            if grant.name == src_group_name:
+                                if grant.owner_id == src_group_owner_id:
+                                    if grant.cidr_ip == cidr_ip:
+                                        target_grant = grant
+                        if target_grant:
+                            rule.grants.remove(target_grant)
+        # NOTE(review): this block sits outside the loop and reuses the
+        # loop variable `rule`; with an empty self.rules it raises
+        # NameError, and with no match target_rule is None and
+        # list.remove(None) raises ValueError -- confirm callers always
+        # pass a rule that exists.
+        if len(rule.grants) == 0:
+            self.rules.remove(target_rule)
+
+    def authorize(self, ip_protocol=None, from_port=None, to_port=None,
+                  cidr_ip=None, src_group=None):
+        """
+        Add a new rule to this security group.
+        You need to pass in either src_group_name
+        OR ip_protocol, from_port, to_port,
+        and cidr_ip. In other words, either you are authorizing another
+        group or you are authorizing some ip-based rule.
+
+        :type ip_protocol: string
+        :param ip_protocol: Either tcp | udp | icmp
+
+        :type from_port: int
+        :param from_port: The beginning port number you are enabling
+
+        :type to_port: int
+        :param to_port: The ending port number you are enabling
+
+        :type cidr_ip: string
+        :param cidr_ip: The CIDR block you are providing access to.
+            See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
+
+        :type src_group: :class:`boto.ec2.securitygroup.SecurityGroup` or
+                         :class:`boto.ec2.securitygroup.GroupOrCIDR`
+
+        :rtype: bool
+        :return: True if successful.
+        """
+        if src_group:
+            # Group-based and CIDR-based authorization are exclusive.
+            cidr_ip = None
+            src_group_name = src_group.name
+            src_group_owner_id = src_group.owner_id
+        else:
+            src_group_name = None
+            src_group_owner_id = None
+        status = self.connection.authorize_security_group(self.name,
+                                                          src_group_name,
+                                                          src_group_owner_id,
+                                                          ip_protocol,
+                                                          from_port,
+                                                          to_port,
+                                                          cidr_ip)
+        # Mirror the change locally only if EC2 accepted it.
+        if status:
+            self.add_rule(ip_protocol, from_port, to_port, src_group_name,
+                          src_group_owner_id, cidr_ip)
+        return status
+
+    def revoke(self, ip_protocol=None, from_port=None, to_port=None,
+               cidr_ip=None, src_group=None):
+        """
+        Remove a rule from this security group; argument conventions are
+        the mirror image of :meth:`authorize`.
+        """
+        if src_group:
+            cidr_ip=None
+            src_group_name = src_group.name
+            src_group_owner_id = src_group.owner_id
+        else:
+            src_group_name = None
+            src_group_owner_id = None
+        status = self.connection.revoke_security_group(self.name,
+                                                       src_group_name,
+                                                       src_group_owner_id,
+                                                       ip_protocol,
+                                                       from_port,
+                                                       to_port,
+                                                       cidr_ip)
+        # Mirror the change locally only if EC2 accepted it.
+        if status:
+            self.remove_rule(ip_protocol, from_port, to_port, src_group_name,
+                             src_group_owner_id, cidr_ip)
+        return status
+
+    def copy_to_region(self, region, name=None):
+        """
+        Create a copy of this security group in another region.
+        Note that the new security group will be a separate entity
+        and will not stay in sync automatically after the copy
+        operation.
+
+        :type region: :class:`boto.ec2.regioninfo.RegionInfo`
+        :param region: The region to which this security group will be copied.
+
+        :type name: string
+        :param name: The name of the copy. If not supplied, the copy
+                     will have the same name as this security group.
+
+        :rtype: :class:`boto.ec2.securitygroup.SecurityGroup`
+        :return: The new security group.
+        """
+        # NOTE(review): compares region.name (a string) against
+        # self.region; if self.region is a RegionInfo object rather than
+        # a name string, this guard can never fire -- confirm what the
+        # base class stores in self.region.
+        if region.name == self.region:
+            raise BotoClientError('Unable to copy to the same Region')
+        conn_params = self.connection.get_params()
+        rconn = region.connect(**conn_params)
+        sg = rconn.create_security_group(name or self.name, self.description)
+        source_groups = []
+        for rule in self.rules:
+            # NOTE(review): only the first grant of each rule is copied;
+            # rules carrying multiple grants lose all but grants[0] --
+            # confirm this is acceptable.
+            grant = rule.grants[0]
+            if grant.name:
+                if grant.name not in source_groups:
+                    source_groups.append(grant.name)
+                sg.authorize(None, None, None, None, grant)
+            else:
+                sg.authorize(rule.ip_protocol, rule.from_port, rule.to_port,
+                             grant.cidr_ip)
+        return sg
+
+    def instances(self):
+        # All instances whose reservation references this group.
+        # NOTE(review): compares g.id against self.name -- confirm the
+        # reservation group objects expose the group *name* in .id.
+        instances = []
+        rs = self.connection.get_all_instances()
+        for reservation in rs:
+            uses_group = [g.id for g in reservation.groups if g.id == self.name]
+            if uses_group:
+                instances.extend(reservation.instances)
+        return instances
+
class IPPermissions:
    """
    One entry in a security group's rule list: a protocol/port range plus
    the list of grants (GroupOrCIDR objects) it applies to.
    """

    # Maps response tags onto attribute names for the scalar fields.
    _SCALAR_TAGS = {'ipProtocol': 'ip_protocol',
                    'fromPort': 'from_port',
                    'toPort': 'to_port'}

    def __init__(self, parent=None):
        self.parent = parent
        self.ip_protocol = None
        self.from_port = None
        self.to_port = None
        self.grants = []

    def __repr__(self):
        return 'IPPermissions:%s(%s-%s)' % (self.ip_protocol,
                                            self.from_port, self.to_port)

    def startElement(self, name, attrs, connection):
        # Each <item> opens a new grant; hand its sub-elements to it.
        if name != 'item':
            return None
        grant = GroupOrCIDR(self)
        self.grants.append(grant)
        return grant

    def endElement(self, name, value, connection):
        # Known tags are renamed to their snake_case attribute; anything
        # else is stored verbatim, exactly as the trailing setattr did.
        setattr(self, self._SCALAR_TAGS.get(name, name), value)

    def add_grant(self, name=None, owner_id=None, cidr_ip=None):
        """Append and return a new grant built from the given fields."""
        grant = GroupOrCIDR(self)
        grant.name = name
        grant.owner_id = owner_id
        grant.cidr_ip = cidr_ip
        self.grants.append(grant)
        return grant
+
class GroupOrCIDR:
    """
    The source of a security group rule: either another security group
    (owner_id + name) or a CIDR ip range (cidr_ip).

    The ``parent`` argument is accepted for symmetry with IPPermissions
    but is not stored (matches the original behavior).
    """

    def __init__(self, parent=None):
        self.owner_id = None
        self.name = None
        self.cidr_ip = None

    def __repr__(self):
        if self.cidr_ip:
            return '%s' % self.cidr_ip
        else:
            return '%s-%s' % (self.name, self.owner_id)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # BUG FIX: the cidrIp test was a bare `if`, starting a *second*
        # chain whose `else` ran for 'userId'/'groupName' as well and
        # setattr'd spurious raw attributes alongside the mapped ones.
        # A single elif chain stores each field exactly once.
        if name == 'userId':
            self.owner_id = value
        elif name == 'groupName':
            self.name = value
        elif name == 'cidrIp':
            self.cidr_ip = value
        else:
            # Unknown fields are kept verbatim as attributes.
            setattr(self, name, value)
+
diff --git a/boto/ec2/snapshot.py b/boto/ec2/snapshot.py
new file mode 100644
index 0000000..bbe8ad4
--- /dev/null
+++ b/boto/ec2/snapshot.py
@@ -0,0 +1,140 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic IP Snapshot
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+
class Snapshot(TaggedEC2Object):
    """
    Represents an EBS snapshot, populated from the EC2 response XML via
    the SAX callbacks.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None             # snapshotId
        self.volume_id = None      # source volume
        self.status = None
        self.progress = None       # progress string reported by EC2
        self.start_time = None
        self.owner_id = None
        self.volume_size = None    # int when EC2 returns a number
        self.description = None

    def __repr__(self):
        return 'Snapshot:%s' % self.id

    def endElement(self, name, value, connection):
        """SAX callback mapping response tags onto attributes."""
        if name == 'snapshotId':
            self.id = value
        elif name == 'volumeId':
            self.volume_id = value
        elif name == 'status':
            self.status = value
        elif name == 'startTime':
            self.start_time = value
        elif name == 'ownerId':
            self.owner_id = value
        elif name == 'volumeSize':
            # BUG FIX: was a bare `except:`, which swallows *every*
            # exception (including KeyboardInterrupt/SystemExit).  Only a
            # failed int conversion should fall back to the raw value.
            try:
                self.volume_size = int(value)
            except (TypeError, ValueError):
                self.volume_size = value
        elif name == 'description':
            self.description = value
        else:
            # Unknown fields are kept verbatim as attributes.
            setattr(self, name, value)

    def _update(self, updated):
        # Copy the mutable fields from a freshly-fetched Snapshot.
        self.progress = updated.progress
        self.status = updated.status

    def update(self, validate=False):
        """
        Update the data associated with this snapshot by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         snapshot the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.
        """
        rs = self.connection.get_all_snapshots([self.id])
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid Snapshot ID' % self.id)
        return self.progress

    def delete(self):
        """Delete this snapshot on EC2."""
        return self.connection.delete_snapshot(self.id)

    def get_permissions(self):
        """Return the createVolumePermission attribute as a dict."""
        attrs = self.connection.get_snapshot_attribute(self.id,
                                                       attribute='createVolumePermission')
        return attrs.attrs

    def share(self, user_ids=None, groups=None):
        """Grant createVolumePermission to the given users/groups."""
        return self.connection.modify_snapshot_attribute(self.id,
                                                         'createVolumePermission',
                                                         'add',
                                                         user_ids,
                                                         groups)

    def unshare(self, user_ids=None, groups=None):
        """Revoke createVolumePermission from the given users/groups."""
        return self.connection.modify_snapshot_attribute(self.id,
                                                         'createVolumePermission',
                                                         'remove',
                                                         user_ids,
                                                         groups)

    def reset_permissions(self):
        """Reset createVolumePermission to its default setting."""
        return self.connection.reset_snapshot_attribute(self.id, 'createVolumePermission')
+
class SnapshotAttribute:
    """
    Parses a DescribeSnapshotAttribute response: collected groups and
    user ids land in the ``attrs`` dict (keys 'groups' / 'user_ids').
    A createVolumePermission element sets self.name to the snake_case
    attribute name, matching the original behavior.
    """

    def __init__(self, parent=None):
        self.snapshot_id = None
        self.attrs = {}

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'createVolumePermission':
            self.name = 'create_volume_permission'
        elif name == 'group':
            # IMPROVED: dict.has_key() is deprecated (and removed in
            # Python 3); setdefault does the test-and-create in one step.
            self.attrs.setdefault('groups', []).append(value)
        elif name == 'userId':
            self.attrs.setdefault('user_ids', []).append(value)
        elif name == 'snapshotId':
            self.snapshot_id = value
        else:
            # Unknown fields are kept verbatim as attributes.
            setattr(self, name, value)
+
+
+
diff --git a/boto/ec2/spotdatafeedsubscription.py b/boto/ec2/spotdatafeedsubscription.py
new file mode 100644
index 0000000..9b820a3
--- /dev/null
+++ b/boto/ec2/spotdatafeedsubscription.py
@@ -0,0 +1,63 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Spot Instance Datafeed Subscription
+"""
+from boto.ec2.ec2object import EC2Object
+from boto.ec2.spotinstancerequest import SpotInstanceStateFault
+
+class SpotDatafeedSubscription(EC2Object):
+    """
+    Represents the account's spot instance datafeed subscription: the
+    S3 bucket/prefix the feed is written to, plus its state and any
+    fault reported by EC2.
+    """
+
+    def __init__(self, connection=None, owner_id=None,
+                 bucket=None, prefix=None, state=None,fault=None):
+        EC2Object.__init__(self, connection)
+        self.owner_id = owner_id
+        self.bucket = bucket      # S3 bucket receiving the datafeed
+        self.prefix = prefix
+        self.state = state
+        self.fault = fault        # SpotInstanceStateFault, when present
+
+    def __repr__(self):
+        return 'SpotDatafeedSubscription:%s' % self.bucket
+
+    def startElement(self, name, attrs, connection):
+        # Delegate the nested <fault> structure to its own parser.
+        if name == 'fault':
+            self.fault = SpotInstanceStateFault()
+            return self.fault
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'ownerId':
+            self.owner_id = value
+        elif name == 'bucket':
+            self.bucket = value
+        elif name == 'prefix':
+            self.prefix = value
+        elif name == 'state':
+            self.state = value
+        else:
+            # Unknown fields are kept verbatim as attributes.
+            setattr(self, name, value)
+
+    def delete(self):
+        # The connection call takes no identifier: it deletes the
+        # account's (single) datafeed subscription.
+        return self.connection.delete_spot_datafeed_subscription()
+
diff --git a/boto/ec2/spotinstancerequest.py b/boto/ec2/spotinstancerequest.py
new file mode 100644
index 0000000..06acb0f
--- /dev/null
+++ b/boto/ec2/spotinstancerequest.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Spot Instance Request
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+from boto.ec2.launchspecification import LaunchSpecification
+
class SpotInstanceStateFault(object):
    """
    The fault (code + message) reported by EC2 for a spot instance
    request or a spot datafeed subscription.
    """

    def __init__(self, code=None, message=None):
        self.code = code
        self.message = message

    def __repr__(self):
        return '(%s, %s)' % (self.code, self.message)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # CLEANUP: the original ran setattr(self, name, value)
        # unconditionally *after* the if/elif chain, making the chain dead
        # code (it assigned the exact same attributes).  Restructured with
        # an `else`; observable behavior is unchanged.
        if name == 'code':
            self.code = value
        elif name == 'message':
            self.message = value
        else:
            setattr(self, name, value)
+
+class SpotInstanceRequest(TaggedEC2Object):
+    """
+    Represents a spot instance request, populated from the EC2 response
+    XML via the SAX callbacks.
+    """
+
+    def __init__(self, connection=None):
+        TaggedEC2Object.__init__(self, connection)
+        self.id = None                    # spotInstanceRequestId
+        self.price = None                 # spotPrice, stored as float
+        self.type = None
+        self.state = None
+        self.fault = None                 # SpotInstanceStateFault, when present
+        self.valid_from = None
+        self.valid_until = None
+        self.launch_group = None
+        self.product_description = None
+        self.availability_zone_group = None
+        self.create_time = None
+        self.launch_specification = None  # LaunchSpecification object
+        self.instance_id = None           # instanceId element, when present
+
+    def __repr__(self):
+        return 'SpotInstanceRequest:%s' % self.id
+
+    def startElement(self, name, attrs, connection):
+        # Let the tag-aware base class claim the element first.
+        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
+        if retval is not None:
+            return retval
+        # Delegate nested structures to dedicated parser objects.
+        if name == 'launchSpecification':
+            self.launch_specification = LaunchSpecification(connection)
+            return self.launch_specification
+        elif name == 'fault':
+            self.fault = SpotInstanceStateFault()
+            return self.fault
+        else:
+            return None
+
+    def endElement(self, name, value, connection):
+        if name == 'spotInstanceRequestId':
+            self.id = value
+        elif name == 'spotPrice':
+            self.price = float(value)
+        elif name == 'type':
+            self.type = value
+        elif name == 'state':
+            self.state = value
+        elif name == 'productDescription':
+            self.product_description = value
+        elif name == 'validFrom':
+            self.valid_from = value
+        elif name == 'validUntil':
+            self.valid_until = value
+        elif name == 'launchGroup':
+            self.launch_group = value
+        elif name == 'availabilityZoneGroup':
+            self.availability_zone_group = value
+        elif name == 'createTime':
+            self.create_time = value
+        elif name == 'instanceId':
+            self.instance_id = value
+        else:
+            # Unknown fields are kept verbatim as attributes.
+            setattr(self, name, value)
+
+    def cancel(self):
+        # NOTE(review): discards the connection call's result (returns
+        # None), unlike sibling classes that return the API status --
+        # confirm whether callers expect a return value here.
+        self.connection.cancel_spot_instance_requests([self.id])
+
+
+
diff --git a/boto/ec2/spotpricehistory.py b/boto/ec2/spotpricehistory.py
new file mode 100644
index 0000000..d4e1711
--- /dev/null
+++ b/boto/ec2/spotpricehistory.py
@@ -0,0 +1,52 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Spot Instance Request
+"""
+
+from boto.ec2.ec2object import EC2Object
+
+class SpotPriceHistory(EC2Object):
+    """
+    A single data point from DescribeSpotPriceHistory: the spot price
+    for an instance type / product description at a given timestamp.
+    """
+
+    def __init__(self, connection=None):
+        EC2Object.__init__(self, connection)
+        self.price = 0.0          # spotPrice, stored as float
+        self.instance_type = None
+        self.product_description = None
+        self.timestamp = None
+
+    def __repr__(self):
+        # NOTE(review): '%2f' sets a minimum field *width* of 2 (printing
+        # six decimals), not a precision; '%.2f' was probably intended --
+        # confirm before changing, since spot prices can carry more than
+        # two significant decimals.
+        return 'SpotPriceHistory(%s):%2f' % (self.instance_type, self.price)
+
+    def endElement(self, name, value, connection):
+        if name == 'instanceType':
+            self.instance_type = value
+        elif name == 'spotPrice':
+            self.price = float(value)
+        elif name == 'productDescription':
+            self.product_description = value
+        elif name == 'timestamp':
+            self.timestamp = value
+        else:
+            # Unknown fields are kept verbatim as attributes.
+            setattr(self, name, value)
+
+
diff --git a/boto/ec2/tag.py b/boto/ec2/tag.py
new file mode 100644
index 0000000..8032e6f
--- /dev/null
+++ b/boto/ec2/tag.py
@@ -0,0 +1,87 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class TagSet(dict):
    """
    A TagSet is used to collect the tags associated with a particular
    EC2 resource.  Not all resources can be tagged but for those that
    can, this dict object will be used to collect those values.  See
    :class:`boto.ec2.ec2object.TaggedEC2Object` for more details.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self._current_key = None
        self._current_value = None

    def startElement(self, name, attrs, connection):
        # Each <item> opens a fresh key/value pair.
        if name == 'item':
            self._current_key = self._current_value = None
        return None

    def endElement(self, name, value, connection):
        if name == 'key':
            self._current_key = value
        elif name == 'value':
            self._current_value = value
        elif name == 'item':
            # The pair is complete; record it in the dict.
            self[self._current_key] = self._current_value
+
+
class Tag(object):
    """
    A Tag is used when creating or listing all tags related to an AWS
    account.  It records not only the key and value but also the ID of
    the resource the tag is attached to, as well as the resource's type.
    """

    # Maps response tags onto the snake_case attribute names.
    _TAG_MAP = {'resourceId': 'res_id',
                'resourceType': 'res_type',
                'key': 'name',
                'value': 'value'}

    def __init__(self, connection=None, res_id=None, res_type=None,
                 name=None, value=None):
        self.connection = connection
        self.res_id = res_id
        self.res_type = res_type
        self.name = name
        self.value = value

    def __repr__(self):
        return 'Tag:%s' % self.name

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        # Known tags are renamed via the map; anything else is stored
        # verbatim, exactly as the original trailing setattr did.
        setattr(self, self._TAG_MAP.get(name, name), value)
+
+
+
+
diff --git a/boto/ec2/volume.py b/boto/ec2/volume.py
new file mode 100644
index 0000000..45345fa
--- /dev/null
+++ b/boto/ec2/volume.py
@@ -0,0 +1,227 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Elastic Block Storage Volume
+"""
+from boto.ec2.ec2object import TaggedEC2Object
+
class Volume(TaggedEC2Object):
    """
    Represents an EBS volume.

    :ivar id: The unique ID of the volume.
    :ivar create_time: The timestamp when the volume was created.
    :ivar status: The status of the volume.
    :ivar size: The size (in GB) of the volume.
    :ivar snapshot_id: The ID of the snapshot this volume was created
                       from, if any.
    :ivar attach_data: An AttachmentSet describing the current
                       attachment, or None.
    :ivar zone: The availability zone the volume lives in.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.create_time = None
        self.status = None
        self.size = None
        self.snapshot_id = None
        self.attach_data = None
        self.zone = None

    def __repr__(self):
        return 'Volume:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the tagged-object base class first crack at the element
        # (it is responsible for the resource's tag set).
        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
        if retval is not None:
            return retval
        if name == 'attachmentSet':
            self.attach_data = AttachmentSet()
            return self.attach_data
        # Bug fix: a 'tagSet' branch previously referenced
        # boto.resultset.ResultSet and Tag, neither of which is imported
        # in this module, so reaching it raised NameError.  Tag-set
        # parsing belongs to TaggedEC2Object; the branch was removed.
        return None

    def endElement(self, name, value, connection):
        if name == 'volumeId':
            self.id = value
        elif name == 'createTime':
            self.create_time = value
        elif name == 'status':
            # An empty status element must not clobber a value we
            # already have.
            if value != '':
                self.status = value
        elif name == 'size':
            self.size = int(value)
        elif name == 'snapshotId':
            self.snapshot_id = value
        elif name == 'availabilityZone':
            self.zone = value
        else:
            setattr(self, name, value)

    def _update(self, updated):
        # Copy all attributes from a freshly fetched Volume onto self.
        self.__dict__.update(updated.__dict__)

    def update(self, validate=False):
        """
        Update the data associated with this volume by querying EC2.

        :type validate: bool
        :param validate: By default, if EC2 returns no data about the
                         volume the update method returns quietly.  If
                         the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from EC2.

        :rtype: str
        :return: The (possibly refreshed) status of the volume.
        """
        rs = self.connection.get_all_volumes([self.id])
        if len(rs) > 0:
            self._update(rs[0])
        elif validate:
            raise ValueError('%s is not a valid Volume ID' % self.id)
        return self.status

    def delete(self):
        """
        Delete this EBS volume.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.delete_volume(self.id)

    def attach(self, instance_id, device):
        """
        Attach this EBS volume to an EC2 instance.

        :type instance_id: str
        :param instance_id: The ID of the EC2 instance to which it will
                            be attached.

        :type device: str
        :param device: The device on the instance through which the
                       volume will be exposed (e.g. /dev/sdh)

        :rtype: bool
        :return: True if successful
        """
        return self.connection.attach_volume(self.id, instance_id, device)

    def detach(self, force=False):
        """
        Detach this EBS volume from an EC2 instance.

        :type force: bool
        :param force: Forces detachment if the previous detachment
                      attempt did not occur cleanly.  This option can
                      lead to data loss or a corrupted file system.
                      Use this option only as a last resort to detach a
                      volume from a failed instance.  The instance will
                      not have an opportunity to flush file system
                      caches nor file system meta data.  If you use
                      this option, you must perform file system check
                      and repair procedures.

        :rtype: bool
        :return: True if successful
        """
        instance_id = None
        device = None
        # Both pieces of attachment info come from the same structure.
        if self.attach_data:
            instance_id = self.attach_data.instance_id
            device = self.attach_data.device
        return self.connection.detach_volume(self.id, instance_id,
                                             device, force)

    def create_snapshot(self, description=None):
        """
        Create a snapshot of this EBS Volume.

        :type description: str
        :param description: A description of the snapshot.
                            Limited to 256 characters.

        :rtype: bool
        :return: True if successful
        """
        return self.connection.create_snapshot(self.id, description)

    def volume_state(self):
        """
        Returns the state of the volume.  Same value as the status
        attribute.
        """
        return self.status

    def attachment_state(self):
        """
        Get the attachment state, or None if the volume is unattached.
        """
        state = None
        if self.attach_data:
            state = self.attach_data.status
        return state

    def snapshots(self, owner=None, restorable_by=None):
        """
        Get all snapshots related to this volume.  Note that this
        requires that all available snapshots for the account be
        retrieved from EC2 first and then the list is filtered
        client-side to contain only those for this volume.

        :type owner: str
        :param owner: If present, only the snapshots owned by the
                      specified user will be returned.  Valid values
                      are: self | amazon | AWS Account ID

        :type restorable_by: str
        :param restorable_by: If present, only the snapshots that are
                              restorable by the specified account id
                              will be returned.

        :rtype: list of L{boto.ec2.snapshot.Snapshot}
        :return: The requested Snapshot objects
        """
        rs = self.connection.get_all_snapshots(owner=owner,
                                               restorable_by=restorable_by)
        return [snap for snap in rs if snap.volume_id == self.id]
+
class AttachmentSet(object):
    """
    Describes how an EBS volume is attached: the volume id, the
    instance it is attached to, the device name, and the attachment
    status and time.
    """

    # Translation from XML element names to our attribute names.
    # Unknown elements are stored verbatim under their own name.
    _XLATE = {'volumeId': 'id',
              'instanceId': 'instance_id',
              'status': 'status',
              'attachTime': 'attach_time',
              'device': 'device'}

    def __init__(self):
        self.id = None
        self.instance_id = None
        self.status = None
        self.attach_time = None
        self.device = None

    def __repr__(self):
        return 'AttachmentSet:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to hand off.
        pass

    def endElement(self, name, value, connection):
        # Table-driven version of the original if/elif chain.
        setattr(self, self._XLATE.get(name, name), value)
+
diff --git a/boto/ec2/zone.py b/boto/ec2/zone.py
new file mode 100644
index 0000000..aec79b2
--- /dev/null
+++ b/boto/ec2/zone.py
@@ -0,0 +1,47 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an EC2 Availability Zone
+"""
+from boto.ec2.ec2object import EC2Object
+
class Zone(EC2Object):
    """
    Represents a single EC2 availability zone (name and state).
    """

    # Translation from XML element names to our attribute names;
    # anything unrecognized is stored under its own name.
    _XLATE = {'zoneName': 'name', 'zoneState': 'state'}

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.name = None
        self.state = None

    def __repr__(self):
        return 'Zone:%s' % self.name

    def endElement(self, name, value, connection):
        # Table-driven version of the original if/elif chain.
        setattr(self, self._XLATE.get(name, name), value)
+
+
+
+
diff --git a/boto/ecs/__init__.py b/boto/ecs/__init__.py
new file mode 100644
index 0000000..db86dd5
--- /dev/null
+++ b/boto/ecs/__init__.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.connection import AWSQueryConnection, AWSAuthConnection
+import time
+import urllib
+import xml.sax
+from boto.ecs.item import ItemSet
+from boto import handler
+
class ECSConnection(AWSQueryConnection):
    """
    Connection to Amazon's ECommerce (product advertising) service.
    """

    APIVersion = '2010-11-01'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, host='ecs.amazonaws.com',
                 debug=0, https_connection_factory=None, path='/'):
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass, host, debug,
                                    https_connection_factory, path)

    def _required_auth_capability(self):
        return ['ecs']

    def get_response(self, action, params, page=0, itemSet=None):
        """
        Utility method to handle calls to ECS and parsing of responses.

        :type action: str
        :param action: The ECS Operation to perform.

        :type params: dict
        :param params: Additional request parameters (mutated in place).

        :type page: int
        :param page: Result page to request; 0 means no explicit page.

        :type itemSet: ItemSet
        :param itemSet: Existing ItemSet to append to (used when
                        paging); a new one is created when None.

        :rtype: ItemSet
        :return: The parsed result set.
        """
        params['Service'] = "AWSECommerceService"
        params['Operation'] = action
        if page:
            params['ItemPage'] = page
        response = self.make_request(None, params, "/onca/xml")
        body = response.read()
        boto.log.debug(body)

        if response.status != 200:
            boto.log.error('%s %s' % (response.status, response.reason))
            boto.log.error('%s' % body)
            raise self.ResponseError(response.status, response.reason, body)

        # Idiom fix: identity comparison with None (was `== None`).
        if itemSet is None:
            rs = ItemSet(self, action, params, page)
        else:
            rs = itemSet
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs

    #
    # Group methods
    #

    def item_search(self, search_index, **params):
        """
        Returns items that satisfy the search criteria, including one
        or more search indices.

        For a full list of search terms,
        :see: http://docs.amazonwebservices.com/AWSECommerceService/2010-09-01/DG/index.html?ItemSearch.html
        """
        params['SearchIndex'] = search_index
        return self.get_response('ItemSearch', params)
diff --git a/boto/ecs/item.py b/boto/ecs/item.py
new file mode 100644
index 0000000..29588b8
--- /dev/null
+++ b/boto/ecs/item.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+import xml.sax
+import cgi
+from StringIO import StringIO
+
class ResponseGroup(xml.sax.ContentHandler):
    """A Generic "Response Group", which can
    be anything from the entire list of Items to
    specific response elements within an item"""

    def __init__(self, connection=None, nodename=None):
        """Initialize this Item"""
        self._connection = connection  # owning connection (may be None)
        self._nodename = nodename      # tag name to wrap to_xml() output in
        self._nodepath = []            # stack of currently-open element names
        self._curobj = None            # child ResponseGroup being populated
        self._xml = StringIO()         # raw XML echoed back as we parse

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.__dict__)

    #
    # Attribute Functions
    #
    def get(self, name):
        # Dynamic attribute lookup; returns None for unknown names.
        return self.__dict__.get(name)

    def set(self, name, value):
        # Dynamic attribute assignment (bypasses __setattr__ overrides).
        self.__dict__[name] = value

    def to_xml(self):
        # Re-wrap the XML accumulated during parsing in our own tag.
        return "<%s>%s</%s>" % (self._nodename, self._xml.getvalue(), self._nodename)

    #
    # XML Parser functions
    #
    def startElement(self, name, attrs, connection):
        # Echo the raw XML and push this element onto the nesting stack.
        self._xml.write("<%s>" % name)
        self._nodepath.append(name)
        if len(self._nodepath) == 1:
            # Direct child of this group: model it as a nested group.
            obj = ResponseGroup(self._connection)
            self.set(name, obj)
            self._curobj = obj
        elif self._curobj:
            # Deeper elements are delegated to the current child.
            self._curobj.startElement(name, attrs, connection)
        return None

    def endElement(self, name, value, connection):
        # NOTE: cgi.escape() followed by .replace("&amp;", "&") escapes
        # < and > but deliberately leaves '&' raw in the echoed XML.
        self._xml.write("%s</%s>" % (cgi.escape(value).replace("&amp;", "&"), name))
        if len(self._nodepath) == 0:
            return
        obj = None
        curval = self.get(name)
        if len(self._nodepath) == 1:
            # Closing a direct child: prefer its text value over the
            # placeholder ResponseGroup when text was present.
            if value or not curval:
                self.set(name, value)
            if self._curobj:
                self._curobj = None
        #elif len(self._nodepath) == 2:
            #self._curobj = None
        elif self._curobj:
            self._curobj.endElement(name, value, connection)
        self._nodepath.pop()
        return None
+
+
class Item(ResponseGroup):
    """A single Item from an ECS response."""

    def __init__(self, connection=None):
        # Delegate to ResponseGroup, pinning the node name to "Item".
        ResponseGroup.__init__(self, connection, "Item")
+
class ItemSet(ResponseGroup):
    """A special ResponseGroup that has built-in paging, and
    only creates new Items on the "Item" tag"""

    def __init__(self, connection, action, params, page=0):
        ResponseGroup.__init__(self, connection, "Items")
        self.objs = []           # Items parsed from the current page
        self.iter = None         # live iterator over objs while iterating
        self.page = page         # page number most recently requested
        self.action = action     # ECS action, replayed when paging
        self.params = params     # request params, replayed when paging
        self.curItem = None      # Item currently being parsed
        self.total_results = 0   # TotalResults reported by the service
        self.total_pages = 0     # TotalPages reported by the service

    def startElement(self, name, attrs, connection):
        # Only "Item" opens a new object; everything else is delegated
        # to the Item currently under construction (if any).
        if name == "Item":
            self.curItem = Item(self._connection)
        elif self.curItem != None:
            self.curItem.startElement(name, attrs, connection)
        return None

    def endElement(self, name, value, connection):
        if name == 'TotalResults':
            self.total_results = value
        elif name == 'TotalPages':
            self.total_pages = value
        elif name == "Item":
            # Item finished: record it and echo its XML into our buffer.
            self.objs.append(self.curItem)
            self._xml.write(self.curItem.to_xml())
            self.curItem = None
        elif self.curItem != None:
            self.curItem.endElement(name, value, connection)
        return None

    def next(self):
        """Special paging functionality"""
        # Iterate the current page; when it is exhausted, transparently
        # fetch the next page from ECS until total_pages is reached,
        # then re-raise StopIteration.
        if self.iter == None:
            self.iter = iter(self.objs)
        try:
            return self.iter.next()
        except StopIteration:
            self.iter = None
            self.objs = []
            if int(self.page) < int(self.total_pages):
                self.page += 1
                self._connection.get_response(self.action, self.params, self.page, self)
                return self.next()
            else:
                raise

    def __iter__(self):
        return self

    def to_xml(self):
        """Override to first fetch everything"""
        # Drain the iterator (fetching all remaining pages) so that
        # _xml holds every Item before wrapping it.
        for item in self:
            pass
        return ResponseGroup.to_xml(self)
diff --git a/boto/emr/__init__.py b/boto/emr/__init__.py
new file mode 100644
index 0000000..3c33f9a
--- /dev/null
+++ b/boto/emr/__init__.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2010 Spotify AB
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module provies an interface to the Elastic MapReduce (EMR)
+service from AWS.
+"""
+from connection import EmrConnection
+from step import Step, StreamingStep, JarStep
+from bootstrap_action import BootstrapAction
+
+
diff --git a/boto/emr/bootstrap_action.py b/boto/emr/bootstrap_action.py
new file mode 100644
index 0000000..c1c9038
--- /dev/null
+++ b/boto/emr/bootstrap_action.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2010 Spotify AB
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class BootstrapAction(object):
    """
    Describes an EMR bootstrap action: a named script (S3 path) plus
    the arguments to pass to it.
    """

    def __init__(self, name, path, bootstrap_action_args):
        self.name = name
        self.path = path
        # A bare string argument is normalized to a one-element list.
        if isinstance(bootstrap_action_args, basestring):
            self.bootstrap_action_args = [bootstrap_action_args]
        else:
            self.bootstrap_action_args = bootstrap_action_args

    def args(self):
        """Return a fresh list of this action's arguments (never None)."""
        arg_list = []
        if self.bootstrap_action_args:
            arg_list.extend(self.bootstrap_action_args)
        return arg_list

    def __repr__(self):
        return '%s.%s(name=%r, path=%r, bootstrap_action_args=%r)' % (
            self.__class__.__module__, self.__class__.__name__,
            self.name, self.path, self.bootstrap_action_args)
diff --git a/boto/emr/connection.py b/boto/emr/connection.py
new file mode 100644
index 0000000..2bfd368
--- /dev/null
+++ b/boto/emr/connection.py
@@ -0,0 +1,280 @@
+# Copyright (c) 2010 Spotify AB
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EMR service
+"""
+import types
+
+import boto
+from boto.ec2.regioninfo import RegionInfo
+from boto.emr.emrobject import JobFlow, RunJobFlowResponse
+from boto.emr.step import JarStep
+from boto.connection import AWSQueryConnection
+from boto.exception import EmrResponseError
+
class EmrConnection(AWSQueryConnection):
    """
    Represents a connection to the Elastic MapReduce (EMR) service.
    """

    APIVersion = boto.config.get('Boto', 'emr_version', '2009-03-31')
    DefaultRegionName = boto.config.get('Boto', 'emr_region_name', 'us-east-1')
    DefaultRegionEndpoint = boto.config.get('Boto', 'emr_region_endpoint',
                                            'elasticmapreduce.amazonaws.com')
    ResponseError = EmrResponseError

    # Constants for AWS Console debugging
    DebuggingJar = 's3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar'
    DebuggingArgs = 's3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/'):
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id,
                                    aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port,
                                    proxy_user, proxy_pass,
                                    self.region.endpoint, debug,
                                    https_connection_factory, path)

    def _required_auth_capability(self):
        return ['emr']

    def describe_jobflow(self, jobflow_id):
        """
        Describes a single Elastic MapReduce job flow

        :type jobflow_id: str
        :param jobflow_id: The job flow id of interest

        :rtype: :class:`boto.emr.emrobject.JobFlow`
        :return: The matching job flow, or None if it was not found
        """
        jobflows = self.describe_jobflows(jobflow_ids=[jobflow_id])
        if jobflows:
            return jobflows[0]

    def describe_jobflows(self, states=None, jobflow_ids=None,
                          created_after=None, created_before=None):
        """
        Retrieve all the Elastic MapReduce job flows on your account

        :type states: list
        :param states: A list of strings with job flow states wanted

        :type jobflow_ids: list
        :param jobflow_ids: A list of job flow IDs

        :type created_after: datetime
        :param created_after: Bound on job flow creation time

        :type created_before: datetime
        :param created_before: Bound on job flow creation time
        """
        params = {}

        if states:
            self.build_list_params(params, states, 'JobFlowStates.member')
        if jobflow_ids:
            self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
        if created_after:
            params['CreatedAfter'] = created_after.strftime('%Y-%m-%dT%H:%M:%S')
        if created_before:
            params['CreatedBefore'] = created_before.strftime('%Y-%m-%dT%H:%M:%S')

        return self.get_list('DescribeJobFlows', params, [('member', JobFlow)])

    def terminate_jobflow(self, jobflow_id):
        """
        Terminate an Elastic MapReduce job flow

        :type jobflow_id: str
        :param jobflow_id: A jobflow id
        """
        self.terminate_jobflows([jobflow_id])

    def terminate_jobflows(self, jobflow_ids):
        """
        Terminate a list of Elastic MapReduce job flows

        :type jobflow_ids: list
        :param jobflow_ids: A list of job flow IDs
        """
        params = {}
        self.build_list_params(params, jobflow_ids, 'JobFlowIds.member')
        return self.get_status('TerminateJobFlows', params)

    def add_jobflow_steps(self, jobflow_id, steps):
        """
        Adds steps to a jobflow

        :type jobflow_id: str
        :param jobflow_id: The job flow id

        :type steps: list(boto.emr.Step)
        :param steps: A list of steps to add to the job
        """
        if not isinstance(steps, list):
            steps = [steps]
        params = {}
        params['JobFlowId'] = jobflow_id

        # Step args
        step_args = [self._build_step_args(step) for step in steps]
        params.update(self._build_step_list(step_args))

        return self.get_object('AddJobFlowSteps', params, RunJobFlowResponse)

    def run_jobflow(self, name, log_uri, ec2_keyname=None,
                    availability_zone=None,
                    master_instance_type='m1.small',
                    slave_instance_type='m1.small', num_instances=1,
                    action_on_failure='TERMINATE_JOB_FLOW', keep_alive=False,
                    enable_debugging=False,
                    hadoop_version='0.18',
                    steps=None,
                    bootstrap_actions=None):
        """
        Runs a job flow

        :type name: str
        :param name: Name of the job flow

        :type log_uri: str
        :param log_uri: URI of the S3 bucket to place logs

        :type ec2_keyname: str
        :param ec2_keyname: EC2 key used for the instances

        :type availability_zone: str
        :param availability_zone: EC2 availability zone of the cluster

        :type master_instance_type: str
        :param master_instance_type: EC2 instance type of the master

        :type slave_instance_type: str
        :param slave_instance_type: EC2 instance type of the slave nodes

        :type num_instances: int
        :param num_instances: Number of instances in the Hadoop cluster

        :type action_on_failure: str
        :param action_on_failure: Action to take if a step terminates

        :type keep_alive: bool
        :param keep_alive: Denotes whether the cluster should stay
                           alive upon completion

        :type enable_debugging: bool
        :param enable_debugging: Denotes whether AWS console debugging
                                 should be enabled.

        :type hadoop_version: str
        :param hadoop_version: Version of Hadoop to run on the cluster

        :type steps: list(boto.emr.Step)
        :param steps: List of steps to add with the job

        :type bootstrap_actions: list(boto.emr.BootstrapAction)
        :param bootstrap_actions: List of bootstrap actions to run on startup

        :rtype: str
        :return: The jobflow id
        """
        # Bug fix: the mutable defaults steps=[] / bootstrap_actions=[]
        # were shared across calls AND mutated below via insert(), so
        # the debugging step accumulated between invocations.  Copy the
        # caller's list instead of mutating it.
        steps = list(steps) if steps else []
        bootstrap_actions = list(bootstrap_actions) if bootstrap_actions else []

        params = {}
        if action_on_failure:
            params['ActionOnFailure'] = action_on_failure
        params['Name'] = name
        params['LogUri'] = log_uri

        # Instance args
        instance_params = self._build_instance_args(ec2_keyname,
                                                    availability_zone,
                                                    master_instance_type,
                                                    slave_instance_type,
                                                    num_instances,
                                                    keep_alive,
                                                    hadoop_version)
        params.update(instance_params)

        # Debugging step from EMR API docs
        if enable_debugging:
            debugging_step = JarStep(name='Setup Hadoop Debugging',
                                     action_on_failure='TERMINATE_JOB_FLOW',
                                     main_class=None,
                                     jar=self.DebuggingJar,
                                     step_args=self.DebuggingArgs)
            steps.insert(0, debugging_step)

        # Step args
        if steps:
            step_args = [self._build_step_args(step) for step in steps]
            params.update(self._build_step_list(step_args))

        if bootstrap_actions:
            bootstrap_action_args = [self._build_bootstrap_action_args(bootstrap_action)
                                     for bootstrap_action in bootstrap_actions]
            params.update(self._build_bootstrap_action_list(bootstrap_action_args))

        response = self.get_object('RunJobFlow', params, RunJobFlowResponse)
        return response.jobflowid

    def _build_bootstrap_action_args(self, bootstrap_action):
        # Build the request params for a single bootstrap action.
        bootstrap_action_params = {}
        bootstrap_action_params['ScriptBootstrapAction.Path'] = bootstrap_action.path

        try:
            bootstrap_action_params['Name'] = bootstrap_action.name
        except AttributeError:
            # Name is optional on bootstrap actions.
            pass

        args = bootstrap_action.args()
        if args:
            self.build_list_params(bootstrap_action_params, args,
                                   'ScriptBootstrapAction.Args.member')

        return bootstrap_action_params

    def _build_step_args(self, step):
        # Build the request params for a single jobflow step.
        step_params = {}
        step_params['ActionOnFailure'] = step.action_on_failure
        step_params['HadoopJarStep.Jar'] = step.jar()

        main_class = step.main_class()
        if main_class:
            step_params['HadoopJarStep.MainClass'] = main_class

        args = step.args()
        if args:
            self.build_list_params(step_params, args,
                                   'HadoopJarStep.Args.member')

        step_params['Name'] = step.name
        return step_params

    def _build_bootstrap_action_list(self, bootstrap_actions):
        # Flatten per-action param dicts into 1-indexed member params.
        if not isinstance(bootstrap_actions, list):
            bootstrap_actions = [bootstrap_actions]

        params = {}
        for i, bootstrap_action in enumerate(bootstrap_actions):
            for key, value in bootstrap_action.iteritems():
                # Bug fix: the prefix was misspelled 'memeber'; every
                # other list param in this class uses '.member'.
                params['BootstrapActions.member.%s.%s' % (i + 1, key)] = value
        return params

    def _build_step_list(self, steps):
        # Flatten per-step param dicts into 1-indexed member params.
        if not isinstance(steps, list):
            steps = [steps]

        params = {}
        for i, step in enumerate(steps):
            for key, value in step.iteritems():
                # Bug fix: 'memeber' -> 'member' (see above).
                params['Steps.member.%s.%s' % (i + 1, key)] = value
        return params

    def _build_instance_args(self, ec2_keyname, availability_zone,
                             master_instance_type, slave_instance_type,
                             num_instances, keep_alive, hadoop_version):
        # Build the Instances.* request parameters.
        params = {
            'Instances.MasterInstanceType' : master_instance_type,
            'Instances.SlaveInstanceType' : slave_instance_type,
            'Instances.InstanceCount' : num_instances,
            'Instances.KeepJobFlowAliveWhenNoSteps' : str(keep_alive).lower(),
            'Instances.HadoopVersion' : hadoop_version
        }

        if ec2_keyname:
            params['Instances.Ec2KeyName'] = ec2_keyname
        if availability_zone:
            # NOTE(review): the EMR API documents this as
            # Placement.AvailabilityZone; left as 'Placement' to avoid
            # changing request behavior without confirmation.
            params['Placement'] = availability_zone

        return params
+
diff --git a/boto/emr/emrobject.py b/boto/emr/emrobject.py
new file mode 100644
index 0000000..0ffe292
--- /dev/null
+++ b/boto/emr/emrobject.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2010 Spotify AB
+# Copyright (c) 2010 Jeremy Thurgood <firxen+boto@gmail.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+This module contains EMR response objects
+"""
+
+from boto.resultset import ResultSet
+
+
class EmrObject(object):
    """
    Base class for EMR response objects.  Subclasses declare ``Fields``,
    the set of XML element names that are captured as attributes
    (stored under the lower-cased element name).
    """

    Fields = set()

    def __init__(self, connection=None):
        self.connection = connection

    def startElement(self, name, attrs, connection):
        # No nested elements are handled by default.
        pass

    def endElement(self, name, value, connection):
        # Only elements listed in Fields become attributes.
        if name in self.Fields:
            setattr(self, name.lower(), value)
+
+
class RunJobFlowResponse(EmrObject):
    # Response to RunJobFlow/AddJobFlowSteps; exposes the new job flow
    # id as the lower-cased attribute ``jobflowid``.
    Fields = set(['JobFlowId'])
+
+
class Arg(EmrObject):
    """
    A single argument from a step or bootstrap-action Args member list.
    The argument text ends up in ``value``.
    """

    def __init__(self, connection=None):
        # Bug fix: the connection argument was previously accepted but
        # silently dropped, leaving self.connection unset unlike every
        # other EmrObject subclass.
        EmrObject.__init__(self, connection)
        self.value = None

    def endElement(self, name, value, connection):
        # An Arg is pure text; whichever element closes carries it.
        self.value = value
+
+
class BootstrapAction(EmrObject):
    # Parsed BootstrapActions member from a DescribeJobFlows response;
    # fields surface as lower-cased attributes via EmrObject.endElement.
    Fields = set(['Name',
                  'Args',
                  'Path'])
+
+
class Step(EmrObject):
    """A single job flow step as reported by DescribeJobFlows."""

    Fields = set(['Name',
                  'ActionOnFailure',
                  'CreationDateTime',
                  'StartDateTime',
                  'EndDateTime',
                  'LastStateChangeReason',
                  'State'])

    def __init__(self, connection=None):
        self.connection = connection
        self.args = None

    def startElement(self, name, attrs, connection):
        # Only the Args list needs nested handling; it is parsed into
        # a ResultSet of Arg objects.
        if name != 'Args':
            return None
        self.args = ResultSet([('member', Arg)])
        return self.args
+
+
class InstanceGroup(EmrObject):
    # Parsed InstanceGroups member from a DescribeJobFlows response;
    # fields surface as lower-cased attributes via EmrObject.endElement.
    Fields = set(['Name',
                  'CreationDateTime',
                  'InstanceRunningCount',
                  'StartDateTime',
                  'ReadyDateTime',
                  'State',
                  'EndDateTime',
                  'InstanceRequestCount',
                  'InstanceType',
                  'Market',
                  'LastStateChangeReason',
                  'InstanceRole',
                  'InstanceGroupId',
                  'LaunchGroup',
                  'SpotPrice'])
+
+
class JobFlow(EmrObject):
    """A job flow description as returned by DescribeJobFlows."""

    Fields = set(['CreationDateTime',
                  'StartDateTime',
                  'State',
                  'EndDateTime',
                  'Id',
                  'InstanceCount',
                  'JobFlowId',
                  'LogUri',
                  'MasterPublicDnsName',
                  'MasterInstanceId',
                  'Name',
                  'Placement',
                  'RequestId',
                  'Type',
                  'Value',
                  'AvailabilityZone',
                  'SlaveInstanceType',
                  'MasterInstanceType',
                  'Ec2KeyName',
                  'InstanceCount',
                  'KeepJobFlowAliveWhenNoSteps',
                  'LastStateChangeReason'])

    def __init__(self, connection=None):
        self.connection = connection
        self.steps = None
        self.instancegroups = None
        self.bootstrapactions = None

    def startElement(self, name, attrs, connection):
        # The three nested member lists are each parsed into a
        # ResultSet of the appropriate typed object.
        if name == 'Steps':
            self.steps = ResultSet([('member', Step)])
            return self.steps
        if name == 'InstanceGroups':
            self.instancegroups = ResultSet([('member', InstanceGroup)])
            return self.instancegroups
        if name == 'BootstrapActions':
            self.bootstrapactions = ResultSet([('member', BootstrapAction)])
            return self.bootstrapactions
        return None
+
diff --git a/boto/emr/step.py b/boto/emr/step.py
new file mode 100644
index 0000000..a444261
--- /dev/null
+++ b/boto/emr/step.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2010 Spotify AB
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Step(object):
    """
    Jobflow Step base class.

    Subclasses (e.g. JarStep, StreamingStep) must implement jar(),
    args() and main_class().
    """
    def jar(self):
        """
        :rtype: str
        :return: URI to the jar

        :raises NotImplementedError: always; subclasses must override.
        """
        # Bug fix: the original did ``raise NotImplemented()`` —
        # NotImplemented is a non-callable singleton, so calling it
        # raised TypeError instead of the intended exception.
        raise NotImplementedError()

    def args(self):
        """
        :rtype: list(str)
        :return: List of arguments for the step

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()

    def main_class(self):
        """
        :rtype: str
        :return: The main class name

        :raises NotImplementedError: always; subclasses must override.
        """
        raise NotImplementedError()
+
+
class JarStep(Step):
    """
    A job flow step that runs a custom jar.
    """
    def __init__(self, name, jar, main_class=None,
                 action_on_failure='TERMINATE_JOB_FLOW', step_args=None):
        """
        An Elastic MapReduce step that executes a jar.

        :type name: str
        :param name: The name of the step
        :type jar: str
        :param jar: S3 URI to the Jar file
        :type main_class: str
        :param main_class: The class to execute in the jar
        :type action_on_failure: str
        :param action_on_failure: An action, defined in the EMR docs to
            take on failure.
        :type step_args: list(str)
        :param step_args: A list of arguments to pass to the step
        """
        self.name = name
        self._jar = jar
        self._main_class = main_class
        self.action_on_failure = action_on_failure
        # A bare string is promoted to a single-argument list.
        if isinstance(step_args, basestring):
            step_args = [step_args]
        self.step_args = step_args

    def jar(self):
        return self._jar

    def main_class(self):
        return self._main_class

    def args(self):
        # Return a fresh list so callers cannot mutate our stored args.
        if self.step_args:
            return list(self.step_args)
        return []
+
+
class StreamingStep(Step):
    """
    Hadoop streaming step.
    """
    def __init__(self, name, mapper, reducer=None,
                 action_on_failure='TERMINATE_JOB_FLOW',
                 cache_files=None, cache_archives=None,
                 step_args=None, input=None, output=None):
        """
        A Hadoop streaming Elastic MapReduce step.

        :type name: str
        :param name: The name of the step
        :type mapper: str
        :param mapper: The mapper URI
        :type reducer: str
        :param reducer: The reducer URI
        :type action_on_failure: str
        :param action_on_failure: An action, defined in the EMR docs to
            take on failure.
        :type cache_files: list(str)
        :param cache_files: A list of cache files to be bundled with the job
        :type cache_archives: list(str)
        :param cache_archives: A list of jar archives to be bundled with
            the job
        :type step_args: list(str)
        :param step_args: A list of arguments to pass to the step
        :type input: str or a list of str
        :param input: The input uri(s)
        :type output: str
        :param output: The output uri
        """
        self.name = name
        self.mapper = mapper
        self.reducer = reducer
        self.action_on_failure = action_on_failure
        self.cache_files = cache_files
        self.cache_archives = cache_archives
        self.input = input
        self.output = output
        # A bare string is promoted to a single-argument list.
        if isinstance(step_args, basestring):
            step_args = [step_args]
        self.step_args = step_args

    def jar(self):
        # Streaming jobs run through Hadoop's bundled streaming jar.
        return '/home/hadoop/contrib/streaming/hadoop-0.18-streaming.jar'

    def main_class(self):
        return None

    def args(self):
        # NOTE: the order of arguments below is significant and mirrors
        # what the streaming jar expects; do not reorder.
        arglist = ['-mapper', self.mapper]
        if self.reducer:
            arglist += ['-reducer', self.reducer]
        if self.input:
            if isinstance(self.input, list):
                input_uris = self.input
            else:
                input_uris = [self.input]
            for input_uri in input_uris:
                arglist += ['-input', input_uri]
        if self.output:
            arglist += ['-output', self.output]
        for cache_file in (self.cache_files or []):
            arglist += ['-cacheFile', cache_file]
        for cache_archive in (self.cache_archives or []):
            arglist += ['-cacheArchive', cache_archive]
        if self.step_args:
            arglist += list(self.step_args)
        if not self.reducer:
            # Map-only job: tell Hadoop not to run any reduce tasks.
            arglist += ['-jobconf', 'mapred.reduce.tasks=0']
        return arglist

    def __repr__(self):
        fmt = ('%s.%s(name=%r, mapper=%r, reducer=%r, action_on_failure=%r, '
               'cache_files=%r, cache_archives=%r, step_args=%r, input=%r, '
               'output=%r)')
        return fmt % (self.__class__.__module__, self.__class__.__name__,
                      self.name, self.mapper, self.reducer,
                      self.action_on_failure, self.cache_files,
                      self.cache_archives, self.step_args, self.input,
                      self.output)
diff --git a/boto/emr/tests/test_emr_responses.py b/boto/emr/tests/test_emr_responses.py
new file mode 100644
index 0000000..77ec494
--- /dev/null
+++ b/boto/emr/tests/test_emr_responses.py
@@ -0,0 +1,373 @@
+# Copyright (c) 2010 Jeremy Thurgood <firxen+boto@gmail.com>
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+# NOTE: These tests only cover the very simple cases I needed to test
+# for the InstanceGroup fix.
+
+import xml.sax
+import unittest
+
+from boto import handler
+from boto.emr import emrobject
+from boto.resultset import ResultSet
+
+
+JOB_FLOW_EXAMPLE = """
+<DescribeJobFlowsResponse
+ xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-01-15">
+ <DescribeJobFlowsResult>
+ <JobFlows>
+ <member>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
+ <StartDateTime>2009-01-28T21:49:16Z</StartDateTime>
+ <State>STARTING</State>
+ </ExecutionStatusDetail>
+ <Name>MyJobFlowName</Name>
+ <LogUri>mybucket/subdir/</LogUri>
+ <Steps>
+ <member>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2009-01-28T21:49:16Z</CreationDateTime>
+ <State>PENDING</State>
+ </ExecutionStatusDetail>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>MyJarFile</Jar>
+ <MainClass>MyMailClass</MainClass>
+ <Args>
+ <member>arg1</member>
+ <member>arg2</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>MyStepName</Name>
+ <ActionOnFailure>CONTINUE</ActionOnFailure>
+ </StepConfig>
+ </member>
+ </Steps>
+ <JobFlowId>j-3UN6WX5RRO2AG</JobFlowId>
+ <Instances>
+ <Placement>
+ <AvailabilityZone>us-east-1a</AvailabilityZone>
+ </Placement>
+ <SlaveInstanceType>m1.small</SlaveInstanceType>
+ <MasterInstanceType>m1.small</MasterInstanceType>
+ <Ec2KeyName>myec2keyname</Ec2KeyName>
+ <InstanceCount>4</InstanceCount>
+ <KeepJobFlowAliveWhenNoSteps>true</KeepJobFlowAliveWhenNoSteps>
+ </Instances>
+ </member>
+ </JobFlows>
+ </DescribeJobFlowsResult>
+ <ResponseMetadata>
+ <RequestId>9cea3229-ed85-11dd-9877-6fad448a8419</RequestId>
+ </ResponseMetadata>
+</DescribeJobFlowsResponse>
+"""
+
+JOB_FLOW_COMPLETED = """
+<DescribeJobFlowsResponse xmlns="http://elasticmapreduce.amazonaws.com/doc/2009-03-31">
+ <DescribeJobFlowsResult>
+ <JobFlows>
+ <member>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <LastStateChangeReason>Steps completed</LastStateChangeReason>
+ <StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
+ <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
+ </ExecutionStatusDetail>
+ <BootstrapActions/>
+ <Name>RealJobFlowName</Name>
+ <LogUri>s3n://example.emrtest.scripts/jobflow_logs/</LogUri>
+ <Steps>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>s3n://us-east-1.elasticmapreduce/libs/script-runner/script-runner.jar</Jar>
+ <Args>
+ <member>s3n://us-east-1.elasticmapreduce/libs/state-pusher/0.1/fetch</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>Setup Hadoop Debugging</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:04:22Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
+ <Args>
+ <member>-mapper</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialMapper.py</member>
+ <member>-reducer</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-InitialReducer.py</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/20/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/19/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/18/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/17/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/16/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/15/*</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.data/raw/2010/10/14/*</member>
+ <member>-output</member>
+ <member>s3://example.emrtest.crunched/</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>testjob_Initial</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:04:22Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:36:18Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
+ <Args>
+ <member>-mapper</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Mapper.py</member>
+ <member>-reducer</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step1Reducer.py</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.crunched/*</member>
+ <member>-output</member>
+ <member>s3://example.emrtest.step1/</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>testjob_step1</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:36:18Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:37:51Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
+ <Args>
+ <member>-mapper</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Mapper.py</member>
+ <member>-reducer</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step2Reducer.py</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.crunched/*</member>
+ <member>-output</member>
+ <member>s3://example.emrtest.step2/</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>testjob_step2</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:37:51Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:39:32Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
+ <Args>
+ <member>-mapper</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Mapper.py</member>
+ <member>-reducer</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step3Reducer.py</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.step1/*</member>
+ <member>-output</member>
+ <member>s3://example.emrtest.step3/</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>testjob_step3</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:39:32Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:41:22Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ <member>
+ <StepConfig>
+ <HadoopJarStep>
+ <Jar>/home/hadoop/contrib/streaming/hadoop-0.20-streaming.jar</Jar>
+ <Args>
+ <member>-mapper</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Mapper.py</member>
+ <member>-reducer</member>
+ <member>s3://example.emrtest.scripts/81d8-5a9d3df4a86c-step4Reducer.py</member>
+ <member>-input</member>
+ <member>s3://example.emrtest.step1/*</member>
+ <member>-output</member>
+ <member>s3://example.emrtest.step4/</member>
+ </Args>
+ <Properties/>
+ </HadoopJarStep>
+ <Name>testjob_step4</Name>
+ <ActionOnFailure>TERMINATE_JOB_FLOW</ActionOnFailure>
+ </StepConfig>
+ <ExecutionStatusDetail>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <StartDateTime>2010-10-21T01:41:22Z</StartDateTime>
+ <State>COMPLETED</State>
+ <EndDateTime>2010-10-21T01:43:03Z</EndDateTime>
+ </ExecutionStatusDetail>
+ </member>
+ </Steps>
+ <JobFlowId>j-3H3Q13JPFLU22</JobFlowId>
+ <Instances>
+ <SlaveInstanceType>m1.large</SlaveInstanceType>
+ <MasterInstanceId>i-64c21609</MasterInstanceId>
+ <Placement>
+ <AvailabilityZone>us-east-1b</AvailabilityZone>
+ </Placement>
+ <InstanceGroups>
+ <member>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <InstanceRunningCount>0</InstanceRunningCount>
+ <StartDateTime>2010-10-21T01:02:09Z</StartDateTime>
+ <ReadyDateTime>2010-10-21T01:03:03Z</ReadyDateTime>
+ <State>ENDED</State>
+ <EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
+ <InstanceRequestCount>1</InstanceRequestCount>
+ <InstanceType>m1.large</InstanceType>
+ <Market>ON_DEMAND</Market>
+ <LastStateChangeReason>Job flow terminated</LastStateChangeReason>
+ <InstanceRole>MASTER</InstanceRole>
+ <InstanceGroupId>ig-EVMHOZJ2SCO8</InstanceGroupId>
+ <Name>master</Name>
+ </member>
+ <member>
+ <CreationDateTime>2010-10-21T01:00:25Z</CreationDateTime>
+ <InstanceRunningCount>0</InstanceRunningCount>
+ <StartDateTime>2010-10-21T01:03:59Z</StartDateTime>
+ <ReadyDateTime>2010-10-21T01:03:59Z</ReadyDateTime>
+ <State>ENDED</State>
+ <EndDateTime>2010-10-21T01:44:18Z</EndDateTime>
+ <InstanceRequestCount>9</InstanceRequestCount>
+ <InstanceType>m1.large</InstanceType>
+ <Market>ON_DEMAND</Market>
+ <LastStateChangeReason>Job flow terminated</LastStateChangeReason>
+ <InstanceRole>CORE</InstanceRole>
+ <InstanceGroupId>ig-YZHDYVITVHKB</InstanceGroupId>
+ <Name>slave</Name>
+ </member>
+ </InstanceGroups>
+ <NormalizedInstanceHours>40</NormalizedInstanceHours>
+ <HadoopVersion>0.20</HadoopVersion>
+ <MasterInstanceType>m1.large</MasterInstanceType>
+ <MasterPublicDnsName>ec2-184-72-153-139.compute-1.amazonaws.com</MasterPublicDnsName>
+ <Ec2KeyName>myubersecurekey</Ec2KeyName>
+ <InstanceCount>10</InstanceCount>
+ <KeepJobFlowAliveWhenNoSteps>false</KeepJobFlowAliveWhenNoSteps>
+ </Instances>
+ </member>
+ </JobFlows>
+ </DescribeJobFlowsResult>
+ <ResponseMetadata>
+ <RequestId>c31e701d-dcb4-11df-b5d9-337fc7fe4773</RequestId>
+ </ResponseMetadata>
+</DescribeJobFlowsResponse>
+"""
+
+
class TestEMRResponses(unittest.TestCase):
    # These cases parse canned DescribeJobFlows XML responses into
    # emrobject.JobFlow instances and check the extracted fields.

    def _parse_xml(self, body, markers):
        # Run the XML through boto's generic SAX handler, collecting
        # top-level objects according to the (tag, class) markers.
        rs = ResultSet(markers)
        h = handler.XmlHandler(rs, None)
        xml.sax.parseString(body, h)
        return rs

    def _assert_fields(self, response, **fields):
        # Compare each expected keyword value against the attribute of
        # the same (lowercased) name on the parsed object.
        for field, expected in fields.items():
            actual = getattr(response, field)
            self.assertEquals(expected, actual,
                              "Field %s: %r != %r" % (field, expected, actual))

    def test_JobFlows_example(self):
        # The sample response from the EMR docs: a single job flow.
        [jobflow] = self._parse_xml(JOB_FLOW_EXAMPLE,
                                    [('member', emrobject.JobFlow)])
        self._assert_fields(jobflow,
                            creationdatetime='2009-01-28T21:49:16Z',
                            startdatetime='2009-01-28T21:49:16Z',
                            state='STARTING',
                            instancecount='4',
                            jobflowid='j-3UN6WX5RRO2AG',
                            loguri='mybucket/subdir/',
                            name='MyJobFlowName',
                            availabilityzone='us-east-1a',
                            slaveinstancetype='m1.small',
                            masterinstancetype='m1.small',
                            ec2keyname='myec2keyname',
                            keepjobflowalivewhennosteps='true')

    def test_JobFlows_completed(self):
        # A completed real-world job flow, including nested Steps and
        # InstanceGroups collections.
        [jobflow] = self._parse_xml(JOB_FLOW_COMPLETED,
                                    [('member', emrobject.JobFlow)])
        self._assert_fields(jobflow,
                            creationdatetime='2010-10-21T01:00:25Z',
                            startdatetime='2010-10-21T01:03:59Z',
                            enddatetime='2010-10-21T01:44:18Z',
                            state='COMPLETED',
                            instancecount='10',
                            jobflowid='j-3H3Q13JPFLU22',
                            loguri='s3n://example.emrtest.scripts/jobflow_logs/',
                            name='RealJobFlowName',
                            availabilityzone='us-east-1b',
                            slaveinstancetype='m1.large',
                            masterinstancetype='m1.large',
                            ec2keyname='myubersecurekey',
                            keepjobflowalivewhennosteps='false')
        self.assertEquals(6, len(jobflow.steps))
        self.assertEquals(2, len(jobflow.instancegroups))
+
diff --git a/boto/exception.py b/boto/exception.py
new file mode 100644
index 0000000..718be46
--- /dev/null
+++ b/boto/exception.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Exception classes - Subclassing allows you to check for specific errors
+"""
+import base64
+import xml.sax
+from boto import handler
+from boto.resultset import ResultSet
+
+
class BotoClientError(StandardError):
    """
    General Boto Client error (error accessing AWS)
    """

    def __init__(self, reason):
        StandardError.__init__(self)
        self.reason = reason

    def __repr__(self):
        # Use the actual class name so subclasses (AWSConnectionError,
        # StorageDataError, ...) identify themselves, consistent with
        # BotoServerError's __repr__/__str__.
        return '%s: %s' % (self.__class__.__name__, self.reason)

    def __str__(self):
        return '%s: %s' % (self.__class__.__name__, self.reason)
+
class SDBPersistenceError(StandardError):
    """Error raised by boto's SimpleDB persistence support."""

    pass
+
class StoragePermissionsError(BotoClientError):
    """
    Permissions error when accessing a bucket or key on a storage service.
    """
    pass

class S3PermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on S3.
    """
    pass

class GSPermissionsError(StoragePermissionsError):
    """
    Permissions error when accessing a bucket or key on GS (Google Storage).
    """
    pass
+
class BotoServerError(StandardError):
    """
    Base class for errors returned by an AWS service.

    When a response body is supplied it is parsed as XML, extracting the
    request id, error code, error message and box usage into attributes.
    """

    def __init__(self, status, reason, body=None):
        StandardError.__init__(self)
        self.status = status    # HTTP status code
        self.reason = reason    # HTTP reason phrase
        self.body = body or ''
        self.request_id = None
        self.error_code = None
        self.error_message = None
        self.box_usage = None

        # Attempt to parse the error response. If body isn't present,
        # then just ignore the error response.
        if self.body:
            try:
                h = handler.XmlHandler(self, self)
                xml.sax.parseString(self.body, h)
            except xml.sax.SAXParseException, pe:
                # Go ahead and clean up anything that may have
                # managed to get into the error data so we
                # don't get partial garbage.
                print "Warning: failed to parse error message from AWS: %s" % pe
                self._cleanupParsedProperties()

    def __getattr__(self, name):
        # Backwards-compatible aliases: .message -> error_message and
        # .code -> error_code.
        if name == 'message':
            return self.error_message
        if name == 'code':
            return self.error_code
        raise AttributeError

    def __repr__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def __str__(self):
        return '%s: %s %s\n%s' % (self.__class__.__name__,
                                  self.status, self.reason, self.body)

    def startElement(self, name, attrs, connection):
        # SAX callback; no nested elements are handled specially here.
        pass

    def endElement(self, name, value, connection):
        # SAX callback: capture the interesting leaf elements.
        if name in ('RequestId', 'RequestID'):
            self.request_id = value
        elif name == 'Code':
            self.error_code = value
        elif name == 'Message':
            self.error_message = value
        elif name == 'BoxUsage':
            self.box_usage = value
        return None

    def _cleanupParsedProperties(self):
        # Reset anything a partial/failed parse may have set.
        self.request_id = None
        self.error_code = None
        self.error_message = None
        self.box_usage = None
+
class ConsoleOutput:
    """
    Holds console output retrieved for an (EC2) instance.

    Attributes are filled in by boto's SAX parsing callbacks; ``output``
    is base64-decoded, all other elements are stored verbatim.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.instance_id = None
        self.timestamp = None
        self.comment = None
        self.output = None

    def startElement(self, name, attrs, connection):
        # No nested structure to hand off to a sub-object.
        return None

    def endElement(self, name, value, connection):
        if name == 'output':
            # Console output arrives base64-encoded.
            self.output = base64.b64decode(value)
            return
        if name == 'instanceId':
            self.instance_id = value
            return
        # Anything else (timestamp, comment, ...) is stored as-is.
        setattr(self, name, value)
+
class StorageCreateError(BotoServerError):
    """
    Error creating a bucket or key on a storage service.

    ``bucket`` is filled in from the <BucketName> element of the error
    response, when present.
    """
    def __init__(self, status, reason, body=None):
        # Must exist before the base __init__ triggers XML parsing,
        # which may call our endElement.
        self.bucket = None
        BotoServerError.__init__(self, status, reason, body)

    def endElement(self, name, value, connection):
        if name != 'BucketName':
            return BotoServerError.endElement(self, name, value, connection)
        self.bucket = value
+
class S3CreateError(StorageCreateError):
    """
    Error creating a bucket or key on S3.
    """
    pass

class GSCreateError(StorageCreateError):
    """
    Error creating a bucket or key on GS (Google Storage).
    """
    pass

class StorageCopyError(BotoServerError):
    """
    Error copying a key on a storage service.
    """
    pass

class S3CopyError(StorageCopyError):
    """
    Error copying a key on S3.
    """
    pass

class GSCopyError(StorageCopyError):
    """
    Error copying a key on GS (Google Storage).
    """
    pass
+
class SQSError(BotoServerError):
    """
    General Error on Simple Queue Service.

    ``detail`` and ``type`` come from the <Detail> and <Type> elements
    of the error response, when present.
    """
    def __init__(self, status, reason, body=None):
        # Must exist before the base __init__ triggers XML parsing.
        self.detail = None
        self.type = None
        BotoServerError.__init__(self, status, reason, body)

    def startElement(self, name, attrs, connection):
        return BotoServerError.startElement(self, name, attrs, connection)

    def endElement(self, name, value, connection):
        if name == 'Detail':
            self.detail = value
            return None
        if name == 'Type':
            self.type = value
            return None
        return BotoServerError.endElement(self, name, value, connection)

    def _cleanupParsedProperties(self):
        BotoServerError._cleanupParsedProperties(self)
        self.detail = None
        self.type = None
+
class SQSDecodeError(BotoClientError):
    """
    Error when decoding an SQS message.
    """
    def __init__(self, reason, message):
        BotoClientError.__init__(self, reason)
        # Keep the undecodable message around for inspection.
        self.message = message

    def __repr__(self):
        return 'SQSDecodeError: %s' % self.reason

    # str() and repr() intentionally produce the same text.
    __str__ = __repr__
+
class StorageResponseError(BotoServerError):
    """
    Error in response from a storage service.

    ``resource`` comes from the <Resource> element of the error
    response, when present.
    """
    def __init__(self, status, reason, body=None):
        # Must exist before the base __init__ triggers XML parsing.
        self.resource = None
        BotoServerError.__init__(self, status, reason, body)

    def startElement(self, name, attrs, connection):
        return BotoServerError.startElement(self, name, attrs, connection)

    def endElement(self, name, value, connection):
        if name == 'Resource':
            self.resource = value
        else:
            return BotoServerError.endElement(self, name, value, connection)

    def _cleanupParsedProperties(self):
        BotoServerError._cleanupParsedProperties(self)
        # Bug fix: the original iterated ``('resource')`` — a plain
        # parenthesized string, not a tuple — so it looped over the
        # characters, setting bogus attributes ('r', 'e', ...) and never
        # resetting self.resource.
        for p in ('resource',):
            setattr(self, p, None)
+
class S3ResponseError(StorageResponseError):
    """
    Error in response from S3.
    """
    pass

class GSResponseError(StorageResponseError):
    """
    Error in response from GS (Google Storage).
    """
    pass
+
class EC2ResponseError(BotoServerError):
    """
    Error in response from EC2.

    ``errors`` holds an (error_code, error_message) pair for every
    <Error> element in the response; the first pair is also exposed as
    ``error_code``/``error_message``.
    """

    def __init__(self, status, reason, body=None):
        self.errors = None
        self._errorResultSet = []
        BotoServerError.__init__(self, status, reason, body)
        self.errors = [(e.error_code, e.error_message)
                       for e in self._errorResultSet]
        if len(self.errors):
            self.error_code, self.error_message = self.errors[0]

    def startElement(self, name, attrs, connection):
        if name == 'Errors':
            # Collect every <Error> element into _EC2Error objects.
            self._errorResultSet = ResultSet([('Error', _EC2Error)])
            return self._errorResultSet
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'RequestID':
            self.request_id = value
        else:
            return None # don't call subclass here

    def _cleanupParsedProperties(self):
        BotoServerError._cleanupParsedProperties(self)
        self._errorResultSet = []
        # Bug fix: the original iterated ``('errors')`` — a plain
        # parenthesized string, not a tuple — so it looped over the
        # characters, setting bogus attributes ('e', 'r', ...) and never
        # resetting self.errors.
        for p in ('errors',):
            setattr(self, p, None)
+
class EmrResponseError(BotoServerError):
    """
    Error in response from EMR (Elastic MapReduce).
    """
    pass
+
+class _EC2Error:
+
+ def __init__(self, connection=None):
+ self.connection = connection
+ self.error_code = None
+ self.error_message = None
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Code':
+ self.error_code = value
+ elif name == 'Message':
+ self.error_message = value
+ else:
+ return None
+
class SDBResponseError(BotoServerError):
    """
    Error in responses from SDB (SimpleDB).
    """
    pass

class AWSConnectionError(BotoClientError):
    """
    General error connecting to Amazon Web Services.
    """
    pass
+
class StorageDataError(BotoClientError):
    """
    Error receiving data from a storage service.
    """
    pass

class S3DataError(StorageDataError):
    """
    Error receiving data from S3.
    """
    pass

class GSDataError(StorageDataError):
    """
    Error receiving data from GS (Google Storage).
    """
    pass

class FPSResponseError(BotoServerError):
    """Error in a response from FPS."""
    pass
+
class InvalidUriError(Exception):
    """Exception raised when URI is invalid."""

    def __init__(self, message):
        # NOTE: message is deliberately not passed to Exception.__init__
        # (matching historical behavior); it is exposed as .message.
        super(InvalidUriError, self).__init__()
        self.message = message

class InvalidAclError(Exception):
    """Exception raised when ACL XML is invalid."""

    def __init__(self, message):
        super(InvalidAclError, self).__init__()
        self.message = message
+
class NoAuthHandlerFound(Exception):
    """Is raised when no auth handlers were found ready to authenticate."""
    pass

class TooManyAuthHandlerReadyToAuthenticate(Exception):
    """Is raised when there are more than one auth handler ready.

    In a normal situation there should only be one auth handler that is
    ready to authenticate. In cases where more than one auth handler is
    ready to authenticate, we raise this exception, to prevent
    unpredictable behavior when multiple auth handlers can handle a
    particular case and the one chosen depends on the order they were
    checked.
    """
    pass
+
# Enum class for resumable upload failure disposition.
class ResumableTransferDisposition(object):
    """String constants describing how a failed resumable transfer
    should be handled; used as the disposition of
    ResumableUploadException / ResumableDownloadException."""

    # START_OVER means an attempt to resume an existing transfer failed,
    # and a new resumable upload should be attempted (without delay).
    START_OVER = 'START_OVER'

    # WAIT_BEFORE_RETRY means the resumable transfer failed but that it can
    # be retried after a time delay.
    WAIT_BEFORE_RETRY = 'WAIT_BEFORE_RETRY'

    # ABORT means the resumable transfer failed and that delaying/retrying
    # within the current process will not help.
    ABORT = 'ABORT'
+
class ResumableUploadException(Exception):
    """
    Exception raised for various resumable upload problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableUploadException, self).__init__()
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableUploadException("%s", %s)' % (self.message,
                                                       self.disposition)
+
class ResumableDownloadException(Exception):
    """
    Exception raised for various resumable download problems.

    self.disposition is of type ResumableTransferDisposition.
    """

    def __init__(self, message, disposition):
        super(ResumableDownloadException, self).__init__()
        self.message = message
        self.disposition = disposition

    def __repr__(self):
        return 'ResumableDownloadException("%s", %s)' % (self.message,
                                                         self.disposition)
diff --git a/boto/file/README b/boto/file/README
new file mode 100644
index 0000000..af82455
--- /dev/null
+++ b/boto/file/README
@@ -0,0 +1,49 @@
+Handling of file:// URIs:
+
+This directory contains code to map basic boto connection, bucket, and key
+operations onto files in the local filesystem, in support of file://
+URI operations.
+
+Bucket storage operations cannot be mapped completely onto a file system
+because of the different naming semantics in these types of systems: the
+former have a flat name space of objects within each named bucket; the
+latter have a hierarchical name space of files, and nothing corresponding to
+the notion of a bucket. The mapping we selected was guided by the desire
+to achieve meaningful semantics for a useful subset of operations that can
+be implemented polymorphically across both types of systems. We considered
+several possibilities for mapping path names to bucket + object name:
+
+1) bucket = the file system root or local directory (for absolute vs
+relative file:// URIs, respectively) and object = remainder of path.
+We discarded this choice because the get_all_keys() method doesn't make
+sense under this approach: Enumerating all files under the root or current
+directory could include more than the caller intended. For example,
+StorageUri("file:///usr/bin/X11/vim").get_all_keys() would enumerate all
+files in the file system.
+
+2) bucket is treated mostly as an anonymous placeholder, with the object
+name holding the URI path (minus the "file://" part). Two sub-options,
+for object enumeration (the get_all_keys() call):
+ a) disallow get_all_keys(). This isn't great, as then the caller must
+ know the URI type before deciding whether to make this call.
+ b) return the single key for which this "bucket" was defined.
+ Note that this option means the app cannot use this API for listing
+ contents of the file system. While that makes the API less generally
+ useful, it avoids the potentially dangerous/unintended consequences
+ noted in option (1) above.
+
+We selected 2b, resulting in a class hierarchy where StorageUri is an abstract
+class, with FileStorageUri and BucketStorageUri subclasses.
+
+Some additional notes:
+
+BucketStorageUri and FileStorageUri each implement these methods:
+ - clone_replace_name() creates a same-type URI with a
+ different object name - which is useful for various enumeration cases
+ (e.g., implementing wildcarding in a command line utility).
+ - names_container() determines if the given URI names a container for
+ multiple objects/files - i.e., a bucket or directory.
+ - names_singleton() determines if the given URI names an individual object
+ or file.
+ - is_file_uri() and is_cloud_uri() determine if the given URI is a
+ FileStorageUri or BucketStorageUri, respectively
diff --git a/boto/file/__init__.py b/boto/file/__init__.py
new file mode 100755
index 0000000..0210b47
--- /dev/null
+++ b/boto/file/__init__.py
@@ -0,0 +1,28 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+
+from connection import FileConnection as Connection
+from key import Key
+from bucket import Bucket
+
+__all__ = ['Connection', 'Key', 'Bucket']
diff --git a/boto/file/bucket.py b/boto/file/bucket.py
new file mode 100644
index 0000000..7a1636b
--- /dev/null
+++ b/boto/file/bucket.py
@@ -0,0 +1,101 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of bucket, for use with "file://" URIs.
+
+import os
+from key import Key
+from boto.file.simpleresultset import SimpleResultSet
+from boto.s3.bucketlistresultset import BucketListResultSet
+
+class Bucket(object):
+ def __init__(self, name, contained_key):
+ """Instantiate an anonymous file-based Bucket around a single key.
+ """
+ self.name = name
+ self.contained_key = contained_key
+
+ def __iter__(self):
+ return iter(BucketListResultSet(self))
+
+ def __str__(self):
+ return 'anonymous bucket for file://' + self.contained_key
+
+ def delete_key(self, key_name, headers=None,
+ version_id=None, mfa_token=None):
+ """
+ Deletes a key from the bucket.
+
+ :type key_name: string
+ :param key_name: The key name to delete
+
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: Unused in this subclass.
+ """
+ os.remove(key_name)
+
+ def get_all_keys(self, headers=None, **params):
+ """
+ This method returns the single key around which this anonymous Bucket
+ was instantiated.
+
+ :rtype: SimpleResultSet
+ :return: The result from file system listing the keys requested
+
+ """
+ key = Key(self.name, self.contained_key)
+ return SimpleResultSet([key])
+
+ def get_key(self, key_name, headers=None, version_id=None):
+ """
+ Check to see if a particular key exists within the bucket.
+ Returns: An instance of a Key object or None
+
+ :type key_name: string
+ :param key_name: The name of the key to retrieve
+
+ :type version_id: string
+ :param version_id: Unused in this subclass.
+
+ :rtype: :class:`boto.file.key.Key`
+ :returns: A Key object from this bucket.
+ """
+ fp = open(key_name, 'rb')
+ return Key(self.name, key_name, fp)
+
+ def new_key(self, key_name=None):
+ """
+ Creates a new key
+
+ :type key_name: string
+ :param key_name: The name of the key to create
+
+ :rtype: :class:`boto.file.key.Key`
+ :returns: An instance of the newly created key object
+ """
+ dir_name = os.path.dirname(key_name)
+ if dir_name and not os.path.exists(dir_name):
+ os.makedirs(dir_name)
+ fp = open(key_name, 'wb')
+ return Key(self.name, key_name, fp)
diff --git a/boto/file/connection.py b/boto/file/connection.py
new file mode 100755
index 0000000..f453f71
--- /dev/null
+++ b/boto/file/connection.py
@@ -0,0 +1,33 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of connection, for use with "file://" URIs.
+
+from bucket import Bucket
+
+class FileConnection(object):
+
+ def __init__(self, file_storage_uri):
+ # FileConnections are per-file storage URI.
+ self.file_storage_uri = file_storage_uri
+
+ def get_bucket(self, bucket_name, validate=True, headers=None):
+ return Bucket(bucket_name, self.file_storage_uri.object_name)
diff --git a/boto/file/key.py b/boto/file/key.py
new file mode 100755
index 0000000..af801a5
--- /dev/null
+++ b/boto/file/key.py
@@ -0,0 +1,123 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# File representation of key, for use with "file://" URIs.
+
+import os, shutil, StringIO
+
+class Key(object):
+
+ def __init__(self, bucket, name, fp=None):
+ self.bucket = bucket
+ self.full_path = name
+ self.name = name
+ self.fp = fp
+
+ def __str__(self):
+ return 'file://' + self.full_path
+
+ def get_file(self, fp, headers=None, cb=None, num_cb=10, torrent=False):
+ """
+ Retrieves a file from a Key
+
+ :type fp: file
+ :param fp: File pointer to put the data into
+
+    :type headers: dict
+    :param headers: ignored in this subclass.
+
+ :type cb: function
+ :param cb: ignored in this subclass.
+
+    :type num_cb: int
+ :param num_cb: ignored in this subclass.
+ """
+ key_file = open(self.full_path, 'rb')
+ shutil.copyfileobj(key_file, fp)
+
+ def set_contents_from_file(self, fp, headers=None, replace=True, cb=None,
+ num_cb=10, policy=None, md5=None):
+ """
+ Store an object in a file using the name of the Key object as the
+ key in file URI and the contents of the file pointed to by 'fp' as the
+ contents.
+
+ :type fp: file
+ :param fp: the file whose contents to upload
+
+ :type headers: dict
+ :param headers: ignored in this subclass.
+
+ :type replace: bool
+ :param replace: If this parameter is False, the method
+ will first check to see if an object exists in the
+ bucket with the same key. If it does, it won't
+ overwrite it. The default value is True which will
+ overwrite the object.
+
+ :type cb: function
+ :param cb: ignored in this subclass.
+
+    :type num_cb: int
+ :param num_cb: ignored in this subclass.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: ignored in this subclass.
+
+ :type md5: A tuple containing the hexdigest version of the MD5 checksum
+ of the file as the first element and the Base64-encoded
+ version of the plain checksum as the second element.
+ This is the same format returned by the compute_md5 method.
+ :param md5: ignored in this subclass.
+ """
+ if not replace and os.path.exists(self.full_path):
+ return
+ key_file = open(self.full_path, 'wb')
+ shutil.copyfileobj(fp, key_file)
+ key_file.close()
+
+ def get_contents_as_string(self, headers=None, cb=None, num_cb=10,
+ torrent=False):
+ """
+ Retrieve file data from the Key, and return contents as a string.
+
+ :type headers: dict
+ :param headers: ignored in this subclass.
+
+    :type cb: function
+    :param cb: ignored in this subclass.
+
+    :type num_cb: int
+    :param num_cb: ignored in this subclass.
+
+
+
+
+ :type torrent: bool
+ :param torrent: ignored in this subclass.
+
+ :rtype: string
+ :returns: The contents of the file as a string
+ """
+
+ fp = StringIO.StringIO()
+ self.get_contents_to_file(fp)
+ return fp.getvalue()
diff --git a/boto/file/simpleresultset.py b/boto/file/simpleresultset.py
new file mode 100755
index 0000000..5f94dc1
--- /dev/null
+++ b/boto/file/simpleresultset.py
@@ -0,0 +1,30 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class SimpleResultSet(list):
+ """
+ ResultSet facade built from a simple list, rather than via XML parsing.
+ """
+
+ def __init__(self, input_list):
+ for x in input_list:
+ self.append(x)
+ self.is_truncated = False
diff --git a/boto/fps/__init__.py b/boto/fps/__init__.py
new file mode 100644
index 0000000..2f44483
--- /dev/null
+++ b/boto/fps/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2008, Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/fps/connection.py b/boto/fps/connection.py
new file mode 100644
index 0000000..3d7812e
--- /dev/null
+++ b/boto/fps/connection.py
@@ -0,0 +1,356 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org/
+# Copyright (c) 2010 Jason R. Coombs http://www.jaraco.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import base64
+import hmac
+import hashlib
+import urllib
+import xml.sax
+import uuid
+import boto
+import boto.utils
+from boto import handler
+from boto.connection import AWSQueryConnection
+from boto.resultset import ResultSet
+from boto.exception import FPSResponseError
+
+class FPSConnection(AWSQueryConnection):
+
+ APIVersion = '2007-01-08'
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None,
+ host='fps.sandbox.amazonaws.com', debug=0,
+ https_connection_factory=None, path="/"):
+ AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port,
+ proxy_user, proxy_pass, host, debug,
+ https_connection_factory, path)
+
+ def _required_auth_capability(self):
+ return ['fps']
+
+ def install_payment_instruction(self, instruction, token_type="Unrestricted", transaction_id=None):
+ """
+ InstallPaymentInstruction
+ instruction: The PaymentInstruction to send, for example:
+
+ MyRole=='Caller' orSay 'Roles do not match';
+
+ token_type: Defaults to "Unrestricted"
+ transaction_id: Defaults to a new ID
+ """
+
+ if(transaction_id == None):
+ transaction_id = uuid.uuid4()
+ params = {}
+ params['PaymentInstruction'] = instruction
+ params['TokenType'] = token_type
+ params['CallerReference'] = transaction_id
+ response = self.make_request("InstallPaymentInstruction", params)
+ return response
+
+ def install_caller_instruction(self, token_type="Unrestricted", transaction_id=None):
+ """
+ Set us up as a caller
+ This will install a new caller_token into the FPS section.
+ This should really only be called to regenerate the caller token.
+ """
+ response = self.install_payment_instruction("MyRole=='Caller';", token_type=token_type, transaction_id=transaction_id)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ caller_token = rs.TokenId
+ try:
+ boto.config.save_system_option("FPS", "caller_token", caller_token)
+ except(IOError):
+ boto.config.save_user_option("FPS", "caller_token", caller_token)
+ return caller_token
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def install_recipient_instruction(self, token_type="Unrestricted", transaction_id=None):
+ """
+ Set us up as a Recipient
+ This will install a new caller_token into the FPS section.
+ This should really only be called to regenerate the recipient token.
+ """
+ response = self.install_payment_instruction("MyRole=='Recipient';", token_type=token_type, transaction_id=transaction_id)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ recipient_token = rs.TokenId
+ try:
+ boto.config.save_system_option("FPS", "recipient_token", recipient_token)
+ except(IOError):
+ boto.config.save_user_option("FPS", "recipient_token", recipient_token)
+
+ return recipient_token
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def make_url(self, returnURL, paymentReason, pipelineName, transactionAmount, **params):
+ """
+ Generate the URL with the signature required for a transaction
+ """
+ # use the sandbox authorization endpoint if we're using the
+ # sandbox for API calls.
+ endpoint_host = 'authorize.payments.amazon.com'
+ if 'sandbox' in self.host:
+ endpoint_host = 'authorize.payments-sandbox.amazon.com'
+ base = "/cobranded-ui/actions/start"
+
+
+ params['callerKey'] = str(self.aws_access_key_id)
+ params['returnURL'] = str(returnURL)
+ params['paymentReason'] = str(paymentReason)
+ params['pipelineName'] = pipelineName
+ params["signatureMethod"] = 'HmacSHA256'
+ params["signatureVersion"] = '2'
+ params["transactionAmount"] = transactionAmount
+
+ if(not params.has_key('callerReference')):
+ params['callerReference'] = str(uuid.uuid4())
+
+ parts = ''
+ for k in sorted(params.keys()):
+ parts += "&%s=%s" % (k, urllib.quote(params[k], '~'))
+
+ canonical = '\n'.join(['GET',
+ str(endpoint_host).lower(),
+ base,
+ parts[1:]])
+
+ signature = self._auth_handler.sign_string(canonical)
+ params["signature"] = signature
+
+ urlsuffix = ''
+ for k in sorted(params.keys()):
+ urlsuffix += "&%s=%s" % (k, urllib.quote(params[k], '~'))
+ urlsuffix = urlsuffix[1:] # strip the first &
+
+ fmt = "https://%(endpoint_host)s%(base)s?%(urlsuffix)s"
+ final = fmt % vars()
+ return final
+
+ def pay(self, transactionAmount, senderTokenId,
+ recipientTokenId=None, callerTokenId=None,
+ chargeFeeTo="Recipient",
+ callerReference=None, senderReference=None, recipientReference=None,
+ senderDescription=None, recipientDescription=None, callerDescription=None,
+ metadata=None, transactionDate=None, reserve=False):
+ """
+ Make a payment transaction. You must specify the amount.
+ This can also perform a Reserve request if 'reserve' is set to True.
+ """
+ params = {}
+ params['SenderTokenId'] = senderTokenId
+ # this is for 2008-09-17 specification
+ params['TransactionAmount.Amount'] = str(transactionAmount)
+ params['TransactionAmount.CurrencyCode'] = "USD"
+ #params['TransactionAmount'] = str(transactionAmount)
+ params['ChargeFeeTo'] = chargeFeeTo
+
+ params['RecipientTokenId'] = (
+ recipientTokenId if recipientTokenId is not None
+ else boto.config.get("FPS", "recipient_token")
+ )
+ params['CallerTokenId'] = (
+ callerTokenId if callerTokenId is not None
+ else boto.config.get("FPS", "caller_token")
+ )
+ if(transactionDate != None):
+ params['TransactionDate'] = transactionDate
+ if(senderReference != None):
+ params['SenderReference'] = senderReference
+ if(recipientReference != None):
+ params['RecipientReference'] = recipientReference
+ if(senderDescription != None):
+ params['SenderDescription'] = senderDescription
+ if(recipientDescription != None):
+ params['RecipientDescription'] = recipientDescription
+ if(callerDescription != None):
+ params['CallerDescription'] = callerDescription
+ if(metadata != None):
+ params['MetaData'] = metadata
+ if(callerReference == None):
+ callerReference = uuid.uuid4()
+ params['CallerReference'] = callerReference
+
+ if reserve:
+ response = self.make_request("Reserve", params)
+ else:
+ response = self.make_request("Pay", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def get_transaction_status(self, transactionId):
+ """
+ Returns the status of a given transaction.
+ """
+ params = {}
+ params['TransactionId'] = transactionId
+
+ response = self.make_request("GetTransactionStatus", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def cancel(self, transactionId, description=None):
+ """
+ Cancels a reserved or pending transaction.
+ """
+ params = {}
+ params['transactionId'] = transactionId
+ if(description != None):
+ params['description'] = description
+
+ response = self.make_request("Cancel", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def settle(self, reserveTransactionId, transactionAmount=None):
+ """
+ Charges for a reserved payment.
+ """
+ params = {}
+ params['ReserveTransactionId'] = reserveTransactionId
+ if(transactionAmount != None):
+ params['TransactionAmount'] = transactionAmount
+
+ response = self.make_request("Settle", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def refund(self, callerReference, transactionId, refundAmount=None, callerDescription=None):
+ """
+ Refund a transaction. This refunds the full amount by default unless 'refundAmount' is specified.
+ """
+ params = {}
+ params['CallerReference'] = callerReference
+ params['TransactionId'] = transactionId
+ if(refundAmount != None):
+ params['RefundAmount'] = refundAmount
+ if(callerDescription != None):
+ params['CallerDescription'] = callerDescription
+
+ response = self.make_request("Refund", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def get_recipient_verification_status(self, recipientTokenId):
+ """
+ Test that the intended recipient has a verified Amazon Payments account.
+ """
+ params ={}
+ params['RecipientTokenId'] = recipientTokenId
+
+ response = self.make_request("GetRecipientVerificationStatus", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def get_token_by_caller_reference(self, callerReference):
+ """
+ Returns details about the token specified by 'callerReference'.
+ """
+ params ={}
+ params['callerReference'] = callerReference
+
+ response = self.make_request("GetTokenByCaller", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+ def get_token_by_caller_token(self, tokenId):
+ """
+        Returns details about the token specified by 'tokenId'.
+ """
+ params ={}
+ params['TokenId'] = tokenId
+
+ response = self.make_request("GetTokenByCaller", params)
+ body = response.read()
+ if(response.status == 200):
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise FPSResponseError(response.status, response.reason, body)
+
+ def verify_signature(self, end_point_url, http_parameters):
+ params = dict(
+ UrlEndPoint = end_point_url,
+ HttpParameters = http_parameters,
+ )
+ response = self.make_request("VerifySignature", params)
+ body = response.read()
+ if(response.status != 200):
+ raise FPSResponseError(response.status, response.reason, body)
+ rs = ResultSet()
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
diff --git a/boto/fps/test/__init__.py b/boto/fps/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/boto/fps/test/__init__.py
diff --git a/boto/fps/test/test_install_caller_instruction.py b/boto/fps/test/test_install_caller_instruction.py
new file mode 100644
index 0000000..8095914
--- /dev/null
+++ b/boto/fps/test/test_install_caller_instruction.py
@@ -0,0 +1,4 @@
+from boto.fps.connection import FPSConnection
+conn = FPSConnection()
+conn.install_caller_instruction()
+conn.install_recipient_instruction()
diff --git a/boto/fps/test/test_verify_signature.py b/boto/fps/test/test_verify_signature.py
new file mode 100644
index 0000000..10c6b61
--- /dev/null
+++ b/boto/fps/test/test_verify_signature.py
@@ -0,0 +1,6 @@
+from boto.fps.connection import FPSConnection
+conn = FPSConnection()
+# example response from the docs
+params = 'expiry=08%2F2015&signature=ynDukZ9%2FG77uSJVb5YM0cadwHVwYKPMKOO3PNvgADbv6VtymgBxeOWEhED6KGHsGSvSJnMWDN%2FZl639AkRe9Ry%2F7zmn9CmiM%2FZkp1XtshERGTqi2YL10GwQpaH17MQqOX3u1cW4LlyFoLy4celUFBPq1WM2ZJnaNZRJIEY%2FvpeVnCVK8VIPdY3HMxPAkNi5zeF2BbqH%2BL2vAWef6vfHkNcJPlOuOl6jP4E%2B58F24ni%2B9ek%2FQH18O4kw%2FUJ7ZfKwjCCI13%2BcFybpofcKqddq8CuUJj5Ii7Pdw1fje7ktzHeeNhF0r9siWcYmd4JaxTP3NmLJdHFRq2T%2FgsF3vK9m3gw%3D%3D&signatureVersion=2&signatureMethod=RSA-SHA1&certificateUrl=https%3A%2F%2Ffps.sandbox.amazonaws.com%2Fcerts%2F090909%2FPKICert.pem&tokenID=A5BB3HUNAZFJ5CRXIPH72LIODZUNAUZIVP7UB74QNFQDSQ9MN4HPIKISQZWPLJXF&status=SC&callerReference=callerReferenceMultiUse1'
+endpoint = 'http://vamsik.desktop.amazon.com:8080/ipn.jsp'
+conn.verify_signature(endpoint, params)
diff --git a/boto/gs/__init__.py b/boto/gs/__init__.py
new file mode 100755
index 0000000..bf4c0b9
--- /dev/null
+++ b/boto/gs/__init__.py
@@ -0,0 +1,22 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
diff --git a/boto/gs/acl.py b/boto/gs/acl.py
new file mode 100755
index 0000000..33aaadf
--- /dev/null
+++ b/boto/gs/acl.py
@@ -0,0 +1,276 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.gs.user import User
+from boto.exception import InvalidAclError
+
# Element and attribute names used in GS AccessControlList XML documents.
ACCESS_CONTROL_LIST = 'AccessControlList'
ALL_AUTHENTICATED_USERS = 'AllAuthenticatedUsers'
ALL_USERS = 'AllUsers'
DOMAIN = 'Domain'
EMAIL_ADDRESS = 'EmailAddress'
ENTRY = 'Entry'
ENTRIES = 'Entries'
GROUP_BY_DOMAIN = 'GroupByDomain'
GROUP_BY_EMAIL = 'GroupByEmail'
GROUP_BY_ID = 'GroupById'
ID = 'ID'
NAME = 'Name'
OWNER = 'Owner'
PERMISSION = 'Permission'
SCOPE = 'Scope'
# 'type' is an XML *attribute* (on the <Scope> element), hence lowercase.
TYPE = 'type'
USER_BY_EMAIL = 'UserByEmail'
USER_BY_ID = 'UserById'


# Canned (predefined) ACL names accepted by GS.
CannedACLStrings = ['private', 'public-read',
                    'public-read-write', 'authenticated-read',
                    'bucket-owner-read', 'bucket-owner-full-control']

# Permission values valid inside a GS ACL entry.
SupportedPermissions = ['READ', 'WRITE', 'FULL_CONTROL']
+
class ACL:
    """A Google Cloud Storage access control list.

    Holds an optional owner (a boto.gs.user.User) plus an Entries
    collection of (Scope, Permission) grants.  Instances are either
    built up via the add_*_grant methods or populated by SAX-parsing
    an AccessControlList XML document.
    """

    def __init__(self, parent=None):
        self.parent = parent
        # Use an Entries container from the start (rather than a bare
        # list) so the add_*_grant methods below work on a freshly
        # constructed ACL, not only on one populated by XML parsing.
        self.entries = Entries(self)

    def __repr__(self):
        # Owner is optional in GS ACLs; include it only when present.
        # (Previously this test was inverted, so repr() raised
        # AttributeError whenever no owner had been set and omitted
        # the owner whenever one had.)
        if hasattr(self, 'owner'):
            entries_repr = ['Owner:%s' % self.owner.__repr__()]
        else:
            entries_repr = ['']
        acl_entries = self.entries
        if acl_entries:
            for e in acl_entries.entry_list:
                entries_repr.append(e.__repr__())
        return '<%s>' % ', '.join(entries_repr)

    # Method with same signature as boto.s3.acl.ACL.add_email_grant(), to allow
    # polymorphic treatment at application layer.
    def add_email_grant(self, permission, email_address):
        """Add a grant of `permission` for the account with this email."""
        entry = Entry(type=USER_BY_EMAIL, email_address=email_address,
                      permission=permission)
        self.entries.entry_list.append(entry)

    # Method with same signature as boto.s3.acl.ACL.add_user_grant(), to allow
    # polymorphic treatment at application layer.
    def add_user_grant(self, permission, user_id):
        """Add a grant of `permission` for the given canonical user id."""
        entry = Entry(permission=permission, type=USER_BY_ID, id=user_id)
        self.entries.entry_list.append(entry)

    def add_group_email_grant(self, permission, email_address):
        """Add a grant of `permission` for the Google Group with this email."""
        entry = Entry(type=GROUP_BY_EMAIL, email_address=email_address,
                      permission=permission)
        self.entries.entry_list.append(entry)

    def add_group_grant(self, permission, group_id):
        """Add a grant of `permission` for the given canonical group id."""
        entry = Entry(type=GROUP_BY_ID, id=group_id, permission=permission)
        self.entries.entry_list.append(entry)

    def startElement(self, name, attrs, connection):
        # SAX hook: delegate the <Owner> and <Entries> subtrees to
        # their own handler objects.
        if name == OWNER:
            self.owner = User(self)
            return self.owner
        elif name == ENTRIES:
            self.entries = Entries(self)
            return self.entries
        else:
            return None

    def endElement(self, name, value, connection):
        # SAX hook: <Owner>/<Entries> were handled by child objects;
        # any other element becomes an attribute on this object.
        if name == OWNER:
            pass
        elif name == ENTRIES:
            pass
        else:
            setattr(self, name, value)

    def to_xml(self):
        """Return this ACL as an AccessControlList XML document string."""
        s = '<%s>' % ACCESS_CONTROL_LIST
        # Owner is optional in GS ACLs.
        if hasattr(self, 'owner'):
            s += self.owner.to_xml()
        acl_entries = self.entries
        # Omit the <Entries> element when there are no grants,
        # preserving the previous output for a freshly built ACL.
        if acl_entries and acl_entries.entry_list:
            s += acl_entries.to_xml()
        s += '</%s>' % ACCESS_CONTROL_LIST
        return s
+
+
class Entries:
    """Container for the <Entries> element of a GS ACL document."""

    def __init__(self, parent=None):
        self.parent = parent
        # Entries models the same-named XML element; the Entry objects
        # themselves live in entry_list.
        self.entry_list = []

    def __repr__(self):
        return '<Entries: %s>' % ', '.join(repr(e) for e in self.entry_list)

    def startElement(self, name, attrs, connection):
        # SAX hook: each <Entry> child gets its own handler object,
        # which is also recorded in entry_list.
        if name != ENTRY:
            return None
        new_entry = Entry(self)
        self.entry_list.append(new_entry)
        return new_entry

    def endElement(self, name, value, connection):
        # SAX hook: <Entry> is handled by the child object; anything
        # else becomes an attribute on this object.
        if name != ENTRY:
            setattr(self, name, value)

    def to_xml(self):
        """Return the <Entries> XML fragment for all contained entries."""
        inner = ''.join(entry.to_xml() for entry in self.entry_list)
        return '<%s>%s</%s>' % (ENTRIES, inner, ENTRIES)
+
+
class Entry:
    """A single (Scope, Permission) entry in a GS ACL."""

    def __init__(self, scope=None, type=None, id=None, name=None,
                 email_address=None, domain=None, permission=None):
        # When no scope object is supplied, build one from the
        # individual scope fields.
        if not scope:
            scope = Scope(self, type, id, name, email_address, domain)
        self.scope = scope
        self.permission = permission

    def __repr__(self):
        return '<%s: %s>' % (repr(self.scope), repr(self.permission))

    def startElement(self, name, attrs, connection):
        # SAX hook: a <Scope> child gets its own handler; its 'type'
        # attribute is mandatory.
        if name == SCOPE:
            if TYPE not in attrs:
                raise InvalidAclError('Missing "%s" in "%s" part of ACL' %
                                      (TYPE, SCOPE))
            self.scope = Scope(self, attrs[TYPE])
            return self.scope
        return None

    def endElement(self, name, value, connection):
        # SAX hook: validate <Permission> text; <Scope> was handled by
        # the child object; anything else becomes an attribute.
        if name == PERMISSION:
            permission = value.strip()
            if permission not in SupportedPermissions:
                raise InvalidAclError('Invalid Permission "%s"' % permission)
            self.permission = permission
        elif name != SCOPE:
            setattr(self, name, value)

    def to_xml(self):
        """Return the <Entry> XML fragment for this entry."""
        return ('<%s>%s<%s>%s</%s></%s>' %
                (ENTRY, self.scope.to_xml(),
                 PERMISSION, self.permission, PERMISSION, ENTRY))
+
class Scope:
    """The <Scope> element of an ACL entry: who a grant applies to."""

    # Map from Scope type to the list of sub-elements allowed inside a
    # <Scope> of that type.
    ALLOWED_SCOPE_TYPE_SUB_ELEMS = {
        ALL_AUTHENTICATED_USERS : [],
        ALL_USERS : [],
        GROUP_BY_DOMAIN : [DOMAIN],
        GROUP_BY_EMAIL : [EMAIL_ADDRESS, NAME],
        GROUP_BY_ID : [ID, NAME],
        USER_BY_EMAIL : [EMAIL_ADDRESS, NAME],
        USER_BY_ID : [ID, NAME]
    }

    def __init__(self, parent, type=None, id=None, name=None,
                 email_address=None, domain=None):
        """Build a scope of the given type.

        :raises InvalidAclError: if type is not a recognized scope type.
        """
        self.parent = parent
        self.type = type
        self.name = name
        self.id = id
        self.domain = domain
        self.email_address = email_address
        # 'in' rather than the deprecated dict.has_key() (removed in
        # Python 3).
        if self.type not in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS:
            raise InvalidAclError('Invalid %s %s "%s" ' %
                                  (SCOPE, TYPE, self.type))

    def __repr__(self):
        # Show whichever identifying field this scope type carries.
        named_entity = None
        if self.id:
            named_entity = self.id
        elif self.email_address:
            named_entity = self.email_address
        elif self.domain:
            named_entity = self.domain
        if named_entity:
            return '<%s: %s>' % (self.type, named_entity)
        else:
            return '<%s>' % self.type

    def startElement(self, name, attrs, connection):
        # SAX hook: reject sub-elements that are not valid for this
        # scope type.
        if not name in self.ALLOWED_SCOPE_TYPE_SUB_ELEMS[self.type]:
            raise InvalidAclError('Element "%s" not allowed in %s %s "%s" ' %
                                  (name, SCOPE, TYPE, self.type))
        return None

    def endElement(self, name, value, connection):
        # SAX hook: store the recognized identifying fields; anything
        # else becomes an attribute on this object.
        value = value.strip()
        if name == DOMAIN:
            self.domain = value
        elif name == EMAIL_ADDRESS:
            self.email_address = value
        elif name == ID:
            self.id = value
        elif name == NAME:
            self.name = value
        else:
            setattr(self, name, value)

    def to_xml(self):
        """Return the <Scope> XML fragment for this scope."""
        s = '<%s type="%s">' % (SCOPE, self.type)
        if self.type == ALL_AUTHENTICATED_USERS or self.type == ALL_USERS:
            pass
        elif self.type == GROUP_BY_DOMAIN:
            s += '<%s>%s</%s>' % (DOMAIN, self.domain, DOMAIN)
        elif self.type == GROUP_BY_EMAIL or self.type == USER_BY_EMAIL:
            s += '<%s>%s</%s>' % (EMAIL_ADDRESS, self.email_address,
                                  EMAIL_ADDRESS)
            if self.name:
                s += '<%s>%s</%s>' % (NAME, self.name, NAME)
        elif self.type == GROUP_BY_ID or self.type == USER_BY_ID:
            s += '<%s>%s</%s>' % (ID, self.id, ID)
            if self.name:
                s += '<%s>%s</%s>' % (NAME, self.name, NAME)
        else:
            # Defensive: __init__ validates self.type, so this branch is
            # unreachable unless the attribute was mutated afterward.
            # (Previously self.type was passed as a second positional
            # argument to the exception instead of being %-formatted
            # into the message.)
            raise InvalidAclError('Invalid scope type "%s" ' % self.type)

        s += '</%s>' % SCOPE
        return s
diff --git a/boto/gs/bucket.py b/boto/gs/bucket.py
new file mode 100644
index 0000000..b4b80e8
--- /dev/null
+++ b/boto/gs/bucket.py
@@ -0,0 +1,173 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto import handler
+from boto.exception import InvalidAclError
+from boto.gs.acl import ACL
+from boto.gs.acl import SupportedPermissions as GSPermissions
+from boto.gs.key import Key as GSKey
+from boto.s3.acl import Policy
+from boto.s3.bucket import Bucket as S3Bucket
+import xml.sax
+
class Bucket(S3Bucket):
    """A Google Cloud Storage bucket, specialized from the S3 bucket.

    Overrides the ACL-related methods to speak the GS ACL document
    format (boto.gs.acl) instead of the S3 one.
    """

    def __init__(self, connection=None, name=None, key_class=GSKey):
        super(Bucket, self).__init__(connection, name, key_class)

    def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
        """Set the ACL of the bucket or, if key_name is given, of that key.

        :param acl_or_str: a boto.gs.acl.ACL instance or a canned-ACL
            string.  Passing a boto.s3.acl.Policy raises
            InvalidAclError, since S3 policies are not valid for GS.
        :param version_id: accepted for signature compatibility with
            the S3 class; unused here.
        """
        if isinstance(acl_or_str, Policy):
            raise InvalidAclError('Attempt to set S3 Policy on GS ACL')
        elif isinstance(acl_or_str, ACL):
            self.set_xml_acl(acl_or_str.to_xml(), key_name, headers=headers)
        else:
            self.set_canned_acl(acl_or_str, key_name, headers=headers)

    def get_acl(self, key_name='', headers=None, version_id=None):
        """Return the boto.gs.acl.ACL of the bucket (or of key_name).

        :param version_id: accepted for signature compatibility with
            the S3 class; unused here.
        :raises: the provider's storage_response_error for any non-200
            HTTP response.
        """
        response = self.connection.make_request('GET', self.name, key_name,
                                                query_args='acl',
                                                headers=headers)
        body = response.read()
        if response.status == 200:
            acl = ACL(self)
            h = handler.XmlHandler(acl, self)
            xml.sax.parseString(body, h)
            return acl
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)

    def _add_grant(self, acl_method_name, permission, grantee, headers):
        # Shared validate / fetch / modify / store cycle used by all the
        # add_*_grant convenience methods below.
        if permission not in GSPermissions:
            raise self.connection.provider.storage_permissions_error(
                'Unknown Permission: %s' % permission)
        acl = self.get_acl(headers=headers)
        getattr(acl, acl_method_name)(permission, grantee)
        self.set_acl(acl, headers=headers)

    # Method with same signature as boto.s3.bucket.Bucket.add_email_grant(),
    # to allow polymorphic treatment at application layer.
    def add_email_grant(self, permission, email_address,
                        recursive=False, headers=None):
        """Grant `permission` to the GS account with the given email
        address, by fetching the current ACL, appending the grant and
        PUTting the modified ACL back to GS.

        :type permission: string
        :param permission: one of READ, WRITE, FULL_CONTROL.

        :type email_address: string
        :param email_address: the email address associated with the GS
            account being granted the permission.

        :type recursive: bool
        :param recursive: if True, also apply the same grant to every
            key in the bucket.  CAUTION: with many keys this could take
            a long time!
        """
        self._add_grant('add_email_grant', permission, email_address, headers)
        if recursive:
            for key in self:
                key.add_email_grant(permission, email_address,
                                    headers=headers)

    # Method with same signature as boto.s3.bucket.Bucket.add_user_grant(),
    # to allow polymorphic treatment at application layer.
    def add_user_grant(self, permission, user_id, recursive=False,
                       headers=None):
        """Grant `permission` to the GS account with the given canonical
        user id, by fetching the current ACL, appending the grant and
        PUTting the modified ACL back to GS.

        :type permission: string
        :param permission: one of READ, WRITE, FULL_CONTROL.

        :type user_id: string
        :param user_id: the canonical user id associated with the GS
            account being granted the permission.

        :type recursive: bool
        :param recursive: if True, also apply the same grant to every
            key in the bucket.  CAUTION: with many keys this could take
            a long time!
        """
        self._add_grant('add_user_grant', permission, user_id, headers)
        if recursive:
            for key in self:
                key.add_user_grant(permission, user_id, headers=headers)

    def add_group_email_grant(self, permission, email_address,
                              recursive=False, headers=None):
        """Grant `permission` to the Google Group with the given email
        address, by fetching the current ACL, appending the grant and
        PUTting the modified ACL back to GS.

        :type permission: string
        :param permission: one of READ, WRITE, FULL_CONTROL.  See
            http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.

        :type email_address: string
        :param email_address: the email address associated with the
            Google Group being granted the permission.

        :type recursive: bool
        :param recursive: if True, also apply the same grant to every
            key in the bucket.  CAUTION: with many keys this could take
            a long time!
        """
        self._add_grant('add_group_email_grant', permission, email_address,
                        headers)
        if recursive:
            for key in self:
                key.add_group_email_grant(permission, email_address,
                                          headers=headers)

    # Method with same input signature as boto.s3.bucket.Bucket.list_grants()
    # (but returning different object type), to allow polymorphic treatment
    # at application layer.
    def list_grants(self, headers=None):
        """Return the Entries collection of the bucket's current ACL."""
        acl = self.get_acl(headers=headers)
        return acl.entries
diff --git a/boto/gs/connection.py b/boto/gs/connection.py
new file mode 100755
index 0000000..ec81f32
--- /dev/null
+++ b/boto/gs/connection.py
@@ -0,0 +1,39 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.connection import S3Connection
+from boto.s3.connection import SubdomainCallingFormat
+from boto.gs.bucket import Bucket
+
class GSConnection(S3Connection):
    """Connection to the Google Cloud Storage service.

    Reuses the S3 connection/auth machinery, pointed at the GS
    endpoint, with the 'google' provider name (for credential lookup)
    and the GS-specific Bucket class.
    """

    # Default service endpoint for Google Cloud Storage.
    DefaultHost = 'commondatastorage.googleapis.com'
    # Template for query-string authentication parameters (signed URLs).
    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'

    def __init__(self, gs_access_key_id=None, gs_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None,
                 host=DefaultHost, debug=0, https_connection_factory=None,
                 calling_format=SubdomainCallingFormat(), path='/'):
        # Delegate to S3Connection positionally, overriding only the
        # provider ('google') and bucket class.
        # NOTE(review): the calling_format default is a single instance
        # shared by every connection; it appears stateless, but confirm
        # before relying on that.
        S3Connection.__init__(self, gs_access_key_id, gs_secret_access_key,
                is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
                host, debug, https_connection_factory, calling_format, path,
                "google", Bucket)
diff --git a/boto/gs/key.py b/boto/gs/key.py
new file mode 100644
index 0000000..608a9a5
--- /dev/null
+++ b/boto/gs/key.py
@@ -0,0 +1,247 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.key import Key as S3Key
+
class Key(S3Key):
    """A Google Cloud Storage key (object), specialized from the S3 key."""

    def add_email_grant(self, permission, email_address, headers=None):
        """
        Convenience method that provides a quick way to add an email grant to a
        key. This method retrieves the current ACL, creates a new grant based on
        the parameters passed in, adds that grant to the ACL and then PUT's the
        new ACL back to GS.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            READ|FULL_CONTROL
            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.

        :type email_address: string
        :param email_address: The email address associated with the Google
                              account to which you are granting the permission.

        :type headers: dict
        :param headers: (optional) additional HTTP headers to send with the
            requests.  Added (with a default) so Bucket's recursive grant
            methods, which pass headers=..., work against GS keys.
        """
        acl = self.get_acl(headers=headers)
        acl.add_email_grant(permission, email_address)
        self.set_acl(acl, headers=headers)

    def add_user_grant(self, permission, user_id, headers=None):
        """
        Convenience method that provides a quick way to add a canonical user
        grant to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL and
        then PUT's the new ACL back to GS.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            READ|FULL_CONTROL
            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.

        :type user_id: string
        :param user_id: The canonical user id associated with the GS account to
                        which you are granting the permission.

        :type headers: dict
        :param headers: (optional) additional HTTP headers to send with the
            requests.
        """
        acl = self.get_acl(headers=headers)
        acl.add_user_grant(permission, user_id)
        self.set_acl(acl, headers=headers)

    def add_group_email_grant(self, permission, email_address, headers=None):
        """
        Convenience method that provides a quick way to add an email group
        grant to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL and
        then PUT's the new ACL back to GS.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            READ|FULL_CONTROL
            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.

        :type email_address: string
        :param email_address: The email address associated with the Google
            Group to which you are granting the permission.
        """
        acl = self.get_acl(headers=headers)
        acl.add_group_email_grant(permission, email_address)
        self.set_acl(acl, headers=headers)

    def add_group_grant(self, permission, group_id, headers=None):
        """
        Convenience method that provides a quick way to add a canonical group
        grant to a key. This method retrieves the current ACL, creates a new
        grant based on the parameters passed in, adds that grant to the ACL and
        then PUT's the new ACL back to GS.

        :type permission: string
        :param permission: The permission being granted. Should be one of:
            READ|FULL_CONTROL
            See http://code.google.com/apis/storage/docs/developer-guide.html#authorization
            for more details on permissions.

        :type group_id: string
        :param group_id: The canonical group id associated with the Google
            Groups account you are granting the permission to.

        :type headers: dict
        :param headers: (optional) additional HTTP headers to send with the
            requests.
        """
        acl = self.get_acl(headers=headers)
        acl.add_group_grant(permission, group_id)
        self.set_acl(acl, headers=headers)

    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               res_upload_handler=None):
        """
        Store an object in GS using the name of the Key object as the
        key in GS and the contents of the file pointed to by 'fp' as the
        contents.

        :type fp: file
        :param fp: the file whose contents are to be uploaded

        :type headers: dict
        :param headers: additional HTTP headers to be sent with the PUT request.

        :type replace: bool
        :param replace: If this parameter is False, the method will first check
            to see if an object exists in the bucket with the same key. If it
            does, it won't overwrite it. The default value is True which will
            overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to GS and the second representing the
            total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter, this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be called
            during the file transfer.

        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.

        :type md5: A tuple containing the hexdigest version of the MD5 checksum
            of the file as the first element and the Base64-encoded version of
            the plain checksum as the second element. This is the same format
            returned by the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to
            upload, it's silly to have to do it twice so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the checksum
            will be computed.

        :type res_upload_handler: ResumableUploadHandler
        :param res_upload_handler: If provided, this handler will perform the
            upload.

        TODO: At some point we should refactor the Bucket and Key classes,
        to move functionality common to all providers into a parent class,
        and provider-specific functionality into subclasses (rather than
        just overriding/sharing code the way it currently works).
        """
        provider = self.bucket.connection.provider
        if headers is None:
            # Build a fresh dict per call.  (The previous mutable {}
            # default was mutated via the acl header below, leaking the
            # policy of one call into subsequent calls.)
            headers = {}
        if policy:
            headers[provider.acl_header] = policy
        if hasattr(fp, 'name'):
            self.path = fp.name
        if self.bucket is not None:
            if not md5:
                md5 = self.compute_md5(fp)
            else:
                # Even if md5 is provided, still need to set size of content.
                fp.seek(0, 2)
                self.size = fp.tell()
                fp.seek(0)
            self.md5 = md5[0]
            self.base64md5 = md5[1]
            if self.name is None:
                self.name = self.md5
            if not replace:
                # Don't overwrite an existing object with the same key.
                k = self.bucket.lookup(self.name)
                if k:
                    return
            if res_upload_handler:
                res_upload_handler.send_file(self, fp, headers, cb, num_cb)
            else:
                # Not a resumable transfer so use basic send_file mechanism.
                self.send_file(fp, headers, cb, num_cb)

    def set_contents_from_filename(self, filename, headers=None, replace=True,
                                   cb=None, num_cb=10, policy=None, md5=None,
                                   reduced_redundancy=None,
                                   res_upload_handler=None):
        """
        Store an object in GS using the name of the Key object as the
        key in GS and the contents of the file named by 'filename'.
        See set_contents_from_file method for details about the
        parameters.

        :type filename: string
        :param filename: The name of the file that you want to put onto GS

        :type headers: dict
        :param headers: Additional headers to pass along with the request to GS.

        :type replace: bool
        :param replace: If True, replaces the contents of the file if it
            already exists.

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download. The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from GS and the second representing
            the total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be called
            during the file transfer.

        :type policy: :class:`boto.gs.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key
            in GS.

        :type md5: A tuple containing the hexdigest version of the MD5 checksum
            of the file as the first element and the Base64-encoded version of
            the plain checksum as the second element. This is the same format
            returned by the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to
            upload, it's silly to have to do it twice so this param, if present,
            will be used as the MD5 values of the file. Otherwise, the checksum
            will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: accepted for signature compatibility
            with the S3 class but ignored (no GS equivalent).

        :type res_upload_handler: ResumableUploadHandler
        :param res_upload_handler: If provided, this handler will perform the
            upload.
        """
        fp = open(filename, 'rb')
        try:
            self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                        policy, md5, res_upload_handler)
        finally:
            # Close the file even if the upload raises.
            fp.close()
diff --git a/boto/gs/resumable_upload_handler.py b/boto/gs/resumable_upload_handler.py
new file mode 100644
index 0000000..e8d5b03
--- /dev/null
+++ b/boto/gs/resumable_upload_handler.py
@@ -0,0 +1,526 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import cgi
+import errno
+import httplib
+import os
+import re
+import socket
+import time
+import urlparse
+import boto
+from boto import config
+from boto.connection import AWSAuthConnection
+from boto.exception import InvalidUriError
+from boto.exception import ResumableTransferDisposition
+from boto.exception import ResumableUploadException
+
+"""
+Handler for Google Storage resumable uploads. See
+http://code.google.com/apis/storage/docs/developer-guide.html#resumable
+for details.
+
+Resumable uploads will retry failed uploads, resuming at the byte
+count completed by the last upload attempt. If too many retries happen with
+no progress (per configurable num_retries param), the upload will be aborted.
+
+The caller can optionally specify a tracker_file_name param in the
+ResumableUploadHandler constructor. If you do this, that file will
+save the state needed to allow retrying later, in a separate process
+(e.g., in a later run of gsutil).
+"""
+
+
+class ResumableUploadHandler(object):
+    """
+    Tracks state for a Google Storage resumable upload and retries failed
+    attempts, resuming from the byte count the server reports. Instantiate
+    one handler per uploaded file; pass it to Key.set_contents_from_file
+    (or set_contents_from_filename) as res_upload_handler.
+    """
+
+    # Size (bytes) of each chunk read from the source file per send.
+    BUFFER_SIZE = 8192
+    # Low-level transport errors that warrant a retry rather than an abort.
+    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
+                            socket.gaierror)
+
+    # (start, end) response indicating server has nothing (upload protocol uses
+    # inclusive numbering).
+    SERVER_HAS_NOTHING = (0, -1)
+
+    def __init__(self, tracker_file_name=None, num_retries=None):
+        """
+        Constructor. Instantiate once for each uploaded file.
+
+        :type tracker_file_name: string
+        :param tracker_file_name: optional file name to save tracker URI.
+            If supplied and the current process fails the upload, it can be
+            retried in a new process. If called with an existing file containing
+            a valid tracker URI, we'll resume the upload from this URI; else
+            we'll start a new resumable upload (and write the URI to this
+            tracker file).
+
+        :type num_retries: int
+        :param num_retries: the number of times we'll re-try a resumable upload
+            making no progress. (Count resets every time we get progress, so
+            upload can span many more than this number of retries.)
+        """
+        self.tracker_file_name = tracker_file_name
+        self.num_retries = num_retries
+        self.server_has_bytes = 0  # Byte count at last server check.
+        self.tracker_uri = None
+        if tracker_file_name:
+            # Best-effort: failure to load leaves tracker_uri == None, which
+            # means a fresh resumable upload will be started.
+            self._load_tracker_uri_from_file()
+        # Save upload_start_point in instance state so caller can find how
+        # much was transferred by this ResumableUploadHandler (across retries).
+        self.upload_start_point = None
+
+    def _load_tracker_uri_from_file(self):
+        """
+        Attempts to load a tracker URI from self.tracker_file_name.
+
+        Best-effort: a missing file, unreadable file, or invalid URI leaves
+        self.tracker_uri as None, so the upload restarts from scratch.
+        """
+        f = None
+        try:
+            f = open(self.tracker_file_name, 'r')
+            uri = f.readline().strip()
+            self._set_tracker_uri(uri)
+        except IOError, e:
+            # Ignore non-existent file (happens first time an upload
+            # is attempted on a file), but warn user for other errors.
+            if e.errno != errno.ENOENT:
+                # Will restart because self.tracker_uri == None.
+                print('Couldn\'t read URI tracker file (%s): %s. Restarting '
+                      'upload from scratch.' %
+                      (self.tracker_file_name, e.strerror))
+        except InvalidUriError, e:
+            # Warn user, but proceed (will restart because
+            # self.tracker_uri == None).
+            print('Invalid tracker URI (%s) found in URI tracker file '
+                  '(%s). Restarting upload from scratch.' %
+                  (uri, self.tracker_file_name))
+        finally:
+            if f:
+                f.close()
+
+ def _save_tracker_uri_to_file(self):
+ """
+ Saves URI to tracker file if one was passed to constructor.
+ """
+ if not self.tracker_file_name:
+ return
+ f = None
+ try:
+ f = open(self.tracker_file_name, 'w')
+ f.write(self.tracker_uri)
+ except IOError, e:
+ raise ResumableUploadException(
+ 'Couldn\'t write URI tracker file (%s): %s.\nThis can happen'
+ 'if you\'re using an incorrectly configured upload tool\n'
+ '(e.g., gsutil configured to save tracker files to an '
+ 'unwritable directory)' %
+ (self.tracker_file_name, e.strerror),
+ ResumableTransferDisposition.ABORT)
+ finally:
+ if f:
+ f.close()
+
+    def _set_tracker_uri(self, uri):
+        """
+        Called when we start a new resumable upload or get a new tracker
+        URI for the upload. Saves URI and resets upload state.
+
+        Raises InvalidUriError if URI is syntactically invalid.
+        """
+        parse_result = urlparse.urlparse(uri)
+        if (parse_result.scheme.lower() not in ['http', 'https'] or
+            not parse_result.netloc or not parse_result.query):
+            raise InvalidUriError('Invalid tracker URI (%s)' % uri)
+        # A valid tracker URI must carry an upload_id query parameter.
+        qdict = cgi.parse_qs(parse_result.query)
+        if not qdict or not 'upload_id' in qdict:
+            raise InvalidUriError('Invalid tracker URI (%s)' % uri)
+        self.tracker_uri = uri
+        self.tracker_uri_host = parse_result.netloc
+        self.tracker_uri_path = '%s/?%s' % (parse_result.netloc,
+                                            parse_result.query)
+        # New URI means previously-recorded server progress no longer applies.
+        self.server_has_bytes = 0
+
+    def get_tracker_uri(self):
+        """
+        Returns upload tracker URI, or None if the upload has not yet started.
+        """
+        return self.tracker_uri
+
+    def _remove_tracker_file(self):
+        # Deletes the tracker file (if one was configured and exists),
+        # e.g. after the upload completes successfully.
+        if (self.tracker_file_name and
+            os.path.exists(self.tracker_file_name)):
+            os.unlink(self.tracker_file_name)
+
+    def _build_content_range_header(self, range_spec='*', length_spec='*'):
+        # Builds a Content-Range header value, e.g. 'bytes 0-99/100'.
+        # '*' means "unspecified", per the resumable upload protocol.
+        return 'bytes %s/%s' % (range_spec, length_spec)
+
+    def _query_server_state(self, conn, file_length):
+        """
+        Queries server to find out what bytes it currently has.
+
+        Note that this method really just makes special case use of the
+        fact that the upload server always returns the current start/end
+        state whenever a PUT doesn't complete.
+
+        Returns (server_start, server_end), where the values are inclusive.
+        For example, (0, 2) would mean that the server has bytes 0, 1, *and* 2.
+
+        Raises ResumableUploadException if problem querying server.
+        """
+        # Send an empty PUT so that server replies with this resumable
+        # transfer's state.
+        put_headers = {}
+        put_headers['Content-Range'] = (
+            self._build_content_range_header('*', file_length))
+        put_headers['Content-Length'] = '0'
+        resp = AWSAuthConnection.make_request(conn, 'PUT',
+                                              path=self.tracker_uri_path,
+                                              auth_path=self.tracker_uri_path,
+                                              headers=put_headers,
+                                              host=self.tracker_uri_host)
+        if resp.status == 200:
+            return (0, file_length)  # Completed upload.
+        # 308 ('Resume Incomplete') is the expected response for a partial
+        # upload in the resumable upload protocol.
+        if resp.status != 308:
+            # This means the server didn't have any state for the given
+            # upload ID, which can happen (for example) if the caller saved
+            # the tracker URI to a file and then tried to restart the transfer
+            # after that upload ID has gone stale. In that case we need to
+            # start a new transfer (and the caller will then save the new
+            # tracker URI to the tracker file).
+            raise ResumableUploadException(
+                'Got non-308 response (%s) from server state query' %
+                resp.status, ResumableTransferDisposition.START_OVER)
+        got_valid_response = False
+        range_spec = resp.getheader('range')
+        if range_spec:
+            # Parse 'bytes=<from>-<to>' range_spec.
+            m = re.search('bytes=(\d+)-(\d+)', range_spec)
+            if m:
+                server_start = long(m.group(1))
+                server_end = long(m.group(2))
+                got_valid_response = True
+        else:
+            # No Range header, which means the server does not yet have
+            # any bytes. Note that the Range header uses inclusive 'from'
+            # and 'to' values. Since Range 0-0 would mean that the server
+            # has byte 0, omitting the Range header is used to indicate that
+            # the server doesn't have any bytes.
+            return self.SERVER_HAS_NOTHING
+        if not got_valid_response:
+            raise ResumableUploadException(
+                'Couldn\'t parse upload server state query response (%s)' %
+                str(resp.getheaders()), ResumableTransferDisposition.START_OVER)
+        if conn.debug >= 1:
+            print 'Server has: Range: %d - %d.' % (server_start, server_end)
+        return (server_start, server_end)
+
+ def _start_new_resumable_upload(self, key, headers=None):
+ """
+ Starts a new resumable upload.
+
+ Raises ResumableUploadException if any errors occur.
+ """
+ conn = key.bucket.connection
+ if conn.debug >= 1:
+ print 'Starting new resumable upload.'
+ self.server_has_bytes = 0
+
+ # Start a new resumable upload by sending a POST request with an
+ # empty body and the "X-Goog-Resumable: start" header. Include any
+ # caller-provided headers (e.g., Content-Type) EXCEPT Content-Length
+ # (and raise an exception if they tried to pass one, since it's
+ # a semantic error to specify it at this point, and if we were to
+ # include one now it would cause the server to expect that many
+ # bytes; the POST doesn't include the actual file bytes We set
+ # the Content-Length in the subsequent PUT, based on the uploaded
+ # file size.
+ post_headers = {}
+ for k in headers:
+ if k.lower() == 'content-length':
+ raise ResumableUploadException(
+ 'Attempt to specify Content-Length header (disallowed)',
+ ResumableTransferDisposition.ABORT)
+ post_headers[k] = headers[k]
+ post_headers[conn.provider.resumable_upload_header] = 'start'
+
+ resp = conn.make_request(
+ 'POST', key.bucket.name, key.name, post_headers)
+ # Get tracker URI from response 'Location' header.
+ body = resp.read()
+ # Check for '201 Created' response code.
+ if resp.status != 201:
+ raise ResumableUploadException(
+ 'Got status %d from attempt to start resumable upload' %
+ resp.status, ResumableTransferDisposition.WAIT_BEFORE_RETRY)
+ tracker_uri = resp.getheader('Location')
+ if not tracker_uri:
+ raise ResumableUploadException(
+ 'No resumable tracker URI found in resumable initiation '
+ 'POST response (%s)' % body,
+ ResumableTransferDisposition.WAIT_BEFORE_RETRY)
+ self._set_tracker_uri(tracker_uri)
+ self._save_tracker_uri_to_file()
+
+    def _upload_file_bytes(self, conn, http_conn, fp, file_length,
+                           total_bytes_uploaded, cb, num_cb):
+        """
+        Makes one attempt to upload file bytes, using an existing resumable
+        upload connection.
+
+        Returns etag from server upon success.
+
+        Raises ResumableUploadException if any problems occur.
+        """
+        buf = fp.read(self.BUFFER_SIZE)
+        if cb:
+            # Determine how many buffer reads to skip between callback calls:
+            # num_cb < 0 means call back on every buffer; 0 means only the
+            # initial and final calls below.
+            if num_cb > 2:
+                cb_count = file_length / self.BUFFER_SIZE / (num_cb-2)
+            elif num_cb < 0:
+                cb_count = -1
+            else:
+                cb_count = 0
+            i = 0
+            cb(total_bytes_uploaded, file_length)
+
+        # Build resumable upload headers for the transfer. Don't send a
+        # Content-Range header if the file is 0 bytes long, because the
+        # resumable upload protocol uses an *inclusive* end-range (so, sending
+        # 'bytes 0-0/1' would actually mean you're sending a 1-byte file).
+        put_headers = {}
+        if file_length:
+            range_header = self._build_content_range_header(
+                '%d-%d' % (total_bytes_uploaded, file_length - 1),
+                file_length)
+            put_headers['Content-Range'] = range_header
+        # Set Content-Length to the total bytes we'll send with this PUT.
+        put_headers['Content-Length'] = str(file_length - total_bytes_uploaded)
+        http_request = AWSAuthConnection.build_base_http_request(
+            conn, 'PUT', path=self.tracker_uri_path, auth_path=None,
+            headers=put_headers, host=self.tracker_uri_host)
+        http_conn.putrequest('PUT', http_request.path)
+        for k in put_headers:
+            http_conn.putheader(k, put_headers[k])
+        http_conn.endheaders()
+
+        # Turn off debug on http connection so upload content isn't included
+        # in debug stream.
+        http_conn.set_debuglevel(0)
+        while buf:
+            http_conn.send(buf)
+            total_bytes_uploaded += len(buf)
+            if cb:
+                i += 1
+                if i == cb_count or cb_count == -1:
+                    cb(total_bytes_uploaded, file_length)
+                    i = 0
+            buf = fp.read(self.BUFFER_SIZE)
+        if cb:
+            cb(total_bytes_uploaded, file_length)
+        if total_bytes_uploaded != file_length:
+            # The file hit EOF before the expected length (e.g., it was
+            # truncated while we were uploading); abort rather than leave
+            # the server expecting more bytes.
+            raise ResumableUploadException('File changed during upload: EOF at '
+                                           '%d bytes of %d byte file.' %
+                                           (total_bytes_uploaded, file_length),
+                                           ResumableTransferDisposition.ABORT)
+        resp = http_conn.getresponse()
+        body = resp.read()
+        # Restore http connection debug level.
+        http_conn.set_debuglevel(conn.debug)
+
+        additional_note = ''
+        if resp.status == 200:
+            return resp.getheader('etag')  # Success
+        # Retry status 503 errors after a delay.
+        elif resp.status == 503:
+            disposition = ResumableTransferDisposition.WAIT_BEFORE_RETRY
+        elif resp.status == 500:
+            disposition = ResumableTransferDisposition.ABORT
+            additional_note = ('This can happen if you attempt to upload a '
+                               'different size file on a already partially '
+                               'uploaded resumable upload')
+        else:
+            disposition = ResumableTransferDisposition.ABORT
+        raise ResumableUploadException('Got response code %d while attempting '
+                                       'upload (%s)%s' %
+                                       (resp.status, resp.reason,
+                                        additional_note), disposition)
+
+    def _attempt_resumable_upload(self, key, fp, file_length, headers, cb,
+                                  num_cb):
+        """
+        Attempts a resumable upload.
+
+        Returns etag from server upon success; returns None (without
+        uploading) if the server reports the upload already complete.
+
+        Raises ResumableUploadException if any problems occur.
+        """
+        (server_start, server_end) = self.SERVER_HAS_NOTHING
+        conn = key.bucket.connection
+        if self.tracker_uri:
+            # Try to resume existing resumable upload.
+            try:
+                (server_start, server_end) = (
+                    self._query_server_state(conn, file_length))
+                self.server_has_bytes = server_start
+                if conn.debug >= 1:
+                    print 'Resuming transfer.'
+            except ResumableUploadException, e:
+                if conn.debug >= 1:
+                    print 'Unable to resume transfer (%s).' % e.message
+                self._start_new_resumable_upload(key, headers)
+        else:
+            self._start_new_resumable_upload(key, headers)
+
+        # upload_start_point allows the code that instantiated the
+        # ResumableUploadHandler to find out the point from which it started
+        # uploading (e.g., so it can correctly compute throughput).
+        if self.upload_start_point is None:
+            self.upload_start_point = server_end
+
+        # _query_server_state returns (0, file_length) on HTTP 200, i.e.
+        # when the server reports the upload is already complete.
+        if server_end == file_length:
+            return  # Done.
+        # server_end is inclusive, so the next byte to send is server_end + 1
+        # (SERVER_HAS_NOTHING is (0, -1), giving a start offset of 0).
+        total_bytes_uploaded = server_end + 1
+        fp.seek(total_bytes_uploaded)
+        conn = key.bucket.connection
+
+        # Get a new HTTP connection (vs conn.get_http_connection(), which reuses
+        # pool connections) because httplib requires a new HTTP connection per
+        # transaction. (Without this, calling http_conn.getresponse() would get
+        # "ResponseNotReady".)
+        http_conn = conn.new_http_connection(self.tracker_uri_host,
+                                             conn.is_secure)
+        http_conn.set_debuglevel(conn.debug)
+
+        # Make sure to close http_conn at end so if a local file read
+        # failure occurs partway through server will terminate current upload
+        # and can report that progress on next attempt.
+        try:
+            return self._upload_file_bytes(conn, http_conn, fp, file_length,
+                                           total_bytes_uploaded, cb, num_cb)
+        finally:
+            http_conn.close()
+
+    def _check_final_md5(self, key, etag):
+        """
+        Checks that etag from server agrees with md5 computed before upload.
+        This is important, since the upload could have spanned a number of
+        hours and multiple processes (e.g., gsutil runs), and the user could
+        change some of the file and not realize they have inconsistent data.
+
+        Raises ResumableUploadException (ABORT) on mismatch, after deleting
+        the incorrectly-uploaded object.
+        """
+        if key.bucket.connection.debug >= 1:
+            print 'Checking md5 against etag.'
+        if key.md5 != etag.strip('"\''):
+            # Call key.open_read() before attempting to delete the
+            # (incorrect-content) key, so we perform that request on a
+            # different HTTP connection. This is needed because httplib
+            # will return a "Response not ready" error if you try to perform
+            # a second transaction on the connection.
+            key.open_read()
+            key.close()
+            key.delete()
+            raise ResumableUploadException(
+                'File changed during upload: md5 signature doesn\'t match etag '
+                '(incorrect uploaded object deleted)',
+                ResumableTransferDisposition.ABORT)
+
+    def send_file(self, key, fp, headers, cb=None, num_cb=10):
+        """
+        Upload a file to a key into a bucket on GS, using GS resumable upload
+        protocol.
+
+        :type key: :class:`boto.s3.key.Key` or subclass
+        :param key: The Key object to which data is to be uploaded
+
+        :type fp: file-like object
+        :param fp: The file pointer to upload
+
+        :type headers: dict
+        :param headers: The headers to pass along with the PUT request
+
+        :type cb: function
+        :param cb: a callback function that will be called to report progress on
+            the upload. The callback should accept two integer parameters, the
+            first representing the number of bytes that have been successfully
+            transmitted to GS, and the second representing the total number of
+            bytes that need to be transmitted.
+
+        :type num_cb: int
+        :param num_cb: (optional) If a callback is specified with the cb
+            parameter, this parameter determines the granularity of the callback
+            by defining the maximum number of times the callback will be called
+            during the file transfer. Providing a negative integer will cause
+            your callback to be called with each buffer read.
+
+        Raises ResumableUploadException if a problem occurs during the transfer.
+        """
+
+        if not headers:
+            headers = {}
+
+        # Determine total file size by seeking to EOF, then rewind for upload.
+        fp.seek(0, os.SEEK_END)
+        file_length = fp.tell()
+        fp.seek(0)
+        debug = key.bucket.connection.debug
+
+        # Use num-retries from constructor if one was provided; else check
+        # for a value specified in the boto config file; else default to 5.
+        if self.num_retries is None:
+            self.num_retries = config.getint('Boto', 'num_retries', 5)
+        progress_less_iterations = 0
+
+        while True:  # Retry as long as we're making progress.
+            server_had_bytes_before_attempt = self.server_has_bytes
+            try:
+                etag = self._attempt_resumable_upload(key, fp, file_length,
+                                                      headers, cb, num_cb)
+                # Upload succeeded, so remove the tracker file (if have one).
+                self._remove_tracker_file()
+                # NOTE(review): _attempt_resumable_upload returns None when
+                # the server reports the upload already complete; in that case
+                # etag.strip() inside _check_final_md5 would raise -- confirm.
+                self._check_final_md5(key, etag)
+                if debug >= 1:
+                    print 'Resumable upload complete.'
+                return
+            except self.RETRYABLE_EXCEPTIONS, e:
+                if debug >= 1:
+                    print('Caught exception (%s)' % e.__repr__())
+            except ResumableUploadException, e:
+                if e.disposition == ResumableTransferDisposition.ABORT:
+                    if debug >= 1:
+                        print('Caught non-retryable ResumableUploadException '
+                              '(%s)' % e.message)
+                    raise
+                else:
+                    if debug >= 1:
+                        print('Caught ResumableUploadException (%s) - will '
+                              'retry' % e.message)
+
+            # At this point we had a re-tryable failure; see if made progress.
+            if self.server_has_bytes > server_had_bytes_before_attempt:
+                progress_less_iterations = 0
+            else:
+                progress_less_iterations += 1
+
+            if progress_less_iterations > self.num_retries:
+                # Don't retry any longer in the current process.
+                raise ResumableUploadException(
+                    'Too many resumable upload attempts failed without '
+                    'progress. You might try this upload again later',
+                    ResumableTransferDisposition.ABORT)
+
+            # Exponential backoff between progress-less attempts.
+            sleep_time_secs = 2**progress_less_iterations
+            if debug >= 1:
+                print ('Got retryable failure (%d progress-less in a row).\n'
+                       'Sleeping %d seconds before re-trying' %
+                       (progress_less_iterations, sleep_time_secs))
+            time.sleep(sleep_time_secs)
diff --git a/boto/gs/user.py b/boto/gs/user.py
new file mode 100755
index 0000000..62f2cf5
--- /dev/null
+++ b/boto/gs/user.py
@@ -0,0 +1,54 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class User:
+    """
+    Represents a Google Storage user (e.g., a bucket/object owner), parsed
+    from and serialized to ACL-style XML.
+    """
+    def __init__(self, parent=None, id='', name=''):
+        if parent:
+            # Register this user as the owner on the containing element.
+            parent.owner = self
+        self.type = None  # Optional type="..." attribute emitted by to_xml().
+        self.id = id
+        self.name = name
+
+    def __repr__(self):
+        return self.id
+
+    # SAX-style parsing hooks, driven by boto's XML handler.
+    def startElement(self, name, attrs, connection):
+        return None
+
+    def endElement(self, name, value, connection):
+        if name == 'Name':
+            self.name = value
+        elif name == 'ID':
+            self.id = value
+        else:
+            # Preserve any unrecognized element as an instance attribute.
+            setattr(self, name, value)
+
+    def to_xml(self, element_name='Owner'):
+        # Serializes as e.g. <Owner><ID>...</ID><Name>...</Name></Owner>;
+        # <Name> is omitted when empty, and a type attribute is added if set.
+        if self.type:
+            s = '<%s type="%s">' % (element_name, self.type)
+        else:
+            s = '<%s>' % element_name
+        s += '<ID>%s</ID>' % self.id
+        if self.name:
+            s += '<Name>%s</Name>' % self.name
+        s += '</%s>' % element_name
+        return s
diff --git a/boto/handler.py b/boto/handler.py
new file mode 100644
index 0000000..525f9c9
--- /dev/null
+++ b/boto/handler.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+
+class XmlHandler(xml.sax.ContentHandler):
+    """
+    SAX content handler that delegates parsing to a tree of boto objects.
+
+    Each node object provides startElement(name, attrs, connection) --
+    returning a child node or None -- and endElement(name, value,
+    connection); a stack of (element name, node) pairs tracks the
+    currently-active node.
+    """
+
+    def __init__(self, root_node, connection):
+        self.connection = connection
+        # Stack of (element name, node) pairs; 'root' sentinel at the bottom.
+        self.nodes = [('root', root_node)]
+        self.current_text = ''
+
+    def startElement(self, name, attrs):
+        self.current_text = ''
+        new_node = self.nodes[-1][1].startElement(name, attrs, self.connection)
+        if new_node != None:
+            # Current node produced a child to handle this element.
+            self.nodes.append((name, new_node))
+
+    def endElement(self, name):
+        self.nodes[-1][1].endElement(name, self.current_text, self.connection)
+        if self.nodes[-1][0] == name:
+            # The element that pushed this node is closing; pop it.
+            self.nodes.pop()
+        self.current_text = ''
+
+    def characters(self, content):
+        # Text content may arrive in multiple chunks; accumulate it.
+        self.current_text += content
+
+
diff --git a/boto/iam/__init__.py b/boto/iam/__init__.py
new file mode 100644
index 0000000..498d736
--- /dev/null
+++ b/boto/iam/__init__.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# this is here for backward compatibility
+# originally, the IAMConnection class was defined here
+from connection import IAMConnection
+
+
diff --git a/boto/iam/connection.py b/boto/iam/connection.py
new file mode 100644
index 0000000..39ab704
--- /dev/null
+++ b/boto/iam/connection.py
@@ -0,0 +1,1006 @@
+# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+import boto.jsonresponse
+from boto.connection import AWSQueryConnection
+
+#boto.set_stream_logger('iam')
+
+class IAMConnection(AWSQueryConnection):
+    """
+    Connection to the AWS Identity and Access Management (IAM) service.
+    """
+
+    APIVersion = '2010-05-08'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, host='iam.amazonaws.com',
+                 debug=0, https_connection_factory=None, path='/'):
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy,
+                                    proxy_port, proxy_user, proxy_pass,
+                                    host, debug, https_connection_factory, path)
+
+    def _required_auth_capability(self):
+        # IAM requests are signed with the 'iam' auth handler.
+        return ['iam']
+
+    def get_response(self, action, params, path='/', parent=None,
+                     verb='GET', list_marker='Set'):
+        """
+        Utility method to handle calls to IAM and parsing of responses.
+
+        Returns a boto.jsonresponse.Element parsed from the response body
+        on HTTP 200; raises self.ResponseError for any other status.
+        """
+        if not parent:
+            parent = self
+        response = self.make_request(action, params, path, verb)
+        body = response.read()
+        boto.log.debug(body)
+        if response.status == 200:
+            e = boto.jsonresponse.Element(list_marker=list_marker)
+            h = boto.jsonresponse.XmlHandler(e, parent)
+            h.parse(body)
+            return e
+        else:
+            boto.log.error('%s %s' % (response.status, response.reason))
+            boto.log.error('%s' % body)
+            raise self.ResponseError(response.status, response.reason, body)
+
+ #
+ # Group methods
+ #
+
+    def get_all_groups(self, path_prefix='/', marker=None, max_items=None):
+        """
+        List the groups that have the specified path prefix.
+
+        :type path_prefix: string
+        :param path_prefix: If provided, only groups whose paths match
+            the provided prefix will be returned.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only in
+            follow-up request after you've received a response
+            where the results are truncated. Set this to the
+            value of the Marker element in the response you
+            just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of groups you want in the
+            response.
+        """
+        params = {}
+        if path_prefix:
+            params['PathPrefix'] = path_prefix
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('ListGroups', params,
+                                 list_marker='Groups')
+
+    def get_group(self, group_name, marker=None, max_items=None):
+        """
+        Return a list of users that are in the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group whose information should
+            be returned.
+
+        :type marker: string
+        :param marker: Use this only when paginating results and only in
+            follow-up request after you've received a response
+            where the results are truncated. Set this to the
+            value of the Marker element in the response you
+            just received.
+
+        :type max_items: int
+        :param max_items: Use this only when paginating results to indicate
+            the maximum number of users you want in the
+            response.
+        """
+        params = {'GroupName' : group_name}
+        if marker:
+            params['Marker'] = marker
+        if max_items:
+            params['MaxItems'] = max_items
+        return self.get_response('GetGroup', params, list_marker='Users')
+
+    def create_group(self, group_name, path='/'):
+        """
+        Create a group.
+
+        :type group_name: string
+        :param group_name: The name of the new group
+
+        :type path: string
+        :param path: The path to the group (Optional). Defaults to /.
+
+        """
+        params = {'GroupName' : group_name,
+                  'Path' : path}
+        return self.get_response('CreateGroup', params)
+
+    def delete_group(self, group_name):
+        """
+        Delete a group. The group must not contain any Users or
+        have any attached policies
+
+        :type group_name: string
+        :param group_name: The name of the group to delete.
+
+        """
+        params = {'GroupName' : group_name}
+        return self.get_response('DeleteGroup', params)
+
+    def update_group(self, group_name, new_group_name=None, new_path=None):
+        """
+        Updates name and/or path of the specified group.
+
+        :type group_name: string
+        :param group_name: The name of the group to update.
+
+        :type new_group_name: string
+        :param new_group_name: If provided, the name of the group will be
+            changed to this name.
+
+        :type new_path: string
+        :param new_path: If provided, the path of the group will be
+            changed to this path.
+
+        """
+        params = {'GroupName' : group_name}
+        if new_group_name:
+            params['NewGroupName'] = new_group_name
+        if new_path:
+            params['NewPath'] = new_path
+        return self.get_response('UpdateGroup', params)
+
+    def add_user_to_group(self, group_name, user_name):
+        """
+        Add a user to a group
+
+        :type group_name: string
+        :param group_name: The name of the group to add the user to.
+
+        :type user_name: string
+        :param user_name: The user to be added to the group.
+
+        """
+        params = {'GroupName' : group_name,
+                  'UserName' : user_name}
+        return self.get_response('AddUserToGroup', params)
+
+    def remove_user_from_group(self, group_name, user_name):
+        """
+        Remove a user from a group.
+
+        :type group_name: string
+        :param group_name: The name of the group to remove the user from.
+
+        :type user_name: string
+        :param user_name: The user to remove from the group.
+
+        """
+        params = {'GroupName' : group_name,
+                  'UserName' : user_name}
+        return self.get_response('RemoveUserFromGroup', params)
+
+ def put_group_policy(self, group_name, policy_name, policy_json):
+ """
+ Adds or updates the specified policy document for the specified group.
+
+ :type group_name: string
+ :param group_name: The name of the group the policy is associated with.
+
+ :type policy_name: string
+ :param policy_name: The policy document to get.
+
+ :type policy_json: string
+ :param policy_json: The policy document.
+
+ """
+ params = {'GroupName' : group_name,
+ 'PolicyName' : policy_name,
+ 'PolicyDocument' : policy_json}
+ return self.get_response('PutGroupPolicy', params, verb='POST')
+
def get_all_group_policies(self, group_name, marker=None, max_items=None):
    """
    List the names of the policies associated with the specified group.

    :type group_name: string
    :param group_name: The name of the group the policies are associated
                       with.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of policy names you want in the
                      response.
    """
    params = {'GroupName' : group_name}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListGroupPolicies', params,
                             list_marker='PolicyNames')
+
def get_group_policy(self, group_name, policy_name):
    """
    Retrieves the specified policy document for the specified group.

    :type group_name: string
    :param group_name: The name of the group the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to get.

    """
    return self.get_response('GetGroupPolicy',
                             {'GroupName' : group_name,
                              'PolicyName' : policy_name},
                             verb='POST')
+
def delete_group_policy(self, group_name, policy_name):
    """
    Deletes the specified policy document for the specified group.

    :type group_name: string
    :param group_name: The name of the group the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to delete.

    """
    return self.get_response('DeleteGroupPolicy',
                             {'GroupName' : group_name,
                              'PolicyName' : policy_name},
                             verb='POST')
+
def get_all_users(self, path_prefix='/', marker=None, max_items=None):
    """
    List the users that have the specified path prefix.

    :type path_prefix: string
    :param path_prefix: If provided, only users whose paths match
                        the provided prefix will be returned.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of users you want in the
                      response.
    """
    params = {'PathPrefix' : path_prefix}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListUsers', params, list_marker='Users')
+
+ #
+ # User methods
+ #
+
def create_user(self, user_name, path='/'):
    """
    Create a user.

    :type user_name: string
    :param user_name: The name of the new user.

    :type path: string
    :param path: The path in which the user will be created.
                 Defaults to /.

    """
    return self.get_response('CreateUser',
                             {'UserName' : user_name,
                              'Path' : path})
+
def delete_user(self, user_name):
    """
    Delete a user including the user's path, GUID and ARN.

    :type user_name: string
    :param user_name: The name of the user to delete.  Required: unlike
                      some other user-related calls, this one cannot be
                      inferred from the signing credentials.

    """
    params = {'UserName' : user_name}
    return self.get_response('DeleteUser', params)
+
def get_user(self, user_name=None):
    """
    Retrieve information about the specified user.

    If the user_name is not specified, the user_name is determined
    implicitly based on the AWS Access Key ID used to sign the request.

    :type user_name: string
    :param user_name: The name of the user to retrieve.
                      If not specified, defaults to user making
                      request.

    """
    params = {}
    if user_name:
        params['UserName'] = user_name
    return self.get_response('GetUser', params)
+
def update_user(self, user_name, new_user_name=None, new_path=None):
    """
    Updates name and/or path of the specified user.

    :type user_name: string
    :param user_name: The name of the user.

    :type new_user_name: string
    :param new_user_name: If provided, the username of the user will be
                          changed to this username.

    :type new_path: string
    :param new_path: If provided, the path of the user will be
                     changed to this path.

    """
    params = {'UserName' : user_name}
    # Only include the optional fields that were actually supplied.
    for key, value in (('NewUserName', new_user_name),
                       ('NewPath', new_path)):
        if value:
            params[key] = value
    return self.get_response('UpdateUser', params)
+
def get_all_user_policies(self, user_name, marker=None, max_items=None):
    """
    List the names of the policies associated with the specified user.

    :type user_name: string
    :param user_name: The name of the user the policies are associated
                      with.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of policy names you want in the
                      response.
    """
    params = {'UserName' : user_name}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListUserPolicies', params,
                             list_marker='PolicyNames')
+
def put_user_policy(self, user_name, policy_name, policy_json):
    """
    Adds or updates the specified policy document for the specified user.

    :type user_name: string
    :param user_name: The name of the user the policy is associated with.

    :type policy_name: string
    :param policy_name: The name of the policy document to add or update.

    :type policy_json: string
    :param policy_json: The policy document, as a JSON string.

    """
    params = {'UserName' : user_name,
              'PolicyName' : policy_name,
              'PolicyDocument' : policy_json}
    # POST because the policy document can exceed GET query-string limits.
    return self.get_response('PutUserPolicy', params, verb='POST')
+
def get_user_policy(self, user_name, policy_name):
    """
    Retrieves the specified policy document for the specified user.

    :type user_name: string
    :param user_name: The name of the user the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to get.

    """
    return self.get_response('GetUserPolicy',
                             {'UserName' : user_name,
                              'PolicyName' : policy_name},
                             verb='POST')
+
def delete_user_policy(self, user_name, policy_name):
    """
    Deletes the specified policy document for the specified user.

    :type user_name: string
    :param user_name: The name of the user the policy is associated with.

    :type policy_name: string
    :param policy_name: The policy document to delete.

    """
    return self.get_response('DeleteUserPolicy',
                             {'UserName' : user_name,
                              'PolicyName' : policy_name},
                             verb='POST')
+
def get_groups_for_user(self, user_name, marker=None, max_items=None):
    """
    List the groups that a specified user belongs to.

    :type user_name: string
    :param user_name: The name of the user to list groups for.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a truncated
                   response.  Set this to the value of the Marker
                   element in the response you just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of groups you want in the
                      response.
    """
    params = {'UserName' : user_name}
    # Pagination controls are included only when the caller supplied them.
    for key, value in (('Marker', marker), ('MaxItems', max_items)):
        if value:
            params[key] = value
    return self.get_response('ListGroupsForUser', params,
                             list_marker='Groups')
+
+ #
+ # Access Keys
+ #
+
def get_all_access_keys(self, user_name, marker=None, max_items=None):
    """
    Get all access keys associated with an account.

    :type user_name: string
    :param user_name: The username of the user whose keys are listed.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of access keys you want in the
                      response.
    """
    params = {'UserName' : user_name}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListAccessKeys', params,
                             list_marker='AccessKeyMetadata')
+
def create_access_key(self, user_name=None):
    """
    Create a new AWS Secret Access Key and corresponding AWS Access Key ID
    for the specified user.  The default status for new keys is Active.

    If the user_name is not specified, the user_name is determined
    implicitly based on the AWS Access Key ID used to sign the request.

    :type user_name: string
    :param user_name: The username of the user the key is created for
                      (optional).

    """
    # Previously UserName was always sent, even when user_name was None,
    # which defeats the documented implicit-user behavior.  Guard it the
    # same way update_access_key/delete_access_key do.
    params = {}
    if user_name:
        params['UserName'] = user_name
    return self.get_response('CreateAccessKey', params)
+
def update_access_key(self, access_key_id, status, user_name=None):
    """
    Changes the status of the specified access key from Active to Inactive
    or vice versa.  This action can be used to disable a user's key as
    part of a key rotation workflow.

    If the user_name is not specified, the user_name is determined
    implicitly based on the AWS Access Key ID used to sign the request.

    :type access_key_id: string
    :param access_key_id: The ID of the access key.

    :type status: string
    :param status: Either Active or Inactive.

    :type user_name: string
    :param user_name: The username of user (optional).

    """
    request = {'AccessKeyId' : access_key_id,
               'Status' : status}
    if user_name:
        request['UserName'] = user_name
    return self.get_response('UpdateAccessKey', request)
+
def delete_access_key(self, access_key_id, user_name=None):
    """
    Delete an access key associated with a user.

    If the user_name is not specified, it is determined implicitly based
    on the AWS Access Key ID used to sign the request.

    :type access_key_id: string
    :param access_key_id: The ID of the access key to be deleted.

    :type user_name: string
    :param user_name: The username of the user whose key is being
                      deleted (optional).

    """
    params = {'AccessKeyId' : access_key_id}
    if user_name:
        params['UserName'] = user_name
    return self.get_response('DeleteAccessKey', params)
+
+ #
+ # Signing Certificates
+ #
+
def get_all_signing_certs(self, marker=None, max_items=None,
                          user_name=None):
    """
    Get all signing certificates associated with an account.

    If the user_name is not specified, it is determined implicitly based
    on the AWS Access Key ID used to sign the request.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of certificates you want in the
                      response.

    :type user_name: string
    :param user_name: The username of the user (optional).

    """
    params = {}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    if user_name:
        params['UserName'] = user_name
    return self.get_response('ListSigningCertificates',
                             params, list_marker='Certificates')
+
def update_signing_cert(self, cert_id, status, user_name=None):
    """
    Change the status of the specified signing certificate from
    Active to Inactive or vice versa.

    If the user_name is not specified, it is determined implicitly based
    on the AWS Access Key ID used to sign the request.

    :type cert_id: string
    :param cert_id: The ID of the signing certificate.

    :type status: string
    :param status: Either Active or Inactive.

    :type user_name: string
    :param user_name: The username of the user (optional).
    """
    request = {'CertificateId' : cert_id,
               'Status' : status}
    if user_name:
        request['UserName'] = user_name
    return self.get_response('UpdateSigningCertificate', request)
+
def upload_signing_cert(self, cert_body, user_name=None):
    """
    Uploads an X.509 signing certificate and associates it with
    the specified user.

    If the user_name is not specified, it is determined implicitly based
    on the AWS Access Key ID used to sign the request.

    :type cert_body: string
    :param cert_body: The body of the signing certificate.

    :type user_name: string
    :param user_name: The username of the user the certificate is
                      associated with (optional).

    """
    params = {'CertificateBody' : cert_body}
    if user_name:
        params['UserName'] = user_name
    # POST because the certificate body can exceed GET query-string limits.
    return self.get_response('UploadSigningCertificate', params,
                             verb='POST')
+
def delete_signing_cert(self, cert_id, user_name=None):
    """
    Delete a signing certificate associated with a user.

    If the user_name is not specified, it is determined implicitly based
    on the AWS Access Key ID used to sign the request.

    :type cert_id: string
    :param cert_id: The ID of the certificate.

    :type user_name: string
    :param user_name: The username of the user the certificate belongs
                      to (optional).

    """
    params = {'CertificateId' : cert_id}
    if user_name:
        params['UserName'] = user_name
    return self.get_response('DeleteSigningCertificate', params)
+
+ #
+ # Server Certificates
+ #
+
def get_all_server_certs(self, path_prefix='/',
                         marker=None, max_items=None):
    """
    Lists the server certificates that have the specified path prefix.
    If none exist, the action returns an empty list.

    :type path_prefix: string
    :param path_prefix: If provided, only certificates whose paths match
                        the provided prefix will be returned.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of server certificates you want
                      in the response.

    """
    params = {}
    if path_prefix:
        params['PathPrefix'] = path_prefix
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListServerCertificates',
                             params,
                             list_marker='ServerCertificateMetadataList')
+
def update_server_cert(self, cert_name, new_cert_name=None,
                       new_path=None):
    """
    Updates the name and/or the path of the specified server certificate.

    :type cert_name: string
    :param cert_name: The name of the server certificate that you want
                      to update.

    :type new_cert_name: string
    :param new_cert_name: The new name for the server certificate.
                          Include this only if you are updating the
                          server certificate's name.

    :type new_path: string
    :param new_path: If provided, the path of the certificate will be
                     changed to this path.
    """
    params = {'ServerCertificateName' : cert_name}
    # Only send the rename/repath fields the caller actually supplied.
    for key, value in (('NewServerCertificateName', new_cert_name),
                       ('NewPath', new_path)):
        if value:
            params[key] = value
    return self.get_response('UpdateServerCertificate', params)
+
def upload_server_cert(self, cert_name, cert_body, private_key,
                       cert_chain=None, path=None):
    """
    Uploads a server certificate entity for the AWS Account.
    The server certificate entity includes a public key certificate,
    a private key, and an optional certificate chain, which should
    all be PEM-encoded.

    :type cert_name: string
    :param cert_name: The name for the server certificate.  Do not
                      include the path in this value.

    :type cert_body: string
    :param cert_body: The contents of the public key certificate
                      in PEM-encoded format.

    :type private_key: string
    :param private_key: The contents of the private key in
                        PEM-encoded format.

    :type cert_chain: string
    :param cert_chain: The contents of the certificate chain.  This
                       is typically a concatenation of the PEM-encoded
                       public key certificates of the chain.

    :type path: string
    :param path: The path for the server certificate.

    """
    params = {'ServerCertificateName' : cert_name,
              'CertificateBody' : cert_body,
              'PrivateKey' : private_key}
    # The chain and path are optional fields.
    for key, value in (('CertificateChain', cert_chain), ('Path', path)):
        if value:
            params[key] = value
    # POST because PEM bodies can exceed GET query-string limits.
    return self.get_response('UploadServerCertificate', params,
                             verb='POST')
+
def get_server_certificate(self, cert_name):
    """
    Retrieves information about the specified server certificate.

    :type cert_name: string
    :param cert_name: The name of the server certificate you want
                      to retrieve information about.

    """
    return self.get_response('GetServerCertificate',
                             {'ServerCertificateName' : cert_name})
+
def delete_server_cert(self, cert_name):
    """
    Delete the specified server certificate.

    :type cert_name: string
    :param cert_name: The name of the server certificate you want
                      to delete.

    """
    return self.get_response('DeleteServerCertificate',
                             {'ServerCertificateName' : cert_name})
+
+ #
+ # MFA Devices
+ #
+
def get_all_mfa_devices(self, user_name, marker=None, max_items=None):
    """
    Get all MFA devices associated with an account.

    :type user_name: string
    :param user_name: The username of the user.

    :type marker: string
    :param marker: Use this only when paginating results and only in a
                   follow-up request after you've received a response
                   where the results are truncated.  Set this to the
                   value of the Marker element in the response you
                   just received.

    :type max_items: int
    :param max_items: Use this only when paginating results to indicate
                      the maximum number of MFA devices you want in the
                      response.

    """
    params = {'UserName' : user_name}
    if marker:
        params['Marker'] = marker
    if max_items:
        params['MaxItems'] = max_items
    return self.get_response('ListMFADevices',
                             params, list_marker='MFADevices')
+
def enable_mfa_device(self, user_name, serial_number,
                      auth_code_1, auth_code_2):
    """
    Enables the specified MFA device and associates it with the
    specified user.

    :type user_name: string
    :param user_name: The username of the user.

    :type serial_number: string
    :param serial_number: The serial number which uniquely identifies
                          the MFA device.

    :type auth_code_1: string
    :param auth_code_1: An authentication code emitted by the device.

    :type auth_code_2: string
    :param auth_code_2: A subsequent authentication code emitted
                        by the device.

    """
    params = {'UserName' : user_name,
              'SerialNumber' : serial_number,
              'AuthenticationCode1' : auth_code_1,
              'AuthenticationCode2' : auth_code_2}
    return self.get_response('EnableMFADevice', params)
+
def deactivate_mfa_device(self, user_name, serial_number):
    """
    Deactivates the specified MFA device and removes it from
    association with the user.

    :type user_name: string
    :param user_name: The username of the user.

    :type serial_number: string
    :param serial_number: The serial number which uniquely identifies
                          the MFA device.

    """
    params = {'UserName' : user_name,
              'SerialNumber' : serial_number}
    return self.get_response('DeactivateMFADevice', params)
+
def resync_mfa_device(self, user_name, serial_number,
                      auth_code_1, auth_code_2):
    """
    Synchronizes the specified MFA device with the AWS servers.

    :type user_name: string
    :param user_name: The username of the user.

    :type serial_number: string
    :param serial_number: The serial number which uniquely identifies
                          the MFA device.

    :type auth_code_1: string
    :param auth_code_1: An authentication code emitted by the device.

    :type auth_code_2: string
    :param auth_code_2: A subsequent authentication code emitted
                        by the device.

    """
    params = {'UserName' : user_name,
              'SerialNumber' : serial_number,
              'AuthenticationCode1' : auth_code_1,
              'AuthenticationCode2' : auth_code_2}
    return self.get_response('ResyncMFADevice', params)
+
+ #
+ # Login Profiles
+ #
+
def create_login_profile(self, user_name, password):
    """
    Creates a login profile for the specified user, giving the user the
    ability to access AWS services and the AWS Management Console.

    :type user_name: string
    :param user_name: The name of the user.

    :type password: string
    :param password: The new password for the user.

    """
    params = {'UserName' : user_name,
              'Password' : password}
    return self.get_response('CreateLoginProfile', params)
+
def delete_login_profile(self, user_name):
    """
    Deletes the login profile associated with the specified user.

    :type user_name: string
    :param user_name: The name of the user whose login profile is
                      deleted.

    """
    return self.get_response('DeleteLoginProfile',
                             {'UserName' : user_name})
+
def update_login_profile(self, user_name, password):
    """
    Resets the password associated with the user's login profile.

    :type user_name: string
    :param user_name: The name of the user.

    :type password: string
    :param password: The new password for the user.

    """
    return self.get_response('UpdateLoginProfile',
                             {'UserName' : user_name,
                              'Password' : password})
+
def create_account_alias(self, alias):
    """
    Creates a new alias for the AWS account.

    For more information on account id aliases, please see
    http://goo.gl/ToB7G

    :type alias: string
    :param alias: The alias to attach to the account.
    """
    return self.get_response('CreateAccountAlias',
                             {'AccountAlias': alias})
+
def delete_account_alias(self, alias):
    """
    Deletes an alias for the AWS account.

    For more information on account id aliases, please see
    http://goo.gl/ToB7G

    :type alias: string
    :param alias: The alias to remove from the account.
    """
    return self.get_response('DeleteAccountAlias',
                             {'AccountAlias': alias})
+
def get_account_alias(self):
    """
    Get the alias for the current account.

    This is referred to in the docs as list_account_aliases,
    but it seems you can only have one account alias currently.

    For more information on account id aliases, please see
    http://goo.gl/ToB7G
    """
    # Drill down through the nested response structure to the alias list.
    response = self.get_response('ListAccountAliases', {})
    return (response.get('ListAccountAliasesResponse')
                    .get('ListAccountAliasesResult')
                    .get('AccountAliases')
                    .get('member', None))
+
def get_signin_url(self, service='ec2'):
    """
    Get the URL where IAM users can use their login profile to sign in
    to this account's console.

    :type service: string
    :param service: Default service to go to in the console.
    """
    alias = self.get_account_alias()
    if alias:
        return "https://%s.signin.aws.amazon.com/console/%s" % (alias,
                                                                service)
    raise Exception('No alias associated with this account. Please use iam.create_account_alias() first.')
diff --git a/boto/jsonresponse.py b/boto/jsonresponse.py
new file mode 100644
index 0000000..beb50ce
--- /dev/null
+++ b/boto/jsonresponse.py
@@ -0,0 +1,143 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import utils
+
class XmlHandler(xml.sax.ContentHandler):
    """SAX content handler that forwards events to a tree of node objects.

    Each node must implement ``startElement(name, attrs, connection)``
    (returning either None, a node, or a ``(name, node)`` tuple to push)
    and ``endElement(name, text, connection)``.
    """

    def __init__(self, root_node, connection):
        self.connection = connection
        # Stack of (name, node) pairs; the root carries a sentinel name.
        self.nodes = [('root', root_node)]
        self.current_text = ''

    def startElement(self, name, attrs):
        self.current_text = ''
        new_node = self.nodes[-1][1].startElement(name, attrs,
                                                  self.connection)
        if new_node is not None:
            # A tuple return already carries its own (name, node) pair.
            if isinstance(new_node, tuple):
                self.nodes.append(new_node)
            else:
                self.nodes.append((name, new_node))

    def endElement(self, name):
        top_name, top_node = self.nodes[-1]
        top_node.endElement(name, self.current_text, self.connection)
        # Pop only when the closing tag matches the node we pushed for it.
        if top_name == name:
            self.nodes.pop()
        self.current_text = ''

    def characters(self, content):
        self.current_text += content

    def parse(self, s):
        xml.sax.parseString(s, self)
+
class Element(dict):
    """A dict-backed node in the parsed-response tree.

    Child elements become dict entries; attribute access falls through to
    nested Elements so callers can write ``response.SomeDeepField``.
    """

    def __init__(self, connection=None, element_name=None,
                 stack=None, parent=None, list_marker=('Set',),
                 item_marker=('member', 'item')):
        dict.__init__(self)
        self.connection = connection
        self.element_name = element_name
        self.list_marker = utils.mklist(list_marker)
        self.item_marker = utils.mklist(item_marker)
        self.stack = [] if stack is None else stack
        self.parent = parent

    def __getattr__(self, key):
        if key in self:
            return self[key]
        # Not found locally: search nested Elements depth-first.
        for child in self.values():
            if isinstance(child, Element):
                try:
                    return getattr(child, key)
                except AttributeError:
                    pass
        raise AttributeError

    def startElement(self, name, attrs, connection):
        self.stack.append(name)
        # Names ending in a list marker (e.g. '...Set') start a list node.
        for marker in self.list_marker:
            if name.endswith(marker):
                child_list = ListElement(self.connection, name,
                                         self.list_marker, self.item_marker)
                self[name] = child_list
                return child_list
        if len(self.stack) > 0:
            # NOTE(review): the stack was just appended to, so this branch
            # always fires and the final 'return None' looks unreachable —
            # preserved as-is to keep behavior identical.
            element_name = self.stack[-1]
            child = Element(self.connection, element_name, self.stack,
                            self, self.list_marker, self.item_marker)
            self[element_name] = child
            return (element_name, child)
        else:
            return None

    def endElement(self, name, value, connection):
        if len(self.stack) > 0:
            self.stack.pop()
        # Non-empty text becomes a value on the parent container.
        value = value.strip()
        if value:
            if isinstance(self.parent, Element):
                self.parent[name] = value
            elif isinstance(self.parent, ListElement):
                self.parent.append(value)
+
class ListElement(list):
    """A list-backed node in the parsed-response tree.

    Items (elements whose tag is in ``item_marker``) are appended; other
    child values are attached as attributes.
    """

    def __init__(self, connection=None, element_name=None,
                 list_marker=['Set'], item_marker=('member', 'item')):
        list.__init__(self)
        self.connection = connection
        self.element_name = element_name
        self.list_marker = list_marker
        self.item_marker = item_marker

    def startElement(self, name, attrs, connection):
        for marker in self.list_marker:
            if name.endswith(marker):
                # NOTE(review): item_marker is passed positionally as the
                # child's list_marker here — preserved as-is, but it looks
                # like it may have been meant as the item_marker argument.
                child_list = ListElement(self.connection, name,
                                         self.item_marker)
                setattr(self, name, child_list)
                return child_list
        if name in self.item_marker:
            item = Element(self.connection, name, parent=self)
            self.append(item)
            return item
        else:
            return None

    def endElement(self, name, value, connection):
        if name == self.element_name:
            # Closing the list itself: drop items that stayed empty.
            for item in [e for e in self
                         if isinstance(e, Element) and len(e) == 0]:
                self.remove(item)
        else:
            setattr(self, name, value)
diff --git a/boto/manage/__init__.py b/boto/manage/__init__.py
new file mode 100644
index 0000000..49d029b
--- /dev/null
+++ b/boto/manage/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/manage/cmdshell.py b/boto/manage/cmdshell.py
new file mode 100644
index 0000000..cbd2e60
--- /dev/null
+++ b/boto/manage/cmdshell.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.mashups.interactive import interactive_shell
+import boto
+import os
+import time
+import shutil
+import StringIO
+import paramiko
+import socket
+import subprocess
+
+
+class SSHClient(object):
+
+ def __init__(self, server,
+ host_key_file='~/.ssh/known_hosts',
+ uname='root', ssh_pwd=None):
+ self.server = server
+ self.host_key_file = host_key_file
+ self.uname = uname
+ self._pkey = paramiko.RSAKey.from_private_key_file(server.ssh_key_file,
+ password=ssh_pwd)
+ self._ssh_client = paramiko.SSHClient()
+ self._ssh_client.load_system_host_keys()
+ self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
+ self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ self.connect()
+
+ def connect(self):
+ retry = 0
+ while retry < 5:
+ try:
+ self._ssh_client.connect(self.server.hostname,
+ username=self.uname,
+ pkey=self._pkey)
+ return
+ except socket.error, (value,message):
+ if value == 61 or value == 111:
+ print 'SSH Connection refused, will retry in 5 seconds'
+ time.sleep(5)
+ retry += 1
+ else:
+ raise
+ except paramiko.BadHostKeyException:
+ print "%s has an entry in ~/.ssh/known_hosts and it doesn't match" % self.server.hostname
+ print 'Edit that file to remove the entry and then hit return to try again'
+ raw_input('Hit Enter when ready')
+ retry += 1
+ except EOFError:
+ print 'Unexpected Error from SSH Connection, retry in 5 seconds'
+ time.sleep(5)
+ retry += 1
+ print 'Could not establish SSH connection'
+
+ def get_file(self, src, dst):
+ sftp_client = self._ssh_client.open_sftp()
+ sftp_client.get(src, dst)
+
+ def put_file(self, src, dst):
+ sftp_client = self._ssh_client.open_sftp()
+ sftp_client.put(src, dst)
+
+ def listdir(self, path):
+ sftp_client = self._ssh_client.open_sftp()
+ return sftp_client.listdir(path)
+
+ def open_sftp(self):
+ return self._ssh_client.open_sftp()
+
+ def isdir(self, path):
+ status = self.run('[ -d %s ] || echo "FALSE"' % path)
+ if status[1].startswith('FALSE'):
+ return 0
+ return 1
+
+ def exists(self, path):
+ status = self.run('[ -a %s ] || echo "FALSE"' % path)
+ if status[1].startswith('FALSE'):
+ return 0
+ return 1
+
+ def shell(self):
+ channel = self._ssh_client.invoke_shell()
+ interactive_shell(channel)
+
+ def run(self, command):
+ boto.log.info('running:%s on %s' % (command, self.server.instance_id))
+ log_fp = StringIO.StringIO()
+ status = 0
+ try:
+ t = self._ssh_client.exec_command(command)
+ except paramiko.SSHException:
+ status = 1
+ log_fp.write(t[1].read())
+ log_fp.write(t[2].read())
+ t[0].close()
+ t[1].close()
+ t[2].close()
+ boto.log.info('output: %s' % log_fp.getvalue())
+ return (status, log_fp.getvalue())
+
+ def close(self):
+ transport = self._ssh_client.get_transport()
+ transport.close()
+ self.server.reset_cmdshell()
+
+class LocalClient(object):
+
+ def __init__(self, server, host_key_file=None, uname='root'):
+ self.server = server
+ self.host_key_file = host_key_file
+ self.uname = uname
+
+ def get_file(self, src, dst):
+ shutil.copyfile(src, dst)
+
+ def put_file(self, src, dst):
+ shutil.copyfile(src, dst)
+
+ def listdir(self, path):
+ return os.listdir(path)
+
+ def isdir(self, path):
+ return os.path.isdir(path)
+
+ def exists(self, path):
+ return os.path.exists(path)
+
+ def shell(self):
+ raise NotImplementedError, 'shell not supported with LocalClient'
+
+ def run(self):
+ boto.log.info('running:%s' % self.command)
+ log_fp = StringIO.StringIO()
+ process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ while process.poll() == None:
+ time.sleep(1)
+ t = process.communicate()
+ log_fp.write(t[0])
+ log_fp.write(t[1])
+ boto.log.info(log_fp.getvalue())
+ boto.log.info('output: %s' % log_fp.getvalue())
+ return (process.returncode, log_fp.getvalue())
+
+ def close(self):
+ pass
+
def start(server):
    """Return a command shell client for *server*.

    Uses a LocalClient when the boto config says we are already running
    on that instance; otherwise opens an SSHClient to it.
    """
    local_instance_id = boto.config.get('Instance', 'instance-id', None)
    if local_instance_id == server.instance_id:
        return LocalClient(server)
    return SSHClient(server)
diff --git a/boto/manage/propget.py b/boto/manage/propget.py
new file mode 100644
index 0000000..45b2ff2
--- /dev/null
+++ b/boto/manage/propget.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+def get(prop, choices=None):
+ prompt = prop.verbose_name
+ if not prompt:
+ prompt = prop.name
+ if choices:
+ if callable(choices):
+ choices = choices()
+ else:
+ choices = prop.get_choices()
+ valid = False
+ while not valid:
+ if choices:
+ min = 1
+ max = len(choices)
+ for i in range(min, max+1):
+ value = choices[i-1]
+ if isinstance(value, tuple):
+ value = value[0]
+ print '[%d] %s' % (i, value)
+ value = raw_input('%s [%d-%d]: ' % (prompt, min, max))
+ try:
+ int_value = int(value)
+ value = choices[int_value-1]
+ if isinstance(value, tuple):
+ value = value[1]
+ valid = True
+ except ValueError:
+ print '%s is not a valid choice' % value
+ except IndexError:
+ print '%s is not within the range[%d-%d]' % (min, max)
+ else:
+ value = raw_input('%s: ' % prompt)
+ try:
+ value = prop.validate(value)
+ if prop.empty(value) and prop.required:
+ print 'A value is required'
+ else:
+ valid = True
+ except:
+ print 'Invalid value: %s' % value
+ return value
+
diff --git a/boto/manage/server.py b/boto/manage/server.py
new file mode 100644
index 0000000..3c7a303
--- /dev/null
+++ b/boto/manage/server.py
@@ -0,0 +1,556 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+High-level abstraction of an EC2 server
+"""
+from __future__ import with_statement
+import boto.ec2
+from boto.mashups.iobject import IObject
+from boto.pyami.config import BotoConfigPath, Config
+from boto.sdb.db.model import Model
+from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty, CalculatedProperty
+from boto.manage import propget
+from boto.ec2.zone import Zone
+from boto.ec2.keypair import KeyPair
+import os, time, StringIO
+from contextlib import closing
+from boto.exception import EC2ResponseError
+
+InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge',
+ 'c1.medium', 'c1.xlarge',
+ 'm2.2xlarge', 'm2.4xlarge']
+
+class Bundler(object):
+    """Bundle a running server's root volume into a new AMI.
+
+    Drives the ec2 AMI tools (ec2-bundle-vol / ec2-upload-bundle) on the
+    remote server over SSH, then registers the uploaded manifest as a new
+    image.
+    """
+
+    def __init__(self, server, uname='root'):
+        from boto.manage.cmdshell import SSHClient
+        self.server = server
+        self.uname = uname
+        self.ssh_client = SSHClient(server, uname=uname)
+
+    def copy_x509(self, key_file, cert_file):
+        # The AMI tools need the user's X.509 private key and cert on the
+        # instance itself; drop them in /mnt (ephemeral, not bundled).
+        print '\tcopying cert and pk over to /mnt directory on server'
+        self.ssh_client.open_sftp()
+        path, name = os.path.split(key_file)
+        self.remote_key_file = '/mnt/%s' % name
+        self.ssh_client.put_file(key_file, self.remote_key_file)
+        path, name = os.path.split(cert_file)
+        self.remote_cert_file = '/mnt/%s' % name
+        self.ssh_client.put_file(cert_file, self.remote_cert_file)
+        print '...complete!'
+
+    def bundle_image(self, prefix, size, ssh_key):
+        # Build (but do not run) the ec2-bundle-vol command line.  The
+        # architecture flag is derived from the instance type: the 32-bit
+        # types get i386, everything else x86_64.
+        # NOTE(review): the ssh_key argument is unused here -- presumably
+        # kept for symmetry with upload_bundle; confirm before removing.
+        command = ""
+        if self.uname != 'root':
+            command = "sudo "
+        command += 'ec2-bundle-vol '
+        command += '-c %s -k %s ' % (self.remote_cert_file, self.remote_key_file)
+        command += '-u %s ' % self.server._reservation.owner_id
+        command += '-p %s ' % prefix
+        command += '-s %d ' % size
+        command += '-d /mnt '
+        if self.server.instance_type == 'm1.small' or self.server.instance_type == 'c1.medium':
+            command += '-r i386'
+        else:
+            command += '-r x86_64'
+        return command
+
+    def upload_bundle(self, bucket, prefix, ssh_key):
+        # Build (but do not run) the ec2-upload-bundle command line.
+        # NOTE(review): the AWS secret key ends up on the remote command
+        # line, visible in the process list on that host while it runs.
+        command = ""
+        if self.uname != 'root':
+            command = "sudo "
+        command += 'ec2-upload-bundle '
+        command += '-m /mnt/%s.manifest.xml ' % prefix
+        command += '-b %s ' % bucket
+        command += '-a %s ' % self.server.ec2.aws_access_key_id
+        command += '-s %s ' % self.server.ec2.aws_secret_access_key
+        return command
+
+    def bundle(self, bucket=None, prefix=None, key_file=None, cert_file=None,
+               size=None, ssh_key=None, fp=None, clear_history=True):
+        """Bundle, upload and register this server as a new AMI.
+
+        Any argument left as None is prompted for interactively.  The whole
+        sequence is sent as ONE remote shell command so that boto.cfg and
+        authorized_keys are moved out of the way during bundling (keeping
+        them out of the image) and restored afterwards.  Returns the newly
+        registered image id.
+        """
+        iobject = IObject()
+        if not bucket:
+            bucket = iobject.get_string('Name of S3 bucket')
+        if not prefix:
+            prefix = iobject.get_string('Prefix for AMI file')
+        if not key_file:
+            key_file = iobject.get_filename('Path to RSA private key file')
+        if not cert_file:
+            cert_file = iobject.get_filename('Path to RSA public cert file')
+        if not size:
+            size = iobject.get_int('Size (in MB) of bundled image')
+        if not ssh_key:
+            ssh_key = self.server.get_ssh_key_file()
+        self.copy_x509(key_file, cert_file)
+        if not fp:
+            fp = StringIO.StringIO()
+        fp.write('sudo mv %s /mnt/boto.cfg; ' % BotoConfigPath)
+        fp.write('mv ~/.ssh/authorized_keys /mnt/authorized_keys; ')
+        if clear_history:
+            # Keep the shell history (which may contain credentials) out of
+            # the bundled image.
+            fp.write('history -c; ')
+        fp.write(self.bundle_image(prefix, size, ssh_key))
+        fp.write('; ')
+        fp.write(self.upload_bundle(bucket, prefix, ssh_key))
+        fp.write('; ')
+        fp.write('sudo mv /mnt/boto.cfg %s; ' % BotoConfigPath)
+        fp.write('mv /mnt/authorized_keys ~/.ssh/authorized_keys')
+        command = fp.getvalue()
+        print 'running the following command on the remote server:'
+        print command
+        t = self.ssh_client.run(command)
+        print '\t%s' % t[0]
+        print '\t%s' % t[1]
+        print '...complete!'
+        print 'registering image...'
+        self.image_id = self.server.ec2.register_image(name=prefix, image_location='%s/%s.manifest.xml' % (bucket, prefix))
+        return self.image_id
+
+class CommandLineGetter(object):
+    """Interactively collect the parameters needed by Server.create(),
+    prompting only for the values missing from the params dict."""
+
+    def get_ami_list(self):
+        # Only offer AMIs whose S3 location mentions 'pyami'.
+        my_amis = []
+        for ami in self.ec2.get_all_images():
+            # hack alert, need a better way to do this!
+            if ami.location.find('pyami') >= 0:
+                my_amis.append((ami.location, ami))
+        return my_amis
+
+    def get_region(self, params):
+        # Accept a region name string, a RegionInfo object, or prompt.
+        region = params.get('region', None)
+        if isinstance(region, str) or isinstance(region, unicode):
+            region = boto.ec2.get_region(region)
+            params['region'] = region
+        if not region:
+            prop = self.cls.find_property('region_name')
+            params['region'] = propget.get(prop, choices=boto.ec2.regions)
+        self.ec2 = params['region'].connect()
+
+    def get_name(self, params):
+        if not params.get('name', None):
+            prop = self.cls.find_property('name')
+            params['name'] = propget.get(prop)
+
+    def get_description(self, params):
+        if not params.get('description', None):
+            prop = self.cls.find_property('description')
+            params['description'] = propget.get(prop)
+
+    def get_instance_type(self, params):
+        if not params.get('instance_type', None):
+            prop = StringProperty(name='instance_type', verbose_name='Instance Type',
+                                  choices=InstanceTypes)
+            params['instance_type'] = propget.get(prop)
+
+    def get_quantity(self, params):
+        if not params.get('quantity', None):
+            prop = IntegerProperty(name='quantity', verbose_name='Number of Instances')
+            params['quantity'] = propget.get(prop)
+
+    def get_zone(self, params):
+        if not params.get('zone', None):
+            prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
+                                  choices=self.ec2.get_all_zones)
+            params['zone'] = propget.get(prop)
+
+    def get_ami_id(self, params):
+        # Keep asking until the AMI id resolves to exactly one image.
+        # NOTE(review): a bad ami id passed in via params is never cleared,
+        # so in that case this loop retries the same lookup forever.
+        valid = False
+        while not valid:
+            ami = params.get('ami', None)
+            if not ami:
+                prop = StringProperty(name='ami', verbose_name='AMI')
+                ami = propget.get(prop)
+            try:
+                rs = self.ec2.get_all_images([ami])
+                if len(rs) == 1:
+                    valid = True
+                    params['ami'] = rs[0]
+            except EC2ResponseError:
+                pass
+
+    def get_group(self, params):
+        # Accept a group name string, resolve it to a SecurityGroup, or
+        # prompt.  NOTE(review): an unknown group name stays a (truthy)
+        # string, so no prompt happens in that case -- confirm intended.
+        group = params.get('group', None)
+        if isinstance(group, str) or isinstance(group, unicode):
+            group_list = self.ec2.get_all_security_groups()
+            for g in group_list:
+                if g.name == group:
+                    group = g
+                    params['group'] = g
+        if not group:
+            prop = StringProperty(name='group', verbose_name='EC2 Security Group',
+                                  choices=self.ec2.get_all_security_groups)
+            params['group'] = propget.get(prop)
+
+    def get_key(self, params):
+        # Accept a keypair name string, verify it exists, or prompt (the
+        # chosen KeyPair's name, not the object, is stored).
+        keypair = params.get('keypair', None)
+        if isinstance(keypair, str) or isinstance(keypair, unicode):
+            key_list = self.ec2.get_all_key_pairs()
+            for k in key_list:
+                if k.name == keypair:
+                    keypair = k.name
+                    params['keypair'] = k.name
+        if not keypair:
+            prop = StringProperty(name='keypair', verbose_name='EC2 KeyPair',
+                                  choices=self.ec2.get_all_key_pairs)
+            params['keypair'] = propget.get(prop).name
+
+    def get(self, cls, params):
+        # Fill params in dependency order: region first, since the rest
+        # need an EC2 connection (set in get_region, refreshed here).
+        self.cls = cls
+        self.get_region(params)
+        self.ec2 = params['region'].connect()
+        self.get_name(params)
+        self.get_description(params)
+        self.get_instance_type(params)
+        self.get_zone(params)
+        self.get_quantity(params)
+        self.get_ami_id(params)
+        self.get_group(params)
+        self.get_key(params)
+
+class Server(Model):
+    """SimpleDB-backed record describing a single EC2 instance.
+
+    Stores only data EC2 itself does not hold (name, description, region,
+    instance id, elastic IP, production flag); everything else is exposed
+    as a CalculatedProperty fetched from EC2 on demand via the _xxx()
+    methods below.
+    """
+
+    #
+    # The properties of this object consists of real properties for data that
+    # is not already stored in EC2 somewhere (e.g. name, description) plus
+    # calculated properties for all of the properties that are already in
+    # EC2 (e.g. hostname, security groups, etc.)
+    #
+    name = StringProperty(unique=True, verbose_name="Name")
+    description = StringProperty(verbose_name="Description")
+    region_name = StringProperty(verbose_name="EC2 Region Name")
+    instance_id = StringProperty(verbose_name="EC2 Instance ID")
+    elastic_ip = StringProperty(verbose_name="EC2 Elastic IP Address")
+    production = BooleanProperty(verbose_name="Is This Server Production", default=False)
+    ami_id = CalculatedProperty(verbose_name="AMI ID", calculated_type=str, use_method=True)
+    zone = CalculatedProperty(verbose_name="Availability Zone Name", calculated_type=str, use_method=True)
+    hostname = CalculatedProperty(verbose_name="Public DNS Name", calculated_type=str, use_method=True)
+    private_hostname = CalculatedProperty(verbose_name="Private DNS Name", calculated_type=str, use_method=True)
+    groups = CalculatedProperty(verbose_name="Security Groups", calculated_type=list, use_method=True)
+    security_group = CalculatedProperty(verbose_name="Primary Security Group Name", calculated_type=str, use_method=True)
+    key_name = CalculatedProperty(verbose_name="Key Name", calculated_type=str, use_method=True)
+    instance_type = CalculatedProperty(verbose_name="Instance Type", calculated_type=str, use_method=True)
+    status = CalculatedProperty(verbose_name="Current Status", calculated_type=str, use_method=True)
+    launch_time = CalculatedProperty(verbose_name="Server Launch Time", calculated_type=str, use_method=True)
+    console_output = CalculatedProperty(verbose_name="Console Output", calculated_type=file, use_method=True)
+
+    # Hooks for subclasses/installers; not referenced in this module.
+    packages = []
+    plugins = []
+
+ @classmethod
+ def add_credentials(cls, cfg, aws_access_key_id, aws_secret_access_key):
+ if not cfg.has_section('Credentials'):
+ cfg.add_section('Credentials')
+ cfg.set('Credentials', 'aws_access_key_id', aws_access_key_id)
+ cfg.set('Credentials', 'aws_secret_access_key', aws_secret_access_key)
+ if not cfg.has_section('DB_Server'):
+ cfg.add_section('DB_Server')
+ cfg.set('DB_Server', 'db_type', 'SimpleDB')
+ cfg.set('DB_Server', 'db_name', cls._manager.domain.name)
+
+ @classmethod
+ def create(cls, config_file=None, logical_volume = None, cfg = None, **params):
+ """
+ Create a new instance based on the specified configuration file or the specified
+ configuration and the passed in parameters.
+
+ If the config_file argument is not None, the configuration is read from there.
+ Otherwise, the cfg argument is used.
+
+ The config file may include other config files with a #import reference. The included
+ config files must reside in the same directory as the specified file.
+
+ The logical_volume argument, if supplied, will be used to get the current physical
+ volume ID and use that as an override of the value specified in the config file. This
+ may be useful for debugging purposes when you want to debug with a production config
+ file but a test Volume.
+
+ The dictionary argument may be used to override any EC2 configuration values in the
+ config file.
+ """
+ if config_file:
+ cfg = Config(path=config_file)
+ if cfg.has_section('EC2'):
+ # include any EC2 configuration values that aren't specified in params:
+ for option in cfg.options('EC2'):
+ if option not in params:
+ params[option] = cfg.get('EC2', option)
+ getter = CommandLineGetter()
+ getter.get(cls, params)
+ region = params.get('region')
+ ec2 = region.connect()
+ cls.add_credentials(cfg, ec2.aws_access_key_id, ec2.aws_secret_access_key)
+ ami = params.get('ami')
+ kp = params.get('keypair')
+ group = params.get('group')
+ zone = params.get('zone')
+ # deal with possibly passed in logical volume:
+ if logical_volume != None:
+ cfg.set('EBS', 'logical_volume_name', logical_volume.name)
+ cfg_fp = StringIO.StringIO()
+ cfg.write(cfg_fp)
+ # deal with the possibility that zone and/or keypair are strings read from the config file:
+ if isinstance(zone, Zone):
+ zone = zone.name
+ if isinstance(kp, KeyPair):
+ kp = kp.name
+ reservation = ami.run(min_count=1,
+ max_count=params.get('quantity', 1),
+ key_name=kp,
+ security_groups=[group],
+ instance_type=params.get('instance_type'),
+ placement = zone,
+ user_data = cfg_fp.getvalue())
+ l = []
+ i = 0
+ elastic_ip = params.get('elastic_ip')
+ instances = reservation.instances
+ if elastic_ip != None and instances.__len__() > 0:
+ instance = instances[0]
+ print 'Waiting for instance to start so we can set its elastic IP address...'
+ # Sometimes we get a message from ec2 that says that the instance does not exist.
+ # Hopefully the following delay will giv eec2 enough time to get to a stable state:
+ time.sleep(5)
+ while instance.update() != 'running':
+ time.sleep(1)
+ instance.use_ip(elastic_ip)
+ print 'set the elastic IP of the first instance to %s' % elastic_ip
+ for instance in instances:
+ s = cls()
+ s.ec2 = ec2
+ s.name = params.get('name') + '' if i==0 else str(i)
+ s.description = params.get('description')
+ s.region_name = region.name
+ s.instance_id = instance.id
+ if elastic_ip and i == 0:
+ s.elastic_ip = elastic_ip
+ s.put()
+ l.append(s)
+ i += 1
+ return l
+
+    @classmethod
+    def create_from_instance_id(cls, instance_id, name, description=''):
+        """Create and save a Server record for an already-running instance.
+
+        Searches every region for *instance_id*; returns the saved Server
+        or None when the instance cannot be found anywhere.
+        """
+        regions = boto.ec2.regions()
+        for region in regions:
+            ec2 = region.connect()
+            try:
+                rs = ec2.get_all_instances([instance_id])
+            except:
+                # Broad on purpose: an id unknown in this region (or a
+                # region we cannot reach) just means "keep looking".
+                rs = []
+            if len(rs) == 1:
+                s = cls()
+                s.ec2 = ec2
+                s.name = name
+                s.description = description
+                s.region_name = region.name
+                s.instance_id = instance_id
+                s._reservation = rs[0]
+                for instance in s._reservation.instances:
+                    if instance.id == instance_id:
+                        s._instance = instance
+                s.put()
+                return s
+        return None
+
+    @classmethod
+    def create_from_current_instances(cls):
+        """Create Server records for every instance in every region that
+        does not already have one.  Returns the list of new Servers."""
+        servers = []
+        regions = boto.ec2.regions()
+        for region in regions:
+            ec2 = region.connect()
+            rs = ec2.get_all_instances()
+            for reservation in rs:
+                for instance in reservation.instances:
+                    try:
+                        # find() returns a generator; next() raises
+                        # StopIteration when no record exists yet.
+                        Server.find(instance_id=instance.id).next()
+                        boto.log.info('Server for %s already exists' % instance.id)
+                    except StopIteration:
+                        s = cls()
+                        s.ec2 = ec2
+                        s.name = instance.id
+                        s.region_name = region.name
+                        s.instance_id = instance.id
+                        s._reservation = reservation
+                        s.put()
+                        servers.append(s)
+        return servers
+
+ def __init__(self, id=None, **kw):
+ Model.__init__(self, id, **kw)
+ self.ssh_key_file = None
+ self.ec2 = None
+ self._cmdshell = None
+ self._reservation = None
+ self._instance = None
+ self._setup_ec2()
+
+    def _setup_ec2(self):
+        # Lazily (re)establish the EC2 connection plus the cached
+        # reservation/instance objects.  Safe to call repeatedly: returns
+        # early once everything is populated, and only runs for records
+        # that have been saved (self.id) with a region name.
+        if self.ec2 and self._instance and self._reservation:
+            return
+        if self.id:
+            if self.region_name:
+                for region in boto.ec2.regions():
+                    if region.name == self.region_name:
+                        self.ec2 = region.connect()
+                        if self.instance_id and not self._instance:
+                            try:
+                                rs = self.ec2.get_all_instances([self.instance_id])
+                                if len(rs) >= 1:
+                                    for instance in rs[0].instances:
+                                        if instance.id == self.instance_id:
+                                            self._reservation = rs[0]
+                                            self._instance = instance
+                            except EC2ResponseError:
+                                # Instance may no longer exist; leave the
+                                # cached fields unset.
+                                pass
+
+ def _status(self):
+ status = ''
+ if self._instance:
+ self._instance.update()
+ status = self._instance.state
+ return status
+
+ def _hostname(self):
+ hostname = ''
+ if self._instance:
+ hostname = self._instance.public_dns_name
+ return hostname
+
+ def _private_hostname(self):
+ hostname = ''
+ if self._instance:
+ hostname = self._instance.private_dns_name
+ return hostname
+
+ def _instance_type(self):
+ it = ''
+ if self._instance:
+ it = self._instance.instance_type
+ return it
+
+ def _launch_time(self):
+ lt = ''
+ if self._instance:
+ lt = self._instance.launch_time
+ return lt
+
+ def _console_output(self):
+ co = ''
+ if self._instance:
+ co = self._instance.get_console_output()
+ return co
+
+ def _groups(self):
+ gn = []
+ if self._reservation:
+ gn = self._reservation.groups
+ return gn
+
+ def _security_group(self):
+ groups = self._groups()
+ if len(groups) >= 1:
+ return groups[0].id
+ return ""
+
+ def _zone(self):
+ zone = None
+ if self._instance:
+ zone = self._instance.placement
+ return zone
+
+ def _key_name(self):
+ kn = None
+ if self._instance:
+ kn = self._instance.key_name
+ return kn
+
+    def put(self):
+        # Persist to SimpleDB, then refresh cached EC2 state -- the saved
+        # region/instance id may now make a connection possible.
+        Model.put(self)
+        self._setup_ec2()
+
+ def delete(self):
+ if self.production:
+ raise ValueError, "Can't delete a production server"
+ #self.stop()
+ Model.delete(self)
+
+ def stop(self):
+ if self.production:
+ raise ValueError, "Can't delete a production server"
+ if self._instance:
+ self._instance.stop()
+
+ def terminate(self):
+ if self.production:
+ raise ValueError, "Can't delete a production server"
+ if self._instance:
+ self._instance.terminate()
+
+    def reboot(self):
+        # Reboot the backing instance, if we have one.
+        if self._instance:
+            self._instance.reboot()
+
+    def wait(self):
+        # Block until the instance reports 'running', polling every 5s.
+        # NOTE(review): spins forever if the instance goes straight to a
+        # terminal state like 'terminated' -- confirm acceptable.
+        while self.status != 'running':
+            time.sleep(5)
+
+    def get_ssh_key_file(self):
+        """Locate the private key file for this server's keypair.
+
+        Looks for ~/.ssh/<key_name>.pem first and falls back to prompting
+        the user.  The result is cached on the instance.
+        """
+        if not self.ssh_key_file:
+            ssh_dir = os.path.expanduser('~/.ssh')
+            if os.path.isdir(ssh_dir):
+                ssh_file = os.path.join(ssh_dir, '%s.pem' % self.key_name)
+                if os.path.isfile(ssh_file):
+                    self.ssh_key_file = ssh_file
+            if not self.ssh_key_file:
+                iobject = IObject()
+                self.ssh_key_file = iobject.get_filename('Path to OpenSSH Key file')
+        return self.ssh_key_file
+
+    def get_cmdshell(self):
+        # Lazily create (and cache) a command shell -- SSH or local,
+        # decided by cmdshell.start().
+        if not self._cmdshell:
+            import cmdshell
+            self.get_ssh_key_file()
+            self._cmdshell = cmdshell.start(self)
+        return self._cmdshell
+
+    def reset_cmdshell(self):
+        # Drop the cached shell; the next get_cmdshell() reconnects.
+        self._cmdshell = None
+
+    def run(self, command):
+        """Run *command* on the server via a command shell.
+
+        Returns the (status, output) tuple from the shell's run(); the
+        shell is always closed afterwards via closing().
+        """
+        with closing(self.get_cmdshell()) as cmd:
+            status = cmd.run(command)
+        return status
+
+    def get_bundler(self, uname='root'):
+        # Make sure an SSH key is available before handing out a Bundler.
+        self.get_ssh_key_file()
+        return Bundler(self, uname)
+
+    def get_ssh_client(self, uname='root', ssh_pwd=None):
+        """Return an SSHClient connected to this server."""
+        from boto.manage.cmdshell import SSHClient
+        self.get_ssh_key_file()
+        return SSHClient(self, uname=uname, ssh_pwd=ssh_pwd)
+
+    def install(self, pkg):
+        # apt-get based -- assumes a Debian/Ubuntu image; TODO confirm all
+        # supported AMIs are apt-based.
+        return self.run('apt-get -y install %s' % pkg)
+
+
+
diff --git a/boto/manage/task.py b/boto/manage/task.py
new file mode 100644
index 0000000..2f9d7d0
--- /dev/null
+++ b/boto/manage/task.py
@@ -0,0 +1,175 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto
+from boto.sdb.db.property import StringProperty, DateTimeProperty, IntegerProperty
+from boto.sdb.db.model import Model
+import datetime, subprocess, StringIO, time
+
+def check_hour(val):
+ if val == '*':
+ return
+ if int(val) < 0 or int(val) > 23:
+ raise ValueError
+
+class Task(Model):
+
+    """
+    A scheduled, repeating task that can be executed by any participating servers.
+    The scheduling is similar to cron jobs. Each task has an hour attribute.
+    The allowable values for hour are [0-23|*].
+
+    To keep the operation reasonably efficient and not cause excessive polling,
+    the minimum granularity of a Task is hourly. Some examples:
+
+         hour='*' - the task would be executed each hour
+         hour='3' - the task would be executed at 3AM GMT each day.
+
+    """
+    name = StringProperty()
+    hour = StringProperty(required=True, validator=check_hour, default='*')
+    command = StringProperty(required=True)
+    last_executed = DateTimeProperty()
+    last_status = IntegerProperty()
+    last_output = StringProperty()
+    message_id = StringProperty()
+
+    @classmethod
+    def start_all(cls, queue_name):
+        # Enqueue a kick-off message for every Task in the domain.
+        for task in cls.all():
+            task.start(queue_name)
+
+    def __init__(self, id=None, **kw):
+        Model.__init__(self, id, **kw)
+        # Derived scheduling flags: hour == '*' means run every hour.
+        self.hourly = self.hour == '*'
+        self.daily = self.hour != '*'
+        # Snapshot of "now" used by check()/_run() for this cycle.
+        self.now = datetime.datetime.utcnow()
+
+    def check(self):
+        """
+        Determine how long until the next scheduled time for a Task.
+        Returns the number of seconds until the next scheduled time or zero
+        if the task needs to be run immediately.
+        If it's an hourly task and it's never been run, run it now.
+        If it's a daily task and it's never been run and the hour is right, run it now.
+        """
+        boto.log.info('checking Task[%s]-now=%s, last=%s' % (self.name, self.now, self.last_executed))
+
+        if self.hourly and not self.last_executed:
+            return 0
+
+        if self.daily and not self.last_executed:
+            if int(self.hour) == self.now.hour:
+                return 0
+            else:
+                # NOTE(review): this is the absolute hour difference, not
+                # the time until the next occurrence of self.hour, so the
+                # delay can overshoot (e.g. hour=1 at 23:00 waits 22h, not
+                # 2h) -- confirm intended.
+                return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
+
+        delta = self.now - self.last_executed
+        if self.hourly:
+            if delta.seconds >= 60*60:
+                return 0
+            else:
+                return 60*60 - delta.seconds
+        else:
+            if int(self.hour) == self.now.hour:
+                if delta.days >= 1:
+                    return 0
+                else:
+                    return 82800 # 23 hours, just to be safe
+            else:
+                # NOTE(review): same absolute-difference heuristic as above.
+                return max( (int(self.hour)-self.now.hour), (self.now.hour-int(self.hour)) )*60*60
+
+    def _run(self, msg, vtimeout):
+        # Execute the shell command, extending the SQS message visibility
+        # timeout while it is still running so that no other server picks
+        # the task up mid-execution.
+        boto.log.info('Task[%s] - running:%s' % (self.name, self.command))
+        log_fp = StringIO.StringIO()
+        process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
+                                   stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        nsecs = 5
+        current_timeout = vtimeout
+        while process.poll() == None:
+            boto.log.info('nsecs=%s, timeout=%s' % (nsecs, current_timeout))
+            if nsecs >= current_timeout:
+                current_timeout += vtimeout
+                boto.log.info('Task[%s] - setting timeout to %d seconds' % (self.name, current_timeout))
+                if msg:
+                    msg.change_visibility(current_timeout)
+            time.sleep(5)
+            nsecs += 5
+        t = process.communicate()
+        log_fp.write(t[0])
+        log_fp.write(t[1])
+        boto.log.info('Task[%s] - output: %s' % (self.name, log_fp.getvalue()))
+        self.last_executed = self.now
+        self.last_status = process.returncode
+        # SimpleDB attribute values are size-limited; keep only a prefix.
+        self.last_output = log_fp.getvalue()[0:1023]
+
+    def run(self, msg, vtimeout=60):
+        """Run the task now if it is due; otherwise push the message's
+        visibility timeout out until the next scheduled time."""
+        delay = self.check()
+        boto.log.info('Task[%s] - delay=%s seconds' % (self.name, delay))
+        if delay == 0:
+            self._run(msg, vtimeout)
+            # Enqueue a fresh message for the next run, then delete the one
+            # we just consumed.
+            queue = msg.queue
+            new_msg = queue.new_message(self.id)
+            new_msg = queue.write(new_msg)
+            self.message_id = new_msg.id
+            self.put()
+            boto.log.info('Task[%s] - new message id=%s' % (self.name, new_msg.id))
+            msg.delete()
+            boto.log.info('Task[%s] - deleted message %s' % (self.name, msg.id))
+        else:
+            boto.log.info('new_vtimeout: %d' % delay)
+            msg.change_visibility(delay)
+
+    def start(self, queue_name):
+        """Kick off this task by writing its id onto the named SQS queue."""
+        boto.log.info('Task[%s] - starting with queue: %s' % (self.name, queue_name))
+        queue = boto.lookup('sqs', queue_name)
+        msg = queue.new_message(self.id)
+        msg = queue.write(msg)
+        self.message_id = msg.id
+        self.put()
+        boto.log.info('Task[%s] - start successful' % self.name)
+
+class TaskPoller(object):
+    """Long-running loop that reads task ids from an SQS queue and runs
+    the corresponding Task objects."""
+
+    def __init__(self, queue_name):
+        self.sqs = boto.connect_sqs()
+        self.queue = self.sqs.lookup(queue_name)
+
+    def poll(self, wait=60, vtimeout=60):
+        # Poll forever.  Task.run() deletes the message and schedules the
+        # next one; stale messages (whose id no longer matches
+        # task.message_id) are ignored and left to expire.
+        while 1:
+            m = self.queue.read(vtimeout)
+            if m:
+                task = Task.get_by_id(m.get_body())
+                if task:
+                    if not task.message_id or m.id == task.message_id:
+                        boto.log.info('Task[%s] - read message %s' % (task.name, m.id))
+                        task.run(m, vtimeout)
+                    else:
+                        boto.log.info('Task[%s] - found extraneous message, ignoring' % task.name)
+            else:
+                time.sleep(wait)
+
+
+
+
+
+
diff --git a/boto/manage/test_manage.py b/boto/manage/test_manage.py
new file mode 100644
index 0000000..e0b032a
--- /dev/null
+++ b/boto/manage/test_manage.py
@@ -0,0 +1,34 @@
+# Smoke-test script: create a Volume and a Server, wait for the server to
+# boot, make the volume usable on it, and poke around with shell commands.
+# Interactive -- Server.create()/Volume.create() prompt for any missing
+# parameters, and this talks to live AWS, so it is not an automated test.
+from boto.manage.server import Server
+from boto.manage.volume import Volume
+import time
+
+print '--> Creating New Volume'
+volume = Volume.create()
+print volume
+
+print '--> Creating New Server'
+server_list = Server.create()
+server = server_list[0]
+print server
+
+print '----> Waiting for Server to start up'
+while server.status != 'running':
+    print '*'
+    time.sleep(10)
+print '----> Server is running'
+
+print '--> Run "df -k" on Server'
+status = server.run('df -k')
+print status[1]
+
+print '--> Now run volume.make_ready to make the volume ready to use on server'
+volume.make_ready(server)
+
+print '--> Run "df -k" on Server'
+status = server.run('df -k')
+print status[1]
+
+print '--> Do an "ls -al" on the new filesystem'
+status = server.run('ls -al %s' % volume.mount_point)
+print status[1]
+
diff --git a/boto/manage/volume.py b/boto/manage/volume.py
new file mode 100644
index 0000000..66a458f
--- /dev/null
+++ b/boto/manage/volume.py
@@ -0,0 +1,420 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from __future__ import with_statement
+from boto.sdb.db.model import Model
+from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty
+from boto.manage.server import Server
+from boto.manage import propget
+import boto.ec2
+import time
+import traceback
+from contextlib import closing
+import dateutil.parser
+import datetime
+
+
class CommandLineGetter(object):
    """Interactively collect the parameters needed to create a Volume.

    Each ``get_*`` helper fills in one key of the ``params`` dict, but
    only when the caller has not already supplied a truthy value for it.
    Prompting is delegated to boto.manage.propget.
    """

    def get_region(self, params):
        """Ensure params['region'] is set, prompting from the EC2 region list."""
        if params.get('region'):
            return
        prop = self.cls.find_property('region_name')
        params['region'] = propget.get(prop, choices=boto.ec2.regions)

    def get_zone(self, params):
        """Ensure params['zone'] is set, prompting from the region's zones."""
        if params.get('zone'):
            return
        prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
                              choices=self.ec2.get_all_zones)
        params['zone'] = propget.get(prop)

    def get_name(self, params):
        """Ensure params['name'] is set."""
        if params.get('name'):
            return
        prop = self.cls.find_property('name')
        params['name'] = propget.get(prop)

    def get_size(self, params):
        """Ensure params['size'] (GB) is set."""
        if params.get('size'):
            return
        prop = IntegerProperty(name='size', verbose_name='Size (GB)')
        params['size'] = propget.get(prop)

    def get_mount_point(self, params):
        """Ensure params['mount_point'] is set."""
        if params.get('mount_point'):
            return
        prop = self.cls.find_property('mount_point')
        params['mount_point'] = propget.get(prop)

    def get_device(self, params):
        """Ensure params['device'] is set."""
        if params.get('device'):
            return
        prop = self.cls.find_property('device')
        params['device'] = propget.get(prop)

    def get(self, cls, params):
        """Fill every missing Volume-creation parameter in ``params``.

        The region must be resolved first because the EC2 connection it
        yields is needed to enumerate availability zones.
        """
        self.cls = cls
        self.get_region(params)
        self.ec2 = params['region'].connect()
        self.get_zone(params)
        self.get_name(params)
        self.get_size(params)
        self.get_mount_point(params)
        self.get_device(params)
+
+class Volume(Model):
+
+ name = StringProperty(required=True, unique=True, verbose_name='Name')
+ region_name = StringProperty(required=True, verbose_name='EC2 Region')
+ zone_name = StringProperty(required=True, verbose_name='EC2 Zone')
+ mount_point = StringProperty(verbose_name='Mount Point')
+ device = StringProperty(verbose_name="Device Name", default='/dev/sdp')
+ volume_id = StringProperty(required=True)
+ past_volume_ids = ListProperty(item_type=str)
+ server = ReferenceProperty(Server, collection_name='volumes',
+ verbose_name='Server Attached To')
+ volume_state = CalculatedProperty(verbose_name="Volume State",
+ calculated_type=str, use_method=True)
+ attachment_state = CalculatedProperty(verbose_name="Attachment State",
+ calculated_type=str, use_method=True)
+ size = CalculatedProperty(verbose_name="Size (GB)",
+ calculated_type=int, use_method=True)
+
+ @classmethod
+ def create(cls, **params):
+ getter = CommandLineGetter()
+ getter.get(cls, params)
+ region = params.get('region')
+ ec2 = region.connect()
+ zone = params.get('zone')
+ size = params.get('size')
+ ebs_volume = ec2.create_volume(size, zone.name)
+ v = cls()
+ v.ec2 = ec2
+ v.volume_id = ebs_volume.id
+ v.name = params.get('name')
+ v.mount_point = params.get('mount_point')
+ v.device = params.get('device')
+ v.region_name = region.name
+ v.zone_name = zone.name
+ v.put()
+ return v
+
+ @classmethod
+ def create_from_volume_id(cls, region_name, volume_id, name):
+ vol = None
+ ec2 = boto.ec2.connect_to_region(region_name)
+ rs = ec2.get_all_volumes([volume_id])
+ if len(rs) == 1:
+ v = rs[0]
+ vol = cls()
+ vol.volume_id = v.id
+ vol.name = name
+ vol.region_name = v.region.name
+ vol.zone_name = v.zone
+ vol.put()
+ return vol
+
+ def create_from_latest_snapshot(self, name, size=None):
+ snapshot = self.get_snapshots()[-1]
+ return self.create_from_snapshot(name, snapshot, size)
+
+ def create_from_snapshot(self, name, snapshot, size=None):
+ if size < self.size:
+ size = self.size
+ ec2 = self.get_ec2_connection()
+ if self.zone_name == None or self.zone_name == '':
+ # deal with the migration case where the zone is not set in the logical volume:
+ current_volume = ec2.get_all_volumes([self.volume_id])[0]
+ self.zone_name = current_volume.zone
+ ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
+ v = Volume()
+ v.ec2 = self.ec2
+ v.volume_id = ebs_volume.id
+ v.name = name
+ v.mount_point = self.mount_point
+ v.device = self.device
+ v.region_name = self.region_name
+ v.zone_name = self.zone_name
+ v.put()
+ return v
+
+ def get_ec2_connection(self):
+ if self.server:
+ return self.server.ec2
+ if not hasattr(self, 'ec2') or self.ec2 == None:
+ self.ec2 = boto.ec2.connect_to_region(self.region_name)
+ return self.ec2
+
+ def _volume_state(self):
+ ec2 = self.get_ec2_connection()
+ rs = ec2.get_all_volumes([self.volume_id])
+ return rs[0].volume_state()
+
+ def _attachment_state(self):
+ ec2 = self.get_ec2_connection()
+ rs = ec2.get_all_volumes([self.volume_id])
+ return rs[0].attachment_state()
+
+ def _size(self):
+ if not hasattr(self, '__size'):
+ ec2 = self.get_ec2_connection()
+ rs = ec2.get_all_volumes([self.volume_id])
+ self.__size = rs[0].size
+ return self.__size
+
+ def install_xfs(self):
+ if self.server:
+ self.server.install('xfsprogs xfsdump')
+
+ def get_snapshots(self):
+ """
+ Returns a list of all completed snapshots for this volume ID.
+ """
+ ec2 = self.get_ec2_connection()
+ rs = ec2.get_all_snapshots()
+ all_vols = [self.volume_id] + self.past_volume_ids
+ snaps = []
+ for snapshot in rs:
+ if snapshot.volume_id in all_vols:
+ if snapshot.progress == '100%':
+ snapshot.date = dateutil.parser.parse(snapshot.start_time)
+ snapshot.keep = True
+ snaps.append(snapshot)
+ snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))
+ return snaps
+
+ def attach(self, server=None):
+ if self.attachment_state == 'attached':
+ print 'already attached'
+ return None
+ if server:
+ self.server = server
+ self.put()
+ ec2 = self.get_ec2_connection()
+ ec2.attach_volume(self.volume_id, self.server.instance_id, self.device)
+
+ def detach(self, force=False):
+ state = self.attachment_state
+ if state == 'available' or state == None or state == 'detaching':
+ print 'already detached'
+ return None
+ ec2 = self.get_ec2_connection()
+ ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force)
+ self.server = None
+ self.put()
+
+ def checkfs(self, use_cmd=None):
+ if self.server == None:
+ raise ValueError, 'server attribute must be set to run this command'
+ # detemine state of file system on volume, only works if attached
+ if use_cmd:
+ cmd = use_cmd
+ else:
+ cmd = self.server.get_cmdshell()
+ status = cmd.run('xfs_check %s' % self.device)
+ if not use_cmd:
+ cmd.close()
+ if status[1].startswith('bad superblock magic number 0'):
+ return False
+ return True
+
+ def wait(self):
+ if self.server == None:
+ raise ValueError, 'server attribute must be set to run this command'
+ with closing(self.server.get_cmdshell()) as cmd:
+ # wait for the volume device to appear
+ cmd = self.server.get_cmdshell()
+ while not cmd.exists(self.device):
+ boto.log.info('%s still does not exist, waiting 10 seconds' % self.device)
+ time.sleep(10)
+
+ def format(self):
+ if self.server == None:
+ raise ValueError, 'server attribute must be set to run this command'
+ status = None
+ with closing(self.server.get_cmdshell()) as cmd:
+ if not self.checkfs(cmd):
+ boto.log.info('make_fs...')
+ status = cmd.run('mkfs -t xfs %s' % self.device)
+ return status
+
+ def mount(self):
+ if self.server == None:
+ raise ValueError, 'server attribute must be set to run this command'
+ boto.log.info('handle_mount_point')
+ with closing(self.server.get_cmdshell()) as cmd:
+ cmd = self.server.get_cmdshell()
+ if not cmd.isdir(self.mount_point):
+ boto.log.info('making directory')
+ # mount directory doesn't exist so create it
+ cmd.run("mkdir %s" % self.mount_point)
+ else:
+ boto.log.info('directory exists already')
+ status = cmd.run('mount -l')
+ lines = status[1].split('\n')
+ for line in lines:
+ t = line.split()
+ if t and t[2] == self.mount_point:
+ # something is already mounted at the mount point
+ # unmount that and mount it as /tmp
+ if t[0] != self.device:
+ cmd.run('umount %s' % self.mount_point)
+ cmd.run('mount %s /tmp' % t[0])
+ cmd.run('chmod 777 /tmp')
+ break
+ # Mount up our new EBS volume onto mount_point
+ cmd.run("mount %s %s" % (self.device, self.mount_point))
+ cmd.run('xfs_growfs %s' % self.mount_point)
+
+ def make_ready(self, server):
+ self.server = server
+ self.put()
+ self.install_xfs()
+ self.attach()
+ self.wait()
+ self.format()
+ self.mount()
+
+ def freeze(self):
+ if self.server:
+ return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point)
+
+ def unfreeze(self):
+ if self.server:
+ return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point)
+
+ def snapshot(self):
+ # if this volume is attached to a server
+ # we need to freeze the XFS file system
+ try:
+ self.freeze()
+ if self.server == None:
+ snapshot = self.get_ec2_connection().create_snapshot(self.volume_id)
+ else:
+ snapshot = self.server.ec2.create_snapshot(self.volume_id)
+ boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot))
+ except Exception:
+ boto.log.info('Snapshot error')
+ boto.log.info(traceback.format_exc())
+ finally:
+ status = self.unfreeze()
+ return status
+
+ def get_snapshot_range(self, snaps, start_date=None, end_date=None):
+ l = []
+ for snap in snaps:
+ if start_date and end_date:
+ if snap.date >= start_date and snap.date <= end_date:
+ l.append(snap)
+ elif start_date:
+ if snap.date >= start_date:
+ l.append(snap)
+ elif end_date:
+ if snap.date <= end_date:
+ l.append(snap)
+ else:
+ l.append(snap)
+ return l
+
+ def trim_snapshots(self, delete=False):
+ """
+ Trim the number of snapshots for this volume. This method always
+ keeps the oldest snapshot. It then uses the parameters passed in
+ to determine how many others should be kept.
+
+ The algorithm is to keep all snapshots from the current day. Then
+ it will keep the first snapshot of the day for the previous seven days.
+ Then, it will keep the first snapshot of the week for the previous
+ four weeks. After than, it will keep the first snapshot of the month
+ for as many months as there are.
+
+ """
+ snaps = self.get_snapshots()
+ # Always keep the oldest and the newest
+ if len(snaps) <= 2:
+ return snaps
+ snaps = snaps[1:-1]
+ now = datetime.datetime.now(snaps[0].date.tzinfo)
+ midnight = datetime.datetime(year=now.year, month=now.month,
+ day=now.day, tzinfo=now.tzinfo)
+ # Keep the first snapshot from each day of the previous week
+ one_week = datetime.timedelta(days=7, seconds=60*60)
+ print midnight-one_week, midnight
+ previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight)
+ print previous_week
+ if not previous_week:
+ return snaps
+ current_day = None
+ for snap in previous_week:
+ if current_day and current_day == snap.date.day:
+ snap.keep = False
+ else:
+ current_day = snap.date.day
+ # Get ourselves onto the next full week boundary
+ if previous_week:
+ week_boundary = previous_week[0].date
+ if week_boundary.weekday() != 0:
+ delta = datetime.timedelta(days=week_boundary.weekday())
+ week_boundary = week_boundary - delta
+ # Keep one within this partial week
+ partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date)
+ if len(partial_week) > 1:
+ for snap in partial_week[1:]:
+ snap.keep = False
+ # Keep the first snapshot of each week for the previous 4 weeks
+ for i in range(0,4):
+ weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary)
+ if len(weeks_worth) > 1:
+ for snap in weeks_worth[1:]:
+ snap.keep = False
+ week_boundary = week_boundary - one_week
+ # Now look through all remaining snaps and keep one per month
+ remainder = self.get_snapshot_range(snaps, end_date=week_boundary)
+ current_month = None
+ for snap in remainder:
+ if current_month and current_month == snap.date.month:
+ snap.keep = False
+ else:
+ current_month = snap.date.month
+ if delete:
+ for snap in snaps:
+ if not snap.keep:
+ boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name))
+ snap.delete()
+ return snaps
+
+ def grow(self, size):
+ pass
+
+ def copy(self, snapshot):
+ pass
+
+ def get_snapshot_from_date(self, date):
+ pass
+
+ def delete(self, delete_ebs_volume=False):
+ if delete_ebs_volume:
+ self.detach()
+ ec2 = self.get_ec2_connection()
+ ec2.delete_volume(self.volume_id)
+ Model.delete(self)
+
+ def archive(self):
+ # snapshot volume, trim snaps, delete volume-id
+ pass
+
+
diff --git a/boto/mashups/__init__.py b/boto/mashups/__init__.py
new file mode 100644
index 0000000..449bd16
--- /dev/null
+++ b/boto/mashups/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/mashups/interactive.py b/boto/mashups/interactive.py
new file mode 100644
index 0000000..b80e661
--- /dev/null
+++ b/boto/mashups/interactive.py
@@ -0,0 +1,97 @@
+# Copyright (C) 2003-2007 Robey Pointer <robey@lag.net>
+#
+# This file is part of paramiko.
+#
+# Paramiko is free software; you can redistribute it and/or modify it under the
+# terms of the GNU Lesser General Public License as published by the Free
+# Software Foundation; either version 2.1 of the License, or (at your option)
+# any later version.
+#
+# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
+# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
+# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+
+
+import socket
+import sys
+
+# windows does not have termios...
+try:
+ import termios
+ import tty
+ has_termios = True
+except ImportError:
+ has_termios = False
+
+
def interactive_shell(chan):
    """Bridge the local terminal to *chan*, choosing the POSIX raw-tty
    loop when termios is available and the threaded Windows loop otherwise."""
    shell = posix_shell if has_termios else windows_shell
    shell(chan)
+
+
def posix_shell(chan):
    """Interactive loop for POSIX systems: put the local tty into raw
    mode and shuttle bytes between stdin and the paramiko channel until
    either side reaches EOF."""
    import select

    # remember the current tty settings so they can be restored on exit
    oldtty = termios.tcgetattr(sys.stdin)
    try:
        # raw/cbreak mode: forward every keystroke immediately, unechoed
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        # non-blocking channel reads; recv raises socket.timeout instead
        # of blocking when no data is pending
        chan.settimeout(0.0)

        while True:
            # block until the channel or stdin has data
            r, w, e = select.select([chan, sys.stdin], [], [])
            if chan in r:
                try:
                    x = chan.recv(1024)
                    if len(x) == 0:
                        # remote side closed the channel
                        print '\r\n*** EOF\r\n',
                        break
                    sys.stdout.write(x)
                    sys.stdout.flush()
                except socket.timeout:
                    # select woke us but no data was ready; keep looping
                    pass
            if sys.stdin in r:
                x = sys.stdin.read(1)
                if len(x) == 0:
                    # local stdin closed
                    break
                chan.send(x)

    finally:
        # always restore the terminal, even on exceptions
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
+
+
+# thanks to Mike Looijmans for this code
def windows_shell(chan):
    """Interactive loop for Windows (no termios): a background thread
    copies channel output to stdout while the main thread forwards
    line-buffered stdin to the channel."""
    import threading

    sys.stdout.write("Line-buffered terminal emulation. Press F6 or ^Z to send EOF.\r\n\r\n")

    def writeall(sock):
        # runs in the writer thread: drain the channel until remote EOF
        while True:
            data = sock.recv(256)
            if not data:
                sys.stdout.write('\r\n*** EOF ***\r\n\r\n')
                sys.stdout.flush()
                break
            sys.stdout.write(data)
            sys.stdout.flush()

    writer = threading.Thread(target=writeall, args=(chan,))
    writer.start()

    try:
        # forward local keystrokes; read(1) returns '' on console EOF
        while True:
            d = sys.stdin.read(1)
            if not d:
                break
            chan.send(d)
    except EOFError:
        # user hit ^Z or F6
        pass
diff --git a/boto/mashups/iobject.py b/boto/mashups/iobject.py
new file mode 100644
index 0000000..a226b5c
--- /dev/null
+++ b/boto/mashups/iobject.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import os
+
def int_val_fn(v):
    """Return True if ``v`` can be converted to an int, else False.

    Used as a validation callback by IObject.get_int/get_string.
    """
    try:
        int(v)
        return True
    except (TypeError, ValueError):
        # Fix: was a bare ``except:`` which also swallowed
        # KeyboardInterrupt/SystemExit and hid genuine errors.
        return False
+
class IObject(object):
    """Mixin providing simple interactive console prompts (menus, string,
    filename and integer input) used by the mashups classes."""

    def choose_from_list(self, item_list, search_str='',
                         prompt='Enter Selection'):
        """Present a numbered menu built from ``item_list`` and return the
        user's selection.

        Items are either plain strings (returned as-is) or
        (obj, id, desc) tuples, in which case ``obj`` is returned and the
        menu shows id/desc filtered by ``search_str``.  Entering ``/text``
        re-filters the menu; entering 0 returns None.
        """
        if not item_list:
            print 'No Choices Available'
            return
        choice = None
        while not choice:
            n = 1
            choices = []
            for item in item_list:
                if isinstance(item, str):
                    # plain strings are always listed; search_str not applied
                    print '[%d] %s' % (n, item)
                    choices.append(item)
                    n += 1
                else:
                    obj, id, desc = item
                    if desc:
                        if desc.find(search_str) >= 0:
                            print '[%d] %s - %s' % (n, id, desc)
                            choices.append(obj)
                            n += 1
                    else:
                        # no description: filter on the id instead
                        if id.find(search_str) >= 0:
                            print '[%d] %s' % (n, id)
                            choices.append(obj)
                            n += 1
            if choices:
                val = raw_input('%s[1-%d]: ' % (prompt, len(choices)))
                if val.startswith('/'):
                    # '/pattern' narrows the menu and re-displays it
                    search_str = val[1:]
                else:
                    try:
                        int_val = int(val)
                        if int_val == 0:
                            # 0 is the explicit "none of these" escape
                            return None
                        choice = choices[int_val-1]
                    except ValueError:
                        print '%s is not a valid choice' % val
                    except IndexError:
                        print '%s is not within the range[1-%d]' % (val,
                                                                    len(choices))
            else:
                print "No objects matched your pattern"
                search_str = ''
        return choice

    def get_string(self, prompt, validation_fn=None):
        """Prompt for a string, re-prompting until ``validation_fn``
        (if given) accepts the value."""
        okay = False
        while not okay:
            val = raw_input('%s: ' % prompt)
            if validation_fn:
                okay = validation_fn(val)
                if not okay:
                    print 'Invalid value: %s' % val
            else:
                okay = True
        return val

    def get_filename(self, prompt):
        """Prompt for an existing file path (~ is expanded).

        If a directory is entered, its contents are offered as a menu and
        the chosen entry is joined onto the directory path.
        """
        okay = False
        val = ''
        while not okay:
            val = raw_input('%s: %s' % (prompt, val))
            val = os.path.expanduser(val)
            if os.path.isfile(val):
                okay = True
            elif os.path.isdir(val):
                path = val
                val = self.choose_from_list(os.listdir(path))
                if val:
                    val = os.path.join(path, val)
                    okay = True
                else:
                    val = ''
            else:
                print 'Invalid value: %s' % val
                val = ''
        return val

    def get_int(self, prompt):
        """Prompt for an integer (validated via int_val_fn)."""
        s = self.get_string(prompt, int_val_fn)
        return int(s)
+
diff --git a/boto/mashups/order.py b/boto/mashups/order.py
new file mode 100644
index 0000000..6efdc3e
--- /dev/null
+++ b/boto/mashups/order.py
@@ -0,0 +1,211 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+High-level abstraction of an EC2 order for servers
+"""
+
+import boto
+import boto.ec2
+from boto.mashups.server import Server, ServerSet
+from boto.mashups.iobject import IObject
+from boto.pyami.config import Config
+from boto.sdb.persist import get_domain, set_domain
+import time, StringIO
+
+InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
+
class Item(IObject):
    """One line of an EC2 order: everything needed to launch a batch of
    identical instances (region, AMI, type, zone, groups, keypair, config).

    Each ``set_*`` method uses the supplied value when given, otherwise
    it prompts the user interactively via the IObject helpers.
    """

    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None
        self.config = None
        # Fix: userdata was never initialized, so set_userdata/get_userdata
        # raised AttributeError on first use.
        self.userdata = {}

    def set_userdata(self, key, value):
        self.userdata[key] = value

    def get_userdata(self, key):
        return self.userdata[key]

    def set_region(self, region=None):
        """Use ``region`` or prompt from the list of EC2 regions."""
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        """Use ``name`` or prompt for one."""
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        """Use ``instance_type`` or prompt from InstanceTypes."""
        if instance_type:
            self.instance_type = instance_type
        else:
            # Fix: 'Instance Type' was passed positionally as search_str,
            # so the menu showed the default 'Enter Selection' prompt.
            self.instance_type = self.choose_from_list(InstanceTypes,
                                                       prompt='Instance Type')

    def set_quantity(self, n=0):
        """Use a positive ``n`` or prompt for the instance count."""
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        """Use ``zone`` or prompt from the region's availability zones."""
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')

    def set_ami(self, ami=None):
        """Use ``ami`` or prompt from all visible images."""
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        """Append ``group`` or prompt for a security group to add."""
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        """Use ``key`` or prompt from the account's keypairs."""
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        """Inject AWS credentials and Pyami settings into self.config."""
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        """Load self.config from ``config_path``, prompting if absent."""
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        """Serialize self.config to the string passed as EC2 user-data."""
        s = StringIO.StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        """Populate every field from ``params``, prompting for the rest.

        Region is resolved first because its connection is needed by the
        zone/AMI/group/keypair prompts.
        """
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
+
+class Order(IObject):
+
+ def __init__(self):
+ self.items = []
+ self.reservation = None
+
+ def add_item(self, **params):
+ item = Item()
+ item.enter(**params)
+ self.items.append(item)
+
+ def display(self):
+ print 'This Order consists of the following items'
+ print
+ print 'QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair'
+ for item in self.items:
+ print '%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
+ item.ami.id, item.groups, item.key.name)
+
+ def place(self, block=True):
+ if get_domain() == None:
+ print 'SDB Persistence Domain not set'
+ domain_name = self.get_string('Specify SDB Domain')
+ set_domain(domain_name)
+ s = ServerSet()
+ for item in self.items:
+ r = item.ami.run(min_count=1, max_count=item.quantity,
+ key_name=item.key.name, user_data=item.get_userdata_string(),
+ security_groups=item.groups, instance_type=item.instance_type,
+ placement=item.zone.name)
+ if block:
+ states = [i.state for i in r.instances]
+ if states.count('running') != len(states):
+ print states
+ time.sleep(15)
+ states = [i.update() for i in r.instances]
+ for i in r.instances:
+ server = Server()
+ server.name = item.name
+ server.instance_id = i.id
+ server.reservation = r
+ server.save()
+ s.append(server)
+ if len(s) == 1:
+ return s[0]
+ else:
+ return s
+
+
+
diff --git a/boto/mashups/server.py b/boto/mashups/server.py
new file mode 100644
index 0000000..6cea106
--- /dev/null
+++ b/boto/mashups/server.py
@@ -0,0 +1,395 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+High-level abstraction of an EC2 server
+"""
+import boto
+import boto.utils
+from boto.mashups.iobject import IObject
+from boto.pyami.config import Config, BotoConfigPath
+from boto.mashups.interactive import interactive_shell
+from boto.sdb.db.model import Model
+from boto.sdb.db.property import StringProperty
+import os
+import StringIO
+
+
+class ServerSet(list):
+    """A list of Server objects that broadcasts attribute access.
+
+    Reading an attribute returns a list of that attribute's value on
+    every server in the set.  If any collected value is callable, a
+    bound ``map`` function is returned instead; calling it invokes
+    each collected callable with the supplied args.
+    """
+
+    def __getattr__(self, name):
+        results = []
+        is_callable = False
+        for server in self:
+            try:
+                val = getattr(server, name)
+                if callable(val):
+                    is_callable = True
+                results.append(val)
+            except:
+                # NOTE(review): bare except — any failure on one server
+                # is silently recorded as None rather than propagated.
+                results.append(None)
+        if is_callable:
+            # Stash the callables on the instance; self.map reads them.
+            # Not safe across interleaved attribute accesses.
+            self.map_list = results
+            return self.map
+        return results
+
+    def map(self, *args):
+        # Call each callable gathered by the last __getattr__ with the
+        # given args and return the results in server order.
+        results = []
+        for fn in self.map_list:
+            results.append(fn(*args))
+        return results
+
+class Server(Model):
+    """SDB-persisted record of an EC2 instance, with helpers for
+    launching, stopping, configuring, and SSHing to it."""
+
+    @property
+    def ec2(self):
+        # Lazily-created, cached EC2 connection for this object.
+        if self._ec2 is None:
+            self._ec2 = boto.connect_ec2()
+        return self._ec2
+
+    @classmethod
+    def Inventory(cls):
+        """
+        Returns a list of Server instances, one for each Server object
+        persisted in the db
+        """
+        l = ServerSet()
+        rs = cls.find()
+        for server in rs:
+            l.append(server)
+        return l
+
+    @classmethod
+    def Register(cls, name, instance_id, description=''):
+        # Create and persist a Server record for an already-running
+        # EC2 instance.
+        s = cls()
+        s.name = name
+        s.instance_id = instance_id
+        s.description = description
+        s.save()
+        return s
+
+    def __init__(self, id=None, **kw):
+        Model.__init__(self, id, **kw)
+        # Per-instance caches; never persisted to SDB.
+        self._reservation = None
+        self._instance = None
+        self._ssh_client = None
+        self._pkey = None
+        self._config = None
+        self._ec2 = None
+
+    # Persisted SDB properties (declared at class level).
+    name = StringProperty(unique=True, verbose_name="Name")
+    instance_id = StringProperty(verbose_name="Instance ID")
+    config_uri = StringProperty()
+    ami_id = StringProperty(verbose_name="AMI ID")
+    zone = StringProperty(verbose_name="Availability Zone")
+    security_group = StringProperty(verbose_name="Security Group", default="default")
+    key_name = StringProperty(verbose_name="Key Name")
+    elastic_ip = StringProperty(verbose_name="Elastic IP")
+    instance_type = StringProperty(verbose_name="Instance Type")
+    description = StringProperty(verbose_name="Description")
+    log = StringProperty()
+
+    def setReadOnly(self, value):
+        # Shared setter for all read-only properties below: assigning
+        # to them always raises.
+        raise AttributeError
+
+    def getInstance(self):
+        # Lazily resolve and cache the live boto Instance (and its
+        # Reservation) for self.instance_id.
+        if not self._instance:
+            if self.instance_id:
+                try:
+                    rs = self.ec2.get_all_instances([self.instance_id])
+                except:
+                    # NOTE(review): bare except — any API failure is
+                    # treated as "no instance" and returns None.
+                    return None
+                if len(rs) > 0:
+                    self._reservation = rs[0]
+                    self._instance = self._reservation.instances[0]
+        return self._instance
+
+    instance = property(getInstance, setReadOnly, None, 'The Instance for the server')
+
+    # The accessors below all return None when no instance is resolved.
+
+    def getAMI(self):
+        if self.instance:
+            return self.instance.image_id
+
+    ami = property(getAMI, setReadOnly, None, 'The AMI for the server')
+
+    def getStatus(self):
+        # update() refreshes state from EC2 before reporting it.
+        if self.instance:
+            self.instance.update()
+            return self.instance.state
+
+    status = property(getStatus, setReadOnly, None,
+                      'The status of the server')
+
+    def getHostname(self):
+        if self.instance:
+            return self.instance.public_dns_name
+
+    hostname = property(getHostname, setReadOnly, None,
+                        'The public DNS name of the server')
+
+    def getPrivateHostname(self):
+        if self.instance:
+            return self.instance.private_dns_name
+
+    private_hostname = property(getPrivateHostname, setReadOnly, None,
+                                'The private DNS name of the server')
+
+    def getLaunchTime(self):
+        if self.instance:
+            return self.instance.launch_time
+
+    launch_time = property(getLaunchTime, setReadOnly, None,
+                           'The time the Server was started')
+
+    def getConsoleOutput(self):
+        if self.instance:
+            return self.instance.get_console_output()
+
+    console_output = property(getConsoleOutput, setReadOnly, None,
+                              'Retrieve the console output for server')
+
+    def getGroups(self):
+        # Groups live on the Reservation, which is only populated
+        # after getInstance has run.
+        if self._reservation:
+            return self._reservation.groups
+        else:
+            return None
+
+    groups = property(getGroups, setReadOnly, None,
+                      'The Security Groups controlling access to this server')
+
+    def getConfig(self):
+        # On first access, pull the remote boto config file from the
+        # server over SFTP into '<instance_id>.ini' and cache it as a
+        # Config object.
+        if not self._config:
+            remote_file = BotoConfigPath
+            local_file = '%s.ini' % self.instance.id
+            self.get_file(remote_file, local_file)
+            self._config = Config(local_file)
+        return self._config
+
+ def setConfig(self, config):
+ local_file = '%s.ini' % self.instance.id
+ fp = open(local_file)
+ config.write(fp)
+ fp.close()
+ self.put_file(local_file, BotoConfigPath)
+ self._config = config
+
+ config = property(getConfig, setConfig, None,
+ 'The instance data for this server')
+
+    def set_config(self, config):
+        """
+        Set SDB based config
+        """
+        # Persists the config to the 'botoConfigs' SDB domain keyed by
+        # this Server's id, and caches it locally.
+        self._config = config
+        self._config.dump_to_sdb("botoConfigs", self.id)
+
+    def load_config(self):
+        # Load the SDB-persisted config written by set_config.
+        self._config = Config(do_load=False)
+        self._config.load_from_sdb("botoConfigs", self.id)
+
+    def stop(self):
+        # Stop the underlying EC2 instance, if one is resolved.
+        if self.instance:
+            self.instance.stop()
+
+    def start(self):
+        """Launch a fresh EC2 instance for this Server record.
+
+        Stops any current instance, injects AWS credentials and Pyami
+        settings into the stored config, passes the config as user
+        data, records the new instance id, and re-associates the
+        elastic IP if one is set.
+        """
+        self.stop()
+        ec2 = boto.connect_ec2()
+        ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
+        groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
+        if not self._config:
+            self.load_config()
+        if not self._config.has_section("Credentials"):
+            self._config.add_section("Credentials")
+            self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
+            self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)
+
+        if not self._config.has_section("Pyami"):
+            self._config.add_section("Pyami")
+
+        # NOTE(review): self._manager is presumably provided by the
+        # Model base class — not visible here; confirm it is always
+        # set before start() is called.
+        if self._manager.domain:
+            self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
+            self._config.set("Pyami", 'server_sdb_name', self.name)
+
+        # Serialize the config and hand it to the instance as user data.
+        cfg = StringIO.StringIO()
+        self._config.write(cfg)
+        cfg = cfg.getvalue()
+        r = ami.run(min_count=1,
+                    max_count=1,
+                    key_name=self.key_name,
+                    security_groups = groups,
+                    instance_type = self.instance_type,
+                    placement = self.zone,
+                    user_data = cfg)
+        i = r.instances[0]
+        self.instance_id = i.id
+        self.put()
+        if self.elastic_ip:
+            ec2.associate_address(self.instance_id, self.elastic_ip)
+
+    def reboot(self):
+        # Reboot the underlying EC2 instance, if one is resolved.
+        if self.instance:
+            self.instance.reboot()
+
+    def get_ssh_client(self, key_file=None, host_key_file='~/.ssh/known_hosts',
+                       uname='root'):
+        """Return a cached, connected paramiko SSHClient for this server.
+
+        :param key_file: path to the OpenSSH private key; prompted for
+            interactively on first use if not given
+        :param host_key_file: known_hosts file to load
+        :param uname: remote username to connect as
+        """
+        import paramiko
+        if not self.instance:
+            print 'No instance yet!'
+            return
+        if not self._ssh_client:
+            if not key_file:
+                iobject = IObject()
+                key_file = iobject.get_filename('Path to OpenSSH Key file')
+            self._pkey = paramiko.RSAKey.from_private_key_file(key_file)
+            self._ssh_client = paramiko.SSHClient()
+            self._ssh_client.load_system_host_keys()
+            self._ssh_client.load_host_keys(os.path.expanduser(host_key_file))
+            # Unknown host keys are accepted automatically.
+            self._ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+            self._ssh_client.connect(self.instance.public_dns_name,
+                                     username=uname, pkey=self._pkey)
+        return self._ssh_client
+
+    def get_file(self, remotepath, localpath):
+        # Copy a file from the server via SFTP.
+        ssh_client = self.get_ssh_client()
+        sftp_client = ssh_client.open_sftp()
+        sftp_client.get(remotepath, localpath)
+
+    def put_file(self, localpath, remotepath):
+        # Copy a file to the server via SFTP.
+        ssh_client = self.get_ssh_client()
+        sftp_client = ssh_client.open_sftp()
+        sftp_client.put(localpath, remotepath)
+
+    def listdir(self, remotepath):
+        # List a remote directory via SFTP.
+        ssh_client = self.get_ssh_client()
+        sftp_client = ssh_client.open_sftp()
+        return sftp_client.listdir(remotepath)
+
+    def shell(self, key_file=None):
+        # Open an interactive shell session on the server.
+        ssh_client = self.get_ssh_client(key_file)
+        channel = ssh_client.invoke_shell()
+        interactive_shell(channel)
+
+ def bundle_image(self, prefix, key_file, cert_file, size):
+ print 'bundling image...'
+ print '\tcopying cert and pk over to /mnt directory on server'
+ ssh_client = self.get_ssh_client()
+ sftp_client = ssh_client.open_sftp()
+ path, name = os.path.split(key_file)
+ remote_key_file = '/mnt/%s' % name
+ self.put_file(key_file, remote_key_file)
+ path, name = os.path.split(cert_file)
+ remote_cert_file = '/mnt/%s' % name
+ self.put_file(cert_file, remote_cert_file)
+ print '\tdeleting %s' % BotoConfigPath
+ # delete the metadata.ini file if it exists
+ try:
+ sftp_client.remove(BotoConfigPath)
+ except:
+ pass
+ command = 'sudo ec2-bundle-vol '
+ command += '-c %s -k %s ' % (remote_cert_file, remote_key_file)
+ command += '-u %s ' % self._reservation.owner_id
+ command += '-p %s ' % prefix
+ command += '-s %d ' % size
+ command += '-d /mnt '
+ if self.instance.instance_type == 'm1.small' or self.instance_type == 'c1.medium':
+ command += '-r i386'
+ else:
+ command += '-r x86_64'
+ print '\t%s' % command
+ t = ssh_client.exec_command(command)
+ response = t[1].read()
+ print '\t%s' % response
+ print '\t%s' % t[2].read()
+ print '...complete!'
+
+    def upload_bundle(self, bucket, prefix):
+        """Run ec2-upload-bundle on the server to push the bundle made
+        by bundle_image into the given S3 bucket.
+
+        NOTE(review): the AWS secret key is passed on the remote
+        command line, where it is visible to other users via ps.
+        """
+        print 'uploading bundle...'
+        command = 'ec2-upload-bundle '
+        command += '-m /mnt/%s.manifest.xml ' % prefix
+        command += '-b %s ' % bucket
+        command += '-a %s ' % self.ec2.aws_access_key_id
+        command += '-s %s ' % self.ec2.aws_secret_access_key
+        print '\t%s' % command
+        ssh_client = self.get_ssh_client()
+        t = ssh_client.exec_command(command)
+        response = t[1].read()
+        print '\t%s' % response
+        print '\t%s' % t[2].read()
+        print '...complete!'
+
+    def create_image(self, bucket=None, prefix=None, key_file=None, cert_file=None, size=None):
+        """Bundle, upload, and register an AMI from this server.
+
+        Any argument left as None is prompted for interactively.
+        Returns the newly registered image id.
+        """
+        iobject = IObject()
+        if not bucket:
+            bucket = iobject.get_string('Name of S3 bucket')
+        if not prefix:
+            prefix = iobject.get_string('Prefix for AMI file')
+        if not key_file:
+            key_file = iobject.get_filename('Path to RSA private key file')
+        if not cert_file:
+            cert_file = iobject.get_filename('Path to RSA public cert file')
+        if not size:
+            size = iobject.get_int('Size (in MB) of bundled image')
+        self.bundle_image(prefix, key_file, cert_file, size)
+        self.upload_bundle(bucket, prefix)
+        print 'registering image...'
+        self.image_id = self.ec2.register_image('%s/%s.manifest.xml' % (bucket, prefix))
+        return self.image_id
+
+    def attach_volume(self, volume, device="/dev/sdp"):
+        """
+        Attach an EBS volume to this server
+
+        :param volume: EBS Volume to attach
+        :type volume: boto.ec2.volume.Volume
+
+        :param device: Device to attach to (default to /dev/sdp)
+        :type device: string
+        """
+        # Accept either a Volume object or a raw volume-id string.
+        if hasattr(volume, "id"):
+            volume_id = volume.id
+        else:
+            volume_id = volume
+        return self.ec2.attach_volume(volume_id=volume_id, instance_id=self.instance_id, device=device)
+
+    def detach_volume(self, volume):
+        """
+        Detach an EBS volume from this server
+
+        :param volume: EBS Volume to detach
+        :type volume: boto.ec2.volume.Volume
+        """
+        # Accept either a Volume object or a raw volume-id string.
+        if hasattr(volume, "id"):
+            volume_id = volume.id
+        else:
+            volume_id = volume
+        return self.ec2.detach_volume(volume_id=volume_id, instance_id=self.instance_id)
+
+    def install_package(self, package_name):
+        """Install a package on the server with yum, echoing output."""
+        print 'installing %s...' % package_name
+        command = 'yum -y install %s' % package_name
+        print '\t%s' % command
+        ssh_client = self.get_ssh_client()
+        t = ssh_client.exec_command(command)
+        response = t[1].read()
+        print '\t%s' % response
+        print '\t%s' % t[2].read()
+        print '...complete!'
diff --git a/boto/mturk/__init__.py b/boto/mturk/__init__.py
new file mode 100644
index 0000000..449bd16
--- /dev/null
+++ b/boto/mturk/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/mturk/connection.py b/boto/mturk/connection.py
new file mode 100644
index 0000000..619697f
--- /dev/null
+++ b/boto/mturk/connection.py
@@ -0,0 +1,887 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import datetime
+import itertools
+
+from boto import handler
+from boto import config
+from boto.mturk.price import Price
+import boto.mturk.notification
+from boto.connection import AWSQueryConnection
+from boto.exception import EC2ResponseError
+from boto.resultset import ResultSet
+from boto.mturk.question import QuestionForm, ExternalQuestion
+
+class MTurkRequestError(EC2ResponseError):
+    "Error for MTurk Requests"
+    # todo: subclass from an abstract parent of EC2ResponseError
+
+class MTurkConnection(AWSQueryConnection):
+    """Connection to the Amazon Mechanical Turk Requester API."""
+
+    # MTurk WSDL/API version sent with every request.
+    APIVersion = '2008-08-02'
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=False, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None,
+                 host=None, debug=0,
+                 https_connection_factory=None):
+        # Default host honours the [MTurk] sandbox boto-config flag;
+        # an explicit host argument overrides it.
+        if not host:
+            if config.has_option('MTurk', 'sandbox') and config.get('MTurk', 'sandbox') == 'True':
+                host = 'mechanicalturk.sandbox.amazonaws.com'
+            else:
+                host = 'mechanicalturk.amazonaws.com'
+
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy, proxy_port,
+                                    proxy_user, proxy_pass, host, debug,
+                                    https_connection_factory)
+
+    def _required_auth_capability(self):
+        # Selects the MTurk request-signing scheme in boto.auth.
+        return ['mturk']
+
+    def get_account_balance(self):
+        """
+        Perform the GetAccountBalance operation and return a ResultSet
+        whose AvailableBalance and OnHoldBalance are Price objects.
+        """
+        params = {}
+        return self._process_request('GetAccountBalance', params,
+                                     [('AvailableBalance', Price),
+                                      ('OnHoldBalance', Price)])
+
+    def register_hit_type(self, title, description, reward, duration,
+                          keywords=None, approval_delay=None, qual_req=None):
+        """
+        Register a new HIT Type
+        title, description are strings
+        reward is a Price object
+        duration can be a timedelta, or an object castable to an int
+
+        keywords may be a list of strings or a comma-separated string;
+        approval_delay is a duration like *duration*; qual_req is a
+        qualification requirement object exposing get_as_params().
+        """
+        params = dict(
+            Title=title,
+            Description=description,
+            AssignmentDurationInSeconds=
+                self.duration_as_seconds(duration),
+            )
+        # Reward is flattened into Reward.1.* request parameters.
+        params.update(MTurkConnection.get_price_as_price(reward).get_as_params('Reward'))
+
+        if keywords:
+            params['Keywords'] = self.get_keywords_as_string(keywords)
+
+        if approval_delay is not None:
+            d = self.duration_as_seconds(approval_delay)
+            params['AutoApprovalDelayInSeconds'] = d
+
+        if qual_req is not None:
+            params.update(qual_req.get_as_params())
+
+        return self._process_request('RegisterHITType', params)
+
+    def set_email_notification(self, hit_type, email, event_types=None):
+        """
+        Performs a SetHITTypeNotification operation to set email
+        notification for a specified HIT type
+        """
+        return self._set_notification(hit_type, 'Email', email, event_types)
+
+    def set_rest_notification(self, hit_type, url, event_types=None):
+        """
+        Performs a SetHITTypeNotification operation to set REST notification
+        for a specified HIT type
+        """
+        return self._set_notification(hit_type, 'REST', url, event_types)
+
+    def _set_notification(self, hit_type, transport, destination, event_types=None):
+        """
+        Common SetHITTypeNotification operation to set notification for a
+        specified HIT type
+
+        transport is 'Email' or 'REST'; destination is the address/URL;
+        event_types is an optional list of event type names.
+        """
+        # NOTE(review): rejects unicode hit_type values under Python 2;
+        # confirm callers always pass byte strings.
+        assert type(hit_type) is str, "hit_type argument should be a string."
+
+        params = {'HITTypeId': hit_type}
+
+        # from the Developer Guide:
+        # The 'Active' parameter is optional. If omitted, the active status of
+        # the HIT type's notification specification is unchanged. All HIT types
+        # begin with their notification specifications in the "inactive" status.
+        notification_params = {'Destination': destination,
+                               'Transport': transport,
+                               'Version': boto.mturk.notification.NotificationMessage.NOTIFICATION_VERSION,
+                               'Active': True,
+                               }
+
+        # add specific event types if required
+        if event_types:
+            self.build_list_params(notification_params, event_types, 'EventType')
+
+        # Set up dict of 'Notification.1.Transport' etc. values
+        # (only one notification spec is sent, so num stays 1).
+        notification_rest_params = {}
+        num = 1
+        for key in notification_params:
+            notification_rest_params['Notification.%d.%s' % (num, key)] = notification_params[key]
+
+        # Update main params dict
+        params.update(notification_rest_params)
+
+        # Execute operation
+        return self._process_request('SetHITTypeNotification', params)
+
+    def create_hit(self, hit_type=None, question=None,
+                   lifetime=datetime.timedelta(days=7),
+                   max_assignments=1,
+                   title=None, description=None, keywords=None,
+                   reward=None, duration=datetime.timedelta(days=7),
+                   approval_delay=None, annotation=None,
+                   questions=None, qualifications=None,
+                   response_groups=None):
+        """
+        Creates a new HIT.
+        Returns a ResultSet
+        See: http://docs.amazonwebservices.com/AWSMechanicalTurkRequester/2006-10-31/ApiReference_CreateHITOperation.html
+
+        Exactly one of *question* (a single Question, QuestionForm, or
+        ExternalQuestion) or *questions* (a list wrapped in a
+        QuestionForm) must be given.  When *hit_type* is supplied, the
+        title/description/keywords/reward/duration arguments are
+        ignored in favour of the registered HIT type.
+        """
+
+        # handle single or multiple questions
+        neither = question is None and questions is None
+        both = question is not None and questions is not None
+        if neither or both:
+            raise ValueError("Must specify either question (single Question instance) or questions (list or QuestionForm instance), but not both")
+
+        # Wrap in a QuestionForm unless the caller already supplied a
+        # QuestionForm or ExternalQuestion, which serialize themselves.
+        if question:
+            questions = [question]
+        question_param = QuestionForm(questions)
+        if isinstance(question, QuestionForm):
+            question_param = question
+        elif isinstance(question, ExternalQuestion):
+            question_param = question
+
+        # Handle basic required arguments and set up params dict
+        params = {'Question': question_param.get_as_xml(),
+                  'LifetimeInSeconds' :
+                      self.duration_as_seconds(lifetime),
+                  'MaxAssignments' : max_assignments,
+                  }
+
+        # if hit type specified then add it
+        # else add the additional required parameters
+        if hit_type:
+            params['HITTypeId'] = hit_type
+        else:
+            # Handle keywords
+            final_keywords = MTurkConnection.get_keywords_as_string(keywords)
+
+            # Handle price argument
+            final_price = MTurkConnection.get_price_as_price(reward)
+
+            final_duration = self.duration_as_seconds(duration)
+
+            additional_params = dict(
+                Title=title,
+                Description=description,
+                Keywords=final_keywords,
+                AssignmentDurationInSeconds=final_duration,
+                )
+            additional_params.update(final_price.get_as_params('Reward'))
+
+            if approval_delay is not None:
+                d = self.duration_as_seconds(approval_delay)
+                additional_params['AutoApprovalDelayInSeconds'] = d
+
+            # add these params to the others
+            params.update(additional_params)
+
+        # add the annotation if specified
+        if annotation is not None:
+            params['RequesterAnnotation'] = annotation
+
+        # Add the Qualifications if specified
+        if qualifications is not None:
+            params.update(qualifications.get_as_params())
+
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        # Submit
+        return self._process_request('CreateHIT', params, [('HIT', HIT),])
+
+    def change_hit_type_of_hit(self, hit_id, hit_type):
+        """
+        Change the HIT type of an existing HIT. Note that the reward associated
+        with the new HIT type must match the reward of the current HIT type in
+        order for the operation to be valid.
+        \thit_id is a string
+        \thit_type is a string
+        """
+        params = {'HITId' : hit_id,
+                  'HITTypeId': hit_type}
+
+        return self._process_request('ChangeHITTypeOfHIT', params)
+
+    def get_reviewable_hits(self, hit_type=None, status='Reviewable',
+                            sort_by='Expiration', sort_direction='Ascending',
+                            page_size=10, page_number=1):
+        """
+        Retrieve the HITs that have a status of Reviewable, or HITs that
+        have a status of Reviewing, and that belong to the Requester
+        calling the operation.
+        """
+        params = {'Status' : status,
+                  'SortProperty' : sort_by,
+                  'SortDirection' : sort_direction,
+                  'PageSize' : page_size,
+                  'PageNumber' : page_number}
+
+        # Handle optional hit_type argument
+        if hit_type is not None:
+            params.update({'HITTypeId': hit_type})
+
+        return self._process_request('GetReviewableHITs', params, [('HIT', HIT),])
+
+    @staticmethod
+    def _get_pages(page_size, total_records):
+        """
+        Given a page size (records per page) and a total number of
+        records, return the page numbers to be retrieved.
+        """
+        # Ceiling division via Python 2 integer '/' plus a 1 for any
+        # remainder; returns list of 1-based page numbers.
+        pages = total_records/page_size+bool(total_records%page_size)
+        return range(1, pages+1)
+
+
+    def get_all_hits(self):
+        """
+        Return all of a Requester's HITs
+
+        Despite what search_hits says, it does not return all hits, but
+        instead returns a page of hits. This method will pull the hits
+        from the server 100 at a time, but will yield the results
+        iteratively, so subsequent requests are made on demand.
+        """
+        page_size = 100
+        search_rs = self.search_hits(page_size=page_size)
+        total_records = int(search_rs.TotalNumResults)
+        # NOTE: 'lambda(page)' and itertools.imap are Python-2-only
+        # constructs; pages are fetched lazily as the chain is consumed.
+        get_page_hits = lambda(page): self.search_hits(page_size=page_size, page_number=page)
+        page_nums = self._get_pages(page_size, total_records)
+        hit_sets = itertools.imap(get_page_hits, page_nums)
+        return itertools.chain.from_iterable(hit_sets)
+
+    def search_hits(self, sort_by='CreationTime', sort_direction='Ascending',
+                    page_size=10, page_number=1, response_groups=None):
+        """
+        Return a page of a Requester's HITs, on behalf of the Requester.
+        The operation returns HITs of any status, except for HITs that
+        have been disposed with the DisposeHIT operation.
+        Note:
+        The SearchHITs operation does not accept any search parameters
+        that filter the results.
+        """
+        params = {'SortProperty' : sort_by,
+                  'SortDirection' : sort_direction,
+                  'PageSize' : page_size,
+                  'PageNumber' : page_number}
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+
+        return self._process_request('SearchHITs', params, [('HIT', HIT),])
+
+    def get_assignments(self, hit_id, status=None,
+                        sort_by='SubmitTime', sort_direction='Ascending',
+                        page_size=10, page_number=1, response_groups=None):
+        """
+        Retrieves completed assignments for a HIT.
+        Use this operation to retrieve the results for a HIT.
+
+        The returned ResultSet will have the following attributes:
+
+        NumResults
+                The number of assignments on the page in the filtered results
+                list, equivalent to the number of assignments being returned
+                by this call.
+                A non-negative integer
+        PageNumber
+                The number of the page in the filtered results list being
+                returned.
+                A positive integer
+        TotalNumResults
+                The total number of HITs in the filtered results list based
+                on this call.
+                A non-negative integer
+
+        The ResultSet will contain zero or more Assignment objects
+
+        """
+        params = {'HITId' : hit_id,
+                  'SortProperty' : sort_by,
+                  'SortDirection' : sort_direction,
+                  'PageSize' : page_size,
+                  'PageNumber' : page_number}
+
+        # Optional filter: 'Submitted', 'Approved', or 'Rejected'.
+        if status is not None:
+            params['AssignmentStatus'] = status
+
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        return self._process_request('GetAssignmentsForHIT', params,
+                                     [('Assignment', Assignment),])
+
+    def approve_assignment(self, assignment_id, feedback=None):
+        """
+        Approve a submitted assignment, paying the Worker the reward.
+        *feedback* is an optional message shown to the Worker.
+        """
+        params = {'AssignmentId' : assignment_id,}
+        if feedback:
+            params['RequesterFeedback'] = feedback
+        return self._process_request('ApproveAssignment', params)
+
+    def reject_assignment(self, assignment_id, feedback=None):
+        """
+        Reject a submitted assignment; the Worker is not paid.
+        *feedback* is an optional message shown to the Worker.
+        """
+        params = {'AssignmentId' : assignment_id,}
+        if feedback:
+            params['RequesterFeedback'] = feedback
+        return self._process_request('RejectAssignment', params)
+
+    def get_hit(self, hit_id, response_groups=None):
+        """
+        Retrieve a single HIT by id, returned as a ResultSet
+        containing one HIT object.
+        """
+        params = {'HITId' : hit_id,}
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        return self._process_request('GetHIT', params, [('HIT', HIT),])
+
+    def set_reviewing(self, hit_id, revert=None):
+        """
+        Update a HIT with a status of Reviewable to have a status of Reviewing,
+        or reverts a Reviewing HIT back to the Reviewable status.
+
+        Only HITs with a status of Reviewable can be updated with a status of
+        Reviewing.  Similarly, only Reviewing HITs can be reverted back to a
+        status of Reviewable.
+        """
+        params = {'HITId' : hit_id,}
+        if revert:
+            params['Revert'] = revert
+        return self._process_request('SetHITAsReviewing', params)
+
+    def disable_hit(self, hit_id, response_groups=None):
+        """
+        Remove a HIT from the Mechanical Turk marketplace, approves all
+        submitted assignments that have not already been approved or rejected,
+        and disposes of the HIT and all assignment data.
+
+        Assignments for the HIT that have already been submitted, but not yet
+        approved or rejected, will be automatically approved. Assignments in
+        progress at the time of the call to DisableHIT will be approved once
+        the assignments are submitted. You will be charged for approval of
+        these assignments.  DisableHIT completely disposes of the HIT and
+        all submitted assignment data. Assignment results data cannot be
+        retrieved for a HIT that has been disposed.
+
+        It is not possible to re-enable a HIT once it has been disabled.
+        To make the work from a disabled HIT available again, create a new HIT.
+        """
+        params = {'HITId' : hit_id,}
+        # Handle optional response groups argument
+        if response_groups:
+            self.build_list_params(params, response_groups, 'ResponseGroup')
+
+        return self._process_request('DisableHIT', params)
+
+    def dispose_hit(self, hit_id):
+        """
+        Dispose of a HIT that is no longer needed.
+
+        Only HITs in the "reviewable" state, with all submitted
+        assignments approved or rejected, can be disposed. A Requester
+        can call GetReviewableHITs to determine which HITs are
+        reviewable, then call GetAssignmentsForHIT to retrieve the
+        assignments.  Disposing of a HIT removes the HIT from the
+        results of a call to GetReviewableHITs.  """
+        params = {'HITId' : hit_id,}
+        return self._process_request('DisposeHIT', params)
+
+    def expire_hit(self, hit_id):
+
+        """
+        Expire a HIT that is no longer needed.
+
+        The effect is identical to the HIT expiring on its own. The
+        HIT no longer appears on the Mechanical Turk web site, and no
+        new Workers are allowed to accept the HIT. Workers who have
+        accepted the HIT prior to expiration are allowed to complete
+        it or return it, or allow the assignment duration to elapse
+        (abandon the HIT). Once all remaining assignments have been
+        submitted, the expired HIT becomes "reviewable", and will be
+        returned by a call to GetReviewableHITs.
+        """
+        params = {'HITId' : hit_id,}
+        return self._process_request('ForceExpireHIT', params)
+
+    def extend_hit(self, hit_id, assignments_increment=None, expiration_increment=None):
+        """
+        Increase the maximum number of assignments, or extend the
+        expiration date, of an existing HIT.
+
+        NOTE: If a HIT has a status of Reviewable and the HIT is
+        extended to make it Available, the HIT will not be returned by
+        GetReviewableHITs, and its submitted assignments will not be
+        returned by GetAssignmentsForHIT, until the HIT is Reviewable
+        again.  Assignment auto-approval will still happen on its
+        original schedule, even if the HIT has been extended. Be sure
+        to retrieve and approve (or reject) submitted assignments
+        before extending the HIT, if so desired.
+        """
+        # must provide assignment *or* expiration increment
+        if (assignments_increment is None and expiration_increment is None) or \
+           (assignments_increment is not None and expiration_increment is not None):
+            raise ValueError("Must specify either assignments_increment or expiration_increment, but not both")
+
+        params = {'HITId' : hit_id,}
+        if assignments_increment:
+            params['MaxAssignmentsIncrement'] = assignments_increment
+        if expiration_increment:
+            params['ExpirationIncrementInSeconds'] = expiration_increment
+
+        return self._process_request('ExtendHIT', params)
+
+    def get_help(self, about, help_type='Operation'):
+        """
+        Return information about the Mechanical Turk Service
+        operations and response group NOTE - this is basically useless
+        as it just returns the URL of the documentation
+
+        help_type: either 'Operation' or 'ResponseGroup'
+        """
+        params = {'About': about, 'HelpType': help_type,}
+        return self._process_request('Help', params)
+
+    def grant_bonus(self, worker_id, assignment_id, bonus_price, reason):
+        """
+        Issues a payment of money from your account to a Worker.  To
+        be eligible for a bonus, the Worker must have submitted
+        results for one of your HITs, and have had those results
+        approved or rejected. This payment happens separately from the
+        reward you pay to the Worker when you approve the Worker's
+        assignment.  The Bonus must be passed in as an instance of the
+        Price object.
+        """
+        params = bonus_price.get_as_params('BonusAmount', 1)
+        params['WorkerId'] = worker_id
+        params['AssignmentId'] = assignment_id
+        params['Reason'] = reason
+
+        return self._process_request('GrantBonus', params)
+
+    def block_worker(self, worker_id, reason):
+        """
+        Block a worker from working on my tasks.
+        """
+        params = {'WorkerId': worker_id, 'Reason': reason}
+
+        return self._process_request('BlockWorker', params)
+
+    def unblock_worker(self, worker_id, reason):
+        """
+        Unblock a worker from working on my tasks.
+        """
+        params = {'WorkerId': worker_id, 'Reason': reason}
+
+        return self._process_request('UnblockWorker', params)
+
+    def notify_workers(self, worker_ids, subject, message_text):
+        """
+        Send a text message to workers.
+
+        NOTE(review): worker_ids is passed straight through as the
+        'WorkerId' parameter; confirm the query layer expands a list
+        into WorkerId.1, WorkerId.2, ... as the API requires.
+        """
+        params = {'WorkerId' : worker_ids,
+                  'Subject' : subject,
+                  'MessageText': message_text}
+
+        return self._process_request('NotifyWorkers', params)
+
+ def create_qualification_type(self,
+ name,
+ description,
+ status,
+ keywords=None,
+ retry_delay=None,
+ test=None,
+ answer_key=None,
+ answer_key_xml=None,
+ test_duration=None,
+ auto_granted=False,
+ auto_granted_value=1):
+ """
+ Create a new Qualification Type.
+
+ name: This will be visible to workers and must be unique for a
+ given requester.
+
+ description: description shown to workers. Max 2000 characters.
+
+ status: 'Active' or 'Inactive'
+
+ keywords: list of keyword strings or comma separated string.
+ Max length of 1000 characters when concatenated with commas.
+
+ retry_delay: number of seconds after requesting a
+ qualification the worker must wait before they can ask again.
+ If not specified, workers can only request this qualification
+ once.
+
+ test: a QuestionForm
+
+ answer_key: an XML string of your answer key, for automatically
+ scored qualification tests.
+ (Consider implementing an AnswerKey class for this to support.)
+
+ test_duration: the number of seconds a worker has to complete the test.
+
+ auto_granted: if True, requests for the Qualification are granted immediately.
+ Can't coexist with a test.
+
+ auto_granted_value: auto_granted qualifications are given this value.
+
+ """
+
+ params = {'Name' : name,
+ 'Description' : description,
+ 'QualificationTypeStatus' : status,
+ }
+ if retry_delay is not None:
+ params['RetryDelay'] = retry_delay
+
+ if test is not None:
+ assert(isinstance(test, QuestionForm))
+ assert(test_duration is not None)
+ params['Test'] = test.get_as_xml()
+
+ if test_duration is not None:
+ params['TestDuration'] = test_duration
+
+ if answer_key is not None:
+ if isinstance(answer_key, basestring):
+ params['AnswerKey'] = answer_key # xml
+ else:
+ raise TypeError
+ # Eventually someone will write an AnswerKey class.
+
+ if auto_granted:
+ assert(test is False)
+ params['AutoGranted'] = True
+ params['AutoGrantedValue'] = auto_granted_value
+
+ if keywords:
+ params['Keywords'] = self.get_keywords_as_string(keywords)
+
+ return self._process_request('CreateQualificationType', params,
+ [('QualificationType', QualificationType),])
+
+ def get_qualification_type(self, qualification_type_id):
+ params = {'QualificationTypeId' : qualification_type_id }
+ return self._process_request('GetQualificationType', params,
+ [('QualificationType', QualificationType),])
+
+ def get_qualifications_for_qualification_type(self, qualification_type_id):
+ params = {'QualificationTypeId' : qualification_type_id }
+ return self._process_request('GetQualificationsForQualificationType', params,
+ [('QualificationType', QualificationType),])
+
+ def update_qualification_type(self, qualification_type_id,
+ description=None,
+ status=None,
+ retry_delay=None,
+ test=None,
+ answer_key=None,
+ test_duration=None,
+ auto_granted=None,
+ auto_granted_value=None):
+
+ params = {'QualificationTypeId' : qualification_type_id }
+
+ if description is not None:
+ params['Description'] = description
+
+ if status is not None:
+ params['QualificationTypeStatus'] = status
+
+ if retry_delay is not None:
+ params['RetryDelay'] = retry_delay
+
+ if test is not None:
+ assert(isinstance(test, QuestionForm))
+ params['Test'] = test.get_as_xml()
+
+ if test_duration is not None:
+ params['TestDuration'] = test_duration
+
+ if answer_key is not None:
+ if isinstance(answer_key, basestring):
+ params['AnswerKey'] = answer_key # xml
+ else:
+ raise TypeError
+ # Eventually someone will write an AnswerKey class.
+
+ if auto_granted is not None:
+ params['AutoGranted'] = auto_granted
+
+ if auto_granted_value is not None:
+ params['AutoGrantedValue'] = auto_granted_value
+
+ return self._process_request('UpdateQualificationType', params,
+ [('QualificationType', QualificationType),])
+
+ def dispose_qualification_type(self, qualification_type_id):
+ """TODO: Document."""
+ params = {'QualificationTypeId' : qualification_type_id}
+ return self._process_request('DisposeQualificationType', params)
+
+ def search_qualification_types(self, query=None, sort_by='Name',
+ sort_direction='Ascending', page_size=10,
+ page_number=1, must_be_requestable=True,
+ must_be_owned_by_caller=True):
+ """TODO: Document."""
+ params = {'Query' : query,
+ 'SortProperty' : sort_by,
+ 'SortDirection' : sort_direction,
+ 'PageSize' : page_size,
+ 'PageNumber' : page_number,
+ 'MustBeRequestable' : must_be_requestable,
+ 'MustBeOwnedByCaller' : must_be_owned_by_caller}
+ return self._process_request('SearchQualificationTypes', params,
+ [('QualificationType', QualificationType),])
+
+ def get_qualification_requests(self, qualification_type_id,
+ sort_by='Expiration',
+ sort_direction='Ascending', page_size=10,
+ page_number=1):
+ """TODO: Document."""
+ params = {'QualificationTypeId' : qualification_type_id,
+ 'SortProperty' : sort_by,
+ 'SortDirection' : sort_direction,
+ 'PageSize' : page_size,
+ 'PageNumber' : page_number}
+ return self._process_request('GetQualificationRequests', params,
+ [('QualificationRequest', QualificationRequest),])
+
+ def grant_qualification(self, qualification_request_id, integer_value=1):
+ """TODO: Document."""
+ params = {'QualificationRequestId' : qualification_request_id,
+ 'IntegerValue' : integer_value}
+ return self._process_request('GrantQualification', params)
+
+ def revoke_qualification(self, subject_id, qualification_type_id,
+ reason=None):
+ """TODO: Document."""
+ params = {'SubjectId' : subject_id,
+ 'QualificationTypeId' : qualification_type_id,
+ 'Reason' : reason}
+ return self._process_request('RevokeQualification', params)
+
+ def assign_qualification(self, qualification_type_id, worker_id,
+ value=1, send_notification=True):
+ params = {'QualificationTypeId' : qualification_type_id,
+ 'WorkerId' : worker_id,
+ 'IntegerValue' : value,
+ 'SendNotification' : send_notification, }
+ return self._process_request('AssignQualification', params)
+
+ def _process_request(self, request_type, params, marker_elems=None):
+ """
+ Helper to process the xml response from AWS
+ """
+ response = self.make_request(request_type, params, verb='POST')
+ return self._process_response(response, marker_elems)
+
+ def _process_response(self, response, marker_elems=None):
+ """
+ Helper to process the xml response from AWS
+ """
+ body = response.read()
+ #print body
+ if '<Errors>' not in body:
+ rs = ResultSet(marker_elems)
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs
+ else:
+ raise MTurkRequestError(response.status, response.reason, body)
+
+ @staticmethod
+ def get_keywords_as_string(keywords):
+ """
+ Returns a comma+space-separated string of keywords from either
+ a list or a string
+ """
+ if type(keywords) is list:
+ keywords = ', '.join(keywords)
+ if type(keywords) is str:
+ final_keywords = keywords
+ elif type(keywords) is unicode:
+ final_keywords = keywords.encode('utf-8')
+ elif keywords is None:
+ final_keywords = ""
+ else:
+ raise TypeError("keywords argument must be a string or a list of strings; got a %s" % type(keywords))
+ return final_keywords
+
+ @staticmethod
+ def get_price_as_price(reward):
+ """
+ Returns a Price data structure from either a float or a Price
+ """
+ if isinstance(reward, Price):
+ final_price = reward
+ else:
+ final_price = Price(reward)
+ return final_price
+
+ @staticmethod
+ def duration_as_seconds(duration):
+ if isinstance(duration, datetime.timedelta):
+ duration = duration.days*86400 + duration.seconds
+ try:
+ duration = int(duration)
+ except TypeError:
+ raise TypeError("Duration must be a timedelta or int-castable, got %s" % type(duration))
+ return duration
+
class BaseAutoResultElement:
    """
    Base class to automatically add attributes when parsing XML
    """
    def __init__(self, connection):
        # connection is accepted for SAX-handler interface compatibility
        # but is not stored
        pass

    def startElement(self, name, attrs, connection):
        # nothing to do on opening tags; values arrive via endElement
        return None

    def endElement(self, name, value, connection):
        # every closing element becomes an instance attribute of the
        # same name
        setattr(self, name, value)
+
class HIT(BaseAutoResultElement):
    """
    Represents a HIT parsed from a response (used in ResultSet).

    Attributes are populated dynamically with the element names from
    the Developer Guide, e.g. HITId, HITTypeId, CreationTime.
    """

    def _has_expired(self):
        """Return True once the HIT's Expiration timestamp has passed."""
        if not hasattr(self, 'Expiration'):
            raise ValueError("ERROR: Request for expired property, but no Expiration in HIT!")
        when = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')
        return datetime.datetime.utcnow() >= when

    # are we there yet?
    expired = property(_has_expired)
+
class QualificationType(BaseAutoResultElement):
    """
    Represents a QualificationType structure parsed from a response
    (used in ResultSet).

    Attributes follow the Developer Guide element names, e.g.
    QualificationTypeId, CreationTime, Name.
    """

    pass
+
class QualificationRequest(BaseAutoResultElement):
    """
    Represents a QualificationRequest structure parsed from a response
    (used in ResultSet).

    Attributes follow the Developer Guide element names, e.g.
    QualificationRequestId, QualificationTypeId, SubjectId.

    TODO: Ensure that Test and Answer attribute are treated properly if
    the qualification requires a test.  These attributes are
    XML-encoded.
    """

    pass
+
class Assignment(BaseAutoResultElement):
    """
    Represents an Assignment structure parsed from a response (used in
    ResultSet).

    Attributes follow the Developer Guide element names, e.g.
    AssignmentId, WorkerId, HITId, Answer.  Parsed answer documents are
    collected in ``self.answers``.
    """

    def __init__(self, connection):
        BaseAutoResultElement.__init__(self, connection)
        self.answers = []

    def endElement(self, name, value, connection):
        if name != 'Answer':
            BaseAutoResultElement.endElement(self, name, value, connection)
            return
        # The Answer element carries an embedded XML document, so it
        # must be parsed independently.
        answer_rs = ResultSet([('Answer', QuestionFormAnswer)])
        xml.sax.parseString(connection.get_utf8_value(value),
                            handler.XmlHandler(answer_rs, connection))
        self.answers.append(answer_rs)
+
class QuestionFormAnswer(BaseAutoResultElement):
    """
    Extracts Answers from the embedded QuestionFormAnswers XML inside
    the Answer element of an Assignment structure.

    A QuestionFormAnswers element contains one Answer element per
    question the Worker answered.  Each Answer carries a
    QuestionIdentifier matching a Question in the QuestionForm, and the
    Worker's value as FreeText or SelectionIdentifier.

    Collected (question_id, value) pairs end up in ``self.fields``.

    *NOTE* - currently really only supports free-text and selection
    answers.
    """

    def __init__(self, connection):
        BaseAutoResultElement.__init__(self, connection)
        self.fields = []
        self.qid = None

    def endElement(self, name, value, connection):
        if name == 'QuestionIdentifier':
            self.qid = value
        elif self.qid and name in ('FreeText', 'SelectionIdentifier'):
            self.fields.append((self.qid, value))
        elif name == 'Answer':
            # closing an Answer resets the current question context
            self.qid = None
diff --git a/boto/mturk/notification.py b/boto/mturk/notification.py
new file mode 100644
index 0000000..2aa99ca
--- /dev/null
+++ b/boto/mturk/notification.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Provides NotificationMessage and Event classes, with utility methods, for
+implementations of the Mechanical Turk Notification API.
+"""
+
+import hmac
+try:
+ from hashlib import sha1 as sha
+except ImportError:
+ import sha
+import base64
+import re
+
class NotificationMessage:
    """
    A Mechanical Turk REST notification message: its signature,
    timestamp, version, and the list of Event objects it carries.
    """

    NOTIFICATION_WSDL = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurk/2006-05-05/AWSMechanicalTurkRequesterNotification.wsdl"
    NOTIFICATION_VERSION = '2006-05-05'

    SERVICE_NAME = "AWSMechanicalTurkRequesterNotification"
    OPERATION_NAME = "Notify"

    EVENT_PATTERN = r"Event\.(?P<n>\d+)\.(?P<param>\w+)"
    EVENT_RE = re.compile(EVENT_PATTERN)

    def __init__(self, d):
        """
        Constructor; expects parameter d to be a dict of string parameters from a REST transport notification message
        """
        self.signature = d['Signature'] # vH6ZbE0NhkF/hfNyxz2OgmzXYKs=
        self.timestamp = d['Timestamp'] # 2006-05-23T23:22:30Z
        self.version = d['Version'] # 2006-05-05
        assert d['method'] == NotificationMessage.OPERATION_NAME, "Method should be '%s'" % NotificationMessage.OPERATION_NAME

        # Build Events
        self.events = []
        events_dict = {}
        if 'Event' in d:
            # TurboGears surprised me by 'doing the right thing' and making { 'Event': { '1': { 'EventType': ... } } } etc.
            events_dict = d['Event']
        else:
            # flat 'Event.<n>.<param>' keys: regroup them by event number
            for k in d:
                v = d[k]
                if k.startswith('Event.'):
                    ed = NotificationMessage.EVENT_RE.search(k).groupdict()
                    n = int(ed['n'])
                    param = str(ed['param'])
                    if n not in events_dict:
                        events_dict[n] = {}
                    events_dict[n][param] = v
        for n in events_dict:
            self.events.append(Event(events_dict[n]))

    def verify(self, secret_key):
        """
        Verifies the authenticity of a notification message by
        recomputing the HMAC-SHA1 signature with the requester's AWS
        secret key.

        Bug fix: the original referenced ``self._auth_handler``, which
        is never set on this class, so verify() always raised
        AttributeError.  The module-level hmac/sha/base64 imports show
        the intended computation.
        """
        verification_input = (NotificationMessage.SERVICE_NAME +
                              NotificationMessage.OPERATION_NAME +
                              self.timestamp)
        # hmac requires bytes on Python 3; encoding is a no-op for
        # ASCII byte strings on Python 2.
        if not isinstance(secret_key, bytes):
            secret_key = secret_key.encode('utf-8')
        h = hmac.new(secret_key, verification_input.encode('utf-8'), sha)
        signature_calc = base64.b64encode(h.digest()).decode('utf-8')
        return self.signature == signature_calc
+
class Event:
    """
    A single notification event: type, time string, HIT type id, HIT
    id, and (when present) the assignment id.
    """

    def __init__(self, d):
        self.event_type = d['EventType']
        self.event_time_str = d['EventTime']
        self.hit_type = d['HITTypeId']
        self.hit_id = d['HITId']
        # AssignmentId is not present in all event types; only set the
        # attribute when the message supplied it.
        if 'AssignmentId' in d:
            self.assignment_id = d['AssignmentId']

        #TODO: build self.event_time datetime from string self.event_time_str

    def __repr__(self):
        return "<boto.mturk.notification.Event: %s for HIT # %s>" % (self.event_type, self.hit_id)
diff --git a/boto/mturk/price.py b/boto/mturk/price.py
new file mode 100644
index 0000000..3c88a96
--- /dev/null
+++ b/boto/mturk/price.py
@@ -0,0 +1,48 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Price:
    """
    A monetary amount with a currency code, fillable from parsed XML
    and serializable to MTurk request parameters.
    """

    def __init__(self, amount=0.0, currency_code='USD'):
        self.amount = amount
        self.currency_code = currency_code
        self.formatted_price = ''

    def __repr__(self):
        # prefer the service-supplied formatted string when we have one
        return self.formatted_price or str(self.amount)

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'Amount':
            self.amount = float(value)
        elif name == 'CurrencyCode':
            self.currency_code = value
        elif name == 'FormattedPrice':
            self.formatted_price = value

    def get_as_params(self, label, ord=1):
        """Render as '<label>.<ord>.Amount' / '...CurrencyCode' params."""
        prefix = '%s.%d.' % (label, ord)
        return {prefix + 'Amount': str(self.amount),
                prefix + 'CurrencyCode': self.currency_code}
diff --git a/boto/mturk/qualification.py b/boto/mturk/qualification.py
new file mode 100644
index 0000000..6b620ec
--- /dev/null
+++ b/boto/mturk/qualification.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Qualifications:
    """
    Container of up to ten qualification requirements to attach to a
    HIT type.
    """

    def __init__(self, requirements=None):
        self.requirements = requirements if requirements is not None else []

    def add(self, req):
        self.requirements.append(req)

    def get_as_params(self):
        """Flatten all requirements into numbered request parameters."""
        assert(len(self.requirements) <= 10)
        params = {}
        for n, req in enumerate(self.requirements):
            prefix = 'QualificationRequirement.%s.' % (n + 1)
            for key, value in req.get_as_params().items():
                params[prefix + key] = value
        return params
+
+
class Requirement(object):
    """
    Representation of a single qualification requirement.
    """

    def __init__(self, qualification_type_id, comparator, integer_value=None, required_to_preview=False):
        self.qualification_type_id = qualification_type_id
        self.comparator = comparator
        self.integer_value = integer_value
        self.required_to_preview = required_to_preview

    def get_as_params(self):
        """Render the requirement as MTurk request parameters."""
        params = {'QualificationTypeId': self.qualification_type_id,
                  'Comparator': self.comparator}
        # 'Exists' comparisons take no value
        if self.integer_value is not None and self.comparator != 'Exists':
            params['IntegerValue'] = self.integer_value
        if self.required_to_preview:
            params['RequiredToPreview'] = "true"
        return params
+
class PercentAssignmentsSubmittedRequirement(Requirement):
    """
    The percentage of assignments the Worker has submitted over all
    assignments the Worker has accepted, as an integer from 0 to 100.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(PercentAssignmentsSubmittedRequirement, self).__init__(
            qualification_type_id="00000000000000000000",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class PercentAssignmentsAbandonedRequirement(Requirement):
    """
    The percentage of assignments the Worker has abandoned (allowed the
    deadline to elapse) over all assignments the Worker has accepted,
    as an integer from 0 to 100.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(PercentAssignmentsAbandonedRequirement, self).__init__(
            qualification_type_id="00000000000000000070",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class PercentAssignmentsReturnedRequirement(Requirement):
    """
    The percentage of assignments the Worker has returned over all
    assignments the Worker has accepted, as an integer from 0 to 100.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(PercentAssignmentsReturnedRequirement, self).__init__(
            qualification_type_id="000000000000000000E0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class PercentAssignmentsApprovedRequirement(Requirement):
    """
    The percentage of the Worker's submitted assignments that were
    subsequently approved by the Requester, as an integer from 0 to
    100.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(PercentAssignmentsApprovedRequirement, self).__init__(
            qualification_type_id="000000000000000000L0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class PercentAssignmentsRejectedRequirement(Requirement):
    """
    The percentage of the Worker's submitted assignments that were
    subsequently rejected by the Requester, as an integer from 0 to
    100.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(PercentAssignmentsRejectedRequirement, self).__init__(
            qualification_type_id="000000000000000000S0",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class NumberHitsApprovedRequirement(Requirement):
    """
    The total number of HITs submitted by a Worker that have been
    approved; an integer greater than or equal to 0.
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(NumberHitsApprovedRequirement, self).__init__(
            qualification_type_id="00000000000000000040",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
+
class LocaleRequirement(Requirement):
    """
    A Qualification requirement based on the Worker's location, as
    specified by the Worker when creating their Mechanical Turk
    account.
    """

    def __init__(self, comparator, locale, required_to_preview=False):
        super(LocaleRequirement, self).__init__(
            qualification_type_id="00000000000000000071",
            comparator=comparator,
            integer_value=None,
            required_to_preview=required_to_preview)
        self.locale = locale

    def get_as_params(self):
        # locale requirements send LocaleValue.Country instead of an
        # IntegerValue
        params = {'QualificationTypeId': self.qualification_type_id,
                  'Comparator': self.comparator,
                  'LocaleValue.Country': self.locale}
        if self.required_to_preview:
            params['RequiredToPreview'] = "true"
        return params
+
class AdultRequirement(Requirement):
    """
    Requires workers to acknowledge that they are over 18 and agree to
    work on potentially offensive content.  Boolean-valued: 1
    (required), 0 (not required, the default).
    """

    def __init__(self, comparator, integer_value, required_to_preview=False):
        super(AdultRequirement, self).__init__(
            qualification_type_id="00000000000000000060",
            comparator=comparator,
            integer_value=integer_value,
            required_to_preview=required_to_preview)
diff --git a/boto/mturk/question.py b/boto/mturk/question.py
new file mode 100644
index 0000000..b1556ad
--- /dev/null
+++ b/boto/mturk/question.py
@@ -0,0 +1,396 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Question(object):
    """
    A single question in a QuestionForm: identifier, content, answer
    specification, and optional display name / required flag.
    """
    template = "<Question>%(items)s</Question>"

    def __init__(self, identifier, content, answer_spec, is_required=False, display_name=None):
        self.identifier = identifier
        self.content = content
        self.answer_spec = answer_spec
        self.is_required = is_required
        self.display_name = display_name

    def get_as_params(self, label='Question'):
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        parts = [SimpleField('QuestionIdentifier', self.identifier),
                 SimpleField('IsRequired', str(self.is_required).lower()),
                 self.content,
                 self.answer_spec]
        if self.display_name is not None:
            # DisplayName goes right after the identifier
            parts.insert(1, SimpleField('DisplayName', self.display_name))
        items = ''.join(part.get_as_xml() for part in parts)
        return self.template % {'items': items}
+
# ValidatingXML validates rendered question XML against the published
# AWS schema when lxml is available; otherwise validate() is a no-op.
try:
    from lxml import etree
    class ValidatingXML(object):
        def validate(self):
            # fetch the schema from self.schema_url (set by subclasses)
            # and assert the rendered document conforms to it
            import urllib2
            schema_src_file = urllib2.urlopen(self.schema_url)
            schema_doc = etree.parse(schema_src_file)
            schema = etree.XMLSchema(schema_doc)
            doc = etree.fromstring(self.get_as_xml())
            schema.assertValid(doc)
except ImportError:
    # lxml not installed: skip validation entirely
    class ValidatingXML(object):
        def validate(self): pass
+
+
class ExternalQuestion(ValidatingXML):
    """
    An External Question: points a HIT at an externally hosted page
    rendered in a frame of the given height.
    """
    schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/ExternalQuestion.xsd"
    template = '<ExternalQuestion xmlns="%(schema_url)s"><ExternalURL>%%(external_url)s</ExternalURL><FrameHeight>%%(frame_height)s</FrameHeight></ExternalQuestion>' % vars()

    def __init__(self, external_url, frame_height):
        self.external_url = external_url
        self.frame_height = frame_height

    def get_as_params(self, label='ExternalQuestion'):
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        return self.template % {'external_url': self.external_url,
                                'frame_height': self.frame_height}
+
class XMLTemplate:
    """Mixin rendering the instance's attributes through ``template``."""
    def get_as_xml(self):
        return self.template % self.__dict__
+
class SimpleField(XMLTemplate, object):
    """
    A Simple name/value pair that can be easily rendered as XML.

    >>> SimpleField('Text', 'A text string').get_as_xml()
    '<Text>A text string</Text>'
    """
    # Bases reordered: ``(object, XMLTemplate)`` fails MRO construction
    # on Python 3, where XMLTemplate already derives from object (object
    # must come last).
    template = '<%(field)s>%(value)s</%(field)s>'

    def __init__(self, field, value):
        self.field = field
        self.value = value
+
class Binary(XMLTemplate, object):
    """
    A binary content element (image, audio, ...) identified by MIME
    type/subtype, data URL, and alt text.
    """
    # Bases reordered: ``(object, XMLTemplate)`` fails MRO construction
    # on Python 3 (object must come last).
    template = """<Binary><MimeType><Type>%(type)s</Type><SubType>%(subtype)s</SubType></MimeType><DataURL>%(url)s</DataURL><AltText>%(alt_text)s</AltText></Binary>"""
    def __init__(self, type, subtype, url, alt_text):
        self.type = type
        self.subtype = subtype
        self.url = url
        self.alt_text = alt_text
+
class List(list):
    """A bulleted list suitable for OrderedContent or Overview content"""
    def get_as_xml(self):
        body = ''.join('<ListItem>%s</ListItem>' % entry for entry in self)
        return '<List>%s</List>' % body
+
class Application(object):
    """
    Base class for embedded applications (applets, Flash movies) with a
    size and arbitrary named parameters.
    """
    template = "<Application><%(class_)s>%(content)s</%(class_)s></Application>"
    parameter_template = "<Name>%(name)s</Name><Value>%(value)s</Value>"

    def __init__(self, width, height, **parameters):
        self.width = width
        self.height = height
        self.parameters = parameters

    def get_inner_content(self, content):
        """Append Width, Height and all parameters to ``content``."""
        content.append_field('Width', self.width)
        content.append_field('Height', self.height)
        for name, value in self.parameters.items():
            rendered = self.parameter_template % {'name': name, 'value': value}
            content.append_field('ApplicationParameter', rendered)

    def get_as_xml(self):
        # the inner element is named after the concrete subclass
        inner = OrderedContent()
        self.get_inner_content(inner)
        return self.template % {'class_': self.__class__.__name__,
                                'content': inner.get_as_xml()}
+
class JavaApplet(Application):
    """A Java applet application element with its path and filename."""

    def __init__(self, path, filename, *args, **kwargs):
        self.path = path
        self.filename = filename
        super(JavaApplet, self).__init__(*args, **kwargs)

    def get_inner_content(self, content):
        # Bug fix: append to the caller-supplied container.  The
        # previous code rebound ``content`` to a fresh OrderedContent,
        # so the applet fields (and the width/height added by the
        # superclass) never reached the rendered XML.
        content.append_field('AppletPath', self.path)
        content.append_field('AppletFilename', self.filename)
        super(JavaApplet, self).get_inner_content(content)
+
class Flash(Application):
    """A Flash movie application element with its movie URL."""

    def __init__(self, url, *args, **kwargs):
        self.url = url
        super(Flash, self).__init__(*args, **kwargs)

    def get_inner_content(self, content):
        # Bug fix: append to the caller-supplied container.  The
        # previous code rebound ``content`` to a fresh OrderedContent,
        # so FlashMovieURL (and the width/height added by the
        # superclass) never reached the rendered XML.
        content.append_field('FlashMovieURL', self.url)
        super(Flash, self).get_inner_content(content)
+
class FormattedContent(XMLTemplate, object):
    """
    An XHTML-subset content block wrapped in CDATA.
    """
    # Bases reordered: ``(object, XMLTemplate)`` fails MRO construction
    # on Python 3 (object must come last).
    schema_url = 'http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2006-07-14/FormattedContentXHTMLSubset.xsd'
    template = '<FormattedContent><![CDATA[%(content)s]]></FormattedContent>'
    def __init__(self, content):
        self.content = content
+
class OrderedContent(list):
    """A list of content items rendered as XML in insertion order."""

    def append_field(self, field, value):
        self.append(SimpleField(field, value))

    def get_as_xml(self):
        return ''.join(entry.get_as_xml() for entry in self)
+
class Overview(OrderedContent):
    """Overview section of a QuestionForm."""
    template = '<Overview>%(content)s</Overview>'

    def get_as_params(self, label='Overview'):
        return {label: self.get_as_xml()}

    def get_as_xml(self):
        inner = super(Overview, self).get_as_xml()
        return self.template % {'content': inner}
+
class QuestionForm(ValidatingXML, list):
    """
    From the AMT API docs:

    The top-most element of the QuestionForm data structure is a
    QuestionForm element.  It contains optional Overview elements and
    one or more Question elements, in any order and any number:

    <QuestionForm xmlns="[the QuestionForm schema URL]">
        <Overview>[...]</Overview>
        <Question>[...]</Question>
        <Overview>[...]</Overview>
        <Question>[...]</Question>
        [...]
    </QuestionForm>

    QuestionForm is implemented as a list, so to construct a
    QuestionForm, simply append Questions and Overviews (with at least
    one Question).
    """
    schema_url = "http://mechanicalturk.amazonaws.com/AWSMechanicalTurkDataSchemas/2005-10-01/QuestionForm.xsd"
    xml_template = """<QuestionForm xmlns="%(schema_url)s">%%(items)s</QuestionForm>""" % vars()

    def is_valid(self):
        """True when the form holds only Question/Overview items and at
        least one Question."""
        has_question = any(isinstance(item, Question) for item in self)
        only_allowed = all(isinstance(item, (Question, Overview)) for item in self)
        return has_question and only_allowed

    def get_as_xml(self):
        assert self.is_valid(), "QuestionForm contains invalid elements"
        rendered = ''.join(item.get_as_xml() for item in self)
        return self.xml_template % {'items': rendered}
+
class QuestionContent(OrderedContent):
    """Content section of a single Question."""
    template = '<QuestionContent>%(content)s</QuestionContent>'

    def get_as_xml(self):
        inner = super(QuestionContent, self).get_as_xml()
        return self.template % {'content': inner}
+
class AnswerSpecification(object):
    """Wraps a concrete answer spec (free text, selection, upload)."""
    template = '<AnswerSpecification>%(spec)s</AnswerSpecification>'

    def __init__(self, spec):
        self.spec = spec

    def get_as_xml(self):
        return self.template % {'spec': self.spec.get_as_xml()}
+
class Constraints(OrderedContent):
    """Ordered collection of answer constraints."""
    template = '<Constraints>%(content)s</Constraints>'

    def get_as_xml(self):
        inner = super(Constraints, self).get_as_xml()
        return self.template % {'content': inner}
+
class Constraint(object):
    """
    Base class for answer constraints.  Subclasses define
    ``attribute_names``, ``attribute_values`` and ``template``.
    """

    def get_attributes(self):
        """Render non-None attributes as 'name="value"' pairs."""
        pairs = zip(self.attribute_names, self.attribute_values)
        # Bug fix: %s rather than %d -- RegExConstraint supplies
        # string-valued attributes, which %d rejected with a TypeError;
        # %s renders integer values identically.
        attrs = ' '.join(
            '%s="%s"' % (name, value)
            for (name, value) in pairs
            if value is not None
        )
        return attrs

    def get_as_xml(self):
        attrs = self.get_attributes()
        return self.template % vars()
+
class NumericConstraint(Constraint):
    """Restricts a free-text answer to a numeric range."""
    attribute_names = ('minValue', 'maxValue')
    template = '<IsNumeric %(attrs)s />'

    def __init__(self, min_value=None, max_value=None):
        self.attribute_values = (min_value, max_value)
+
class LengthConstraint(Constraint):
    """Restricts a free-text answer's length; rendered as a
    <Length/> element."""
    attribute_names = ('minLength', 'maxLength')
    template = '<Length %(attrs)s />'

    def __init__(self, min_length=None, max_length=None):
        self.attribute_values = (min_length, max_length)
+
class RegExConstraint(Constraint):
    """Validates a free-text answer against a regular expression;
    rendered as an <AnswerFormatRegex/> element."""
    attribute_names = ('regex', 'errorText', 'flags')
    template = '<AnswerFormatRegex %(attrs)s />'

    def __init__(self, pattern, error_text=None, flags=None):
        self.attribute_values = (pattern, error_text, flags)
+
class NumberOfLinesSuggestion(object):
    """Hint to the Worker UI about how many lines a free-text field
    should display (defaults to 1)."""
    template = '<NumberOfLinesSuggestion>%(num_lines)s</NumberOfLinesSuggestion>'

    def __init__(self, num_lines=1):
        self.num_lines = num_lines

    def get_as_xml(self):
        return self.template % {'num_lines': self.num_lines}
+
class FreeTextAnswer(object):
    """Answer specification for a free-text field.

    Optionally carries default text, a Constraints collection, and a
    suggested number of display lines.
    """
    template = '<FreeTextAnswer>%(items)s</FreeTextAnswer>'

    def __init__(self, default=None, constraints=None, num_lines=None):
        self.default = default
        if constraints is None:
            constraints = Constraints()
        self.constraints = Constraints(constraints)
        self.num_lines = num_lines

    def get_as_xml(self):
        # Bug fix: serialize the constraints supplied to __init__.
        # Previously a fresh empty Constraints() was built here, so any
        # constraints passed by the caller were silently dropped.
        items = [self.constraints]
        if self.default:
            items.append(SimpleField('DefaultText', self.default))
        if self.num_lines:
            items.append(NumberOfLinesSuggestion(self.num_lines))
        items_xml = ''.join(item.get_as_xml() for item in items)
        return self.template % {'items': items_xml}
+
class FileUploadAnswer(object):
    """Answer specification for a file upload, bounded by minimum and
    maximum byte sizes (AWS caps uploads at 2 GB)."""
    template = """<FileUploadAnswer><MinFileSizeInBytes>%(min_bytes)d</MinFileSizeInBytes><MaxFileSizeInBytes>%(max_bytes)d</MaxFileSizeInBytes></FileUploadAnswer>"""

    def __init__(self, min_bytes, max_bytes):
        # 0 <= min <= max <= 2GB
        assert 0 <= min_bytes <= max_bytes <= 2*10**9
        self.min_bytes = min_bytes
        self.max_bytes = max_bytes

    def get_as_xml(self):
        return self.template % {
            'min_bytes': self.min_bytes,
            'max_bytes': self.max_bytes,
        }
+
class SelectionAnswer(object):
    """
    A class to generate SelectionAnswer XML data structures.
    Does not yet implement Binary selection options.
    """
    SELECTIONANSWER_XML_TEMPLATE = """<SelectionAnswer>%s%s<Selections>%s</Selections></SelectionAnswer>""" # % (count_xml, style_xml, selections_xml)
    SELECTION_XML_TEMPLATE = """<Selection><SelectionIdentifier>%s</SelectionIdentifier>%s</Selection>""" # (identifier, value_xml)
    SELECTION_VALUE_XML_TEMPLATE = """<%s>%s</%s>""" # (type, value, type)
    STYLE_XML_TEMPLATE = """<StyleSuggestion>%s</StyleSuggestion>""" # (style)
    MIN_SELECTION_COUNT_XML_TEMPLATE = """<MinSelectionCount>%s</MinSelectionCount>""" # count
    MAX_SELECTION_COUNT_XML_TEMPLATE = """<MaxSelectionCount>%s</MaxSelectionCount>""" # count
    ACCEPTED_STYLES = ['radiobutton', 'dropdown', 'checkbox', 'list', 'combobox', 'multichooser']
    OTHER_SELECTION_ELEMENT_NAME = 'OtherSelection'

    def __init__(self, min=1, max=1, style=None, selections=None, type='text', other=False):
        # Validate the style suggestion against the known set.
        if style is None:
            self.style_suggestion = None
        elif style in SelectionAnswer.ACCEPTED_STYLES:
            self.style_suggestion = style
        else:
            raise ValueError("style '%s' not recognized; should be one of %s" % (style, ', '.join(SelectionAnswer.ACCEPTED_STYLES)))

        if selections is None:
            raise ValueError("SelectionAnswer.__init__(): selections must be a non-empty list of (content, identifier) tuples")
        self.selections = selections

        self.min_selections = min
        self.max_selections = max

        assert len(selections) >= self.min_selections, "# of selections is less than minimum of %d" % self.min_selections
        #assert len(selections) <= self.max_selections, "# of selections exceeds maximum of %d" % self.max_selections

        self.type = type
        self.other = other

    def get_as_xml(self):
        """Render the <SelectionAnswer> element for this specification."""
        if self.type == 'text':
            value_tag = "Text"
        elif self.type == 'binary':
            value_tag = "Binary"
        else:
            raise ValueError("illegal type: %s; must be either 'text' or 'binary'" % str(self.type))

        # One <Selection> element per (content, identifier) pair.
        parts = []
        for content, identifier in self.selections:
            value_xml = SelectionAnswer.SELECTION_VALUE_XML_TEMPLATE % (value_tag, content, value_tag)
            parts.append(SelectionAnswer.SELECTION_XML_TEMPLATE % (identifier, value_xml))
        selections_xml = ''.join(parts)

        if self.other:
            # add OtherSelection element as xml if available
            if hasattr(self.other, 'get_as_xml'):
                assert type(self.other) == FreeTextAnswer, 'OtherSelection can only be a FreeTextAnswer'
                selections_xml += self.other.get_as_xml().replace('FreeTextAnswer', 'OtherSelection')
            else:
                selections_xml += "<OtherSelection />"

        style_xml = ""
        if self.style_suggestion is not None:
            style_xml = SelectionAnswer.STYLE_XML_TEMPLATE % self.style_suggestion

        # radiobutton implies exactly one selection, so the count
        # elements are omitted for that style only.
        count_xml = ""
        if self.style_suggestion != 'radiobutton':
            count_xml = SelectionAnswer.MIN_SELECTION_COUNT_XML_TEMPLATE % self.min_selections
            count_xml += SelectionAnswer.MAX_SELECTION_COUNT_XML_TEMPLATE % self.max_selections

        return SelectionAnswer.SELECTIONANSWER_XML_TEMPLATE % (count_xml, style_xml, selections_xml)
+
diff --git a/boto/mturk/test/.gitignore b/boto/mturk/test/.gitignore
new file mode 100644
index 0000000..8917c2c
--- /dev/null
+++ b/boto/mturk/test/.gitignore
@@ -0,0 +1 @@
+local.py
diff --git a/boto/mturk/test/__init__.py b/boto/mturk/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/boto/mturk/test/__init__.py
diff --git a/boto/mturk/test/_init_environment.py b/boto/mturk/test/_init_environment.py
new file mode 100644
index 0000000..e709785
--- /dev/null
+++ b/boto/mturk/test/_init_environment.py
@@ -0,0 +1,24 @@
import os
import functools

# Defaults; any of these may be overridden by an optional local.py
# placed alongside this module.
live_connection = False
mturk_host = 'mechanicalturk.sandbox.amazonaws.com'
external_url = 'http://www.example.com/'

local = os.path.join(os.path.dirname(__file__), 'local.py')
try:
    execfile(local)
except IOError:
    # local.py is optional; only a missing file is ignored. The previous
    # bare `except: pass` also swallowed syntax/runtime errors inside
    # local.py, silently running with default settings.
    pass

if live_connection:
    #TODO: you must set the auth credentials to something valid
    from boto.mturk.connection import MTurkConnection
else:
    # Here the credentials must be set, but it doesn't matter what
    # they're set to.
    os.environ.setdefault('AWS_ACCESS_KEY_ID', 'foo')
    os.environ.setdefault('AWS_SECRET_ACCESS_KEY', 'bar')
    from mocks import MTurkConnection

SetHostMTurkConnection = functools.partial(MTurkConnection, host=mturk_host)
diff --git a/boto/mturk/test/all_tests.py b/boto/mturk/test/all_tests.py
new file mode 100644
index 0000000..f17cf85
--- /dev/null
+++ b/boto/mturk/test/all_tests.py
@@ -0,0 +1,24 @@
import unittest
import doctest
from glob import glob

# One star-import per test module; the duplicate
# create_hit_with_qualifications import has been removed.
from create_hit_test import *
from create_hit_external import *
from create_hit_with_qualifications import *
from hit_persistence import *

# Collect every .doctest transcript in this directory as well.
doctest_suite = doctest.DocFileSuite(
    *glob('*.doctest'),
    optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
    )

class Program(unittest.TestProgram):
    """unittest.main variant that appends the doctest suite to the
    discovered unit tests before running."""
    def runTests(self, *args, **kwargs):
        self.test = unittest.TestSuite([self.test, doctest_suite])
        super(Program, self).runTests(*args, **kwargs)

if __name__ == '__main__':
    Program()
diff --git a/boto/mturk/test/cleanup_tests.py b/boto/mturk/test/cleanup_tests.py
new file mode 100644
index 0000000..2381dd9
--- /dev/null
+++ b/boto/mturk/test/cleanup_tests.py
@@ -0,0 +1,45 @@
+import itertools
+
+from _init_environment import SetHostMTurkConnection
+
def description_filter(substring):
    """Return a predicate that is true for HITs whose Title contains
    *substring*."""
    def matches(hit):
        return substring in hit.Title
    return matches
+
def disable_hit(hit):
    # Disable the HIT via the module-global `conn` bound in cleanup().
    return conn.disable_hit(hit.HITId)
+
def dispose_hit(hit):
    """Approve any submitted assignments, then dispose of the HIT.

    Uses the module-global `conn` bound in cleanup().
    """
    # assignments must be first approved or rejected
    for assignment in conn.get_assignments(hit.HITId):
        if assignment.AssignmentStatus == 'Submitted':
            conn.approve_assignment(assignment.AssignmentId)
    return conn.dispose_hit(hit.HITId)
+
def cleanup():
    """Remove any boto test related HIT's"""

    global conn

    # conn is intentionally module-global so disable_hit/dispose_hit
    # (used with map() below) can reach it.
    conn = SetHostMTurkConnection()


    is_boto = description_filter('Boto')
    print 'getting hits...'
    all_hits = list(conn.get_all_hits())
    # Reviewable HITs must be disposed; any other status is disabled.
    is_reviewable = lambda hit: hit.HITStatus == 'Reviewable'
    is_not_reviewable = lambda hit: not is_reviewable(hit)
    hits_to_process = filter(is_boto, all_hits)
    hits_to_disable = filter(is_not_reviewable, hits_to_process)
    hits_to_dispose = filter(is_reviewable, hits_to_process)
    print 'disabling/disposing %d/%d hits' % (len(hits_to_disable), len(hits_to_dispose))
    map(disable_hit, hits_to_disable)
    map(dispose_hit, hits_to_dispose)

    # Summary of what was touched vs. skipped (non-Boto HITs).
    total_hits = len(all_hits)
    hits_processed = len(hits_to_process)
    skipped = total_hits - hits_processed
    fmt = 'Processed: %(total_hits)d HITs, disabled/disposed: %(hits_processed)d, skipped: %(skipped)d'
    print fmt % vars()

if __name__ == '__main__':
    cleanup()
diff --git a/boto/mturk/test/common.py b/boto/mturk/test/common.py
new file mode 100644
index 0000000..40e2726
--- /dev/null
+++ b/boto/mturk/test/common.py
@@ -0,0 +1,44 @@
+import unittest
+import uuid
+import datetime
+
+from boto.mturk.question import (
+ Question, QuestionContent, AnswerSpecification, FreeTextAnswer,
+)
+from _init_environment import SetHostMTurkConnection
+
class MTurkCommon(unittest.TestCase):
    """Shared fixture for MTurk tests: a sandbox connection plus canned
    question and HIT parameters."""

    def setUp(self):
        self.conn = SetHostMTurkConnection()

    @staticmethod
    def get_question():
        """Build a simple free-text question with a unique identifier."""
        content = QuestionContent()
        content.append_field('Title', 'Boto no hit type question content')
        content.append_field('Text', 'What is a boto no hit type?')
        return Question(
            identifier=str(uuid.uuid4()),
            content=content,
            answer_spec=AnswerSpecification(FreeTextAnswer()),
        )

    @staticmethod
    def get_hit_params():
        """Keyword arguments for create_hit, shared across the tests."""
        return dict(
            lifetime=datetime.timedelta(minutes=65),
            max_assignments=2,
            title='Boto create_hit title',
            description='Boto create_hit description',
            keywords=['boto', 'test'],
            reward=0.23,
            duration=datetime.timedelta(minutes=6),
            approval_delay=60*60,
            annotation='An annotation from boto create_hit test',
            response_groups=['Minimal',
                             'HITDetail',
                             'HITQuestion',
                             'HITAssignmentSummary',],
        )
+
diff --git a/boto/mturk/test/create_free_text_question_regex.doctest b/boto/mturk/test/create_free_text_question_regex.doctest
new file mode 100644
index 0000000..0b9d2a9
--- /dev/null
+++ b/boto/mturk/test/create_free_text_question_regex.doctest
@@ -0,0 +1,100 @@
+>>> import uuid
+>>> import datetime
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer, RegExConstraint
+
+>>> conn = MTurkConnection(host=mturk_host)
+
+# create content for a question
+>>> qn_content = QuestionContent()
+>>> qn_content.append_field('Title', 'Boto no hit type question content')
+>>> qn_content.append_field('Text', 'What is a boto no hit type?')
+
+# create a free text answer that is not quite so free!
+>>> constraints = [
+... RegExConstraint(
+... "^[12][0-9]{3}-[01]?\d-[0-3]?\d$",
+... error_text="You must enter a date with the format yyyy-mm-dd.",
+... flags='i',
+... )]
+>>> ft_answer = FreeTextAnswer(constraints=constraints,
+... default="This is not a valid format")
+
+# create the question specification
+>>> qn = Question(identifier=str(uuid.uuid4()),
+... content=qn_content,
+... answer_spec=AnswerSpecification(ft_answer))
+
+# now, create the actual HIT for the question without using a HIT type
+# NOTE - the response_groups are specified to get back additional information for testing
+>>> keywords=['boto', 'test', 'doctest']
+>>> create_hit_rs = conn.create_hit(question=qn,
+... lifetime=60*65,
+... max_assignments=2,
+... title='Boto create_hit title',
+... description='Boto create_hit description',
+... keywords=keywords,
+... reward=0.23,
+... duration=60*6,
+... approval_delay=60*60,
+... annotation='An annotation from boto create_hit test',
+... response_groups=['Minimal',
+... 'HITDetail',
+... 'HITQuestion',
+... 'HITAssignmentSummary',])
+
+# this is a valid request
+>>> create_hit_rs.status
+True
+
+# for the requested hit type id
+# the HIT Type Id is a unicode string
+>>> len(create_hit_rs)
+1
+>>> hit = create_hit_rs[0]
+>>> hit_type_id = hit.HITTypeId
+>>> hit_type_id # doctest: +ELLIPSIS
+u'...'
+
+>>> hit.MaxAssignments
+u'2'
+
+>>> hit.AutoApprovalDelayInSeconds
+u'3600'
+
+# expiration should be very close to now + the lifetime in seconds
+>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900)
+>>> expiration_datetime = datetime.datetime.strptime(hit.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+>>> delta = expected_datetime - expiration_datetime
+>>> abs(delta).seconds < 5
+True
+
+# duration is as specified for the HIT type
+>>> hit.AssignmentDurationInSeconds
+u'360'
+
+# the reward has been set correctly (allow for float error here)
+>>> int(float(hit.Amount) * 100)
+23
+
+>>> hit.FormattedPrice
+u'$0.23'
+
+# only US currency supported at present
+>>> hit.CurrencyCode
+u'USD'
+
+# title is the HIT type title
+>>> hit.Title
+u'Boto create_hit title'
+
+# description is the HIT type description
+>>> hit.Description
+u'Boto create_hit description'
+
+# annotation is correct
+>>> hit.RequesterAnnotation
+u'An annotation from boto create_hit test'
+
+>>> hit.HITReviewStatus
+u'NotReviewed'
diff --git a/boto/mturk/test/create_hit.doctest b/boto/mturk/test/create_hit.doctest
new file mode 100644
index 0000000..a97cbf8
--- /dev/null
+++ b/boto/mturk/test/create_hit.doctest
@@ -0,0 +1,92 @@
+>>> import uuid
+>>> import datetime
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer
+
+>>> conn = MTurkConnection(host=mturk_host)
+
+# create content for a question
+>>> qn_content = QuestionContent()
+>>> qn_content.append_field('Title', 'Boto no hit type question content')
+>>> qn_content.append_field('Text', 'What is a boto no hit type?')
+
+# create the question specification
+>>> qn = Question(identifier=str(uuid.uuid4()),
+... content=qn_content,
+... answer_spec=AnswerSpecification(FreeTextAnswer()))
+
+# now, create the actual HIT for the question without using a HIT type
+# NOTE - the response_groups are specified to get back additional information for testing
+>>> keywords=['boto', 'test', 'doctest']
+>>> lifetime = datetime.timedelta(minutes=65)
+>>> create_hit_rs = conn.create_hit(question=qn,
+... lifetime=lifetime,
+... max_assignments=2,
+... title='Boto create_hit title',
+... description='Boto create_hit description',
+... keywords=keywords,
+... reward=0.23,
+... duration=60*6,
+... approval_delay=60*60,
+... annotation='An annotation from boto create_hit test',
+... response_groups=['Minimal',
+... 'HITDetail',
+... 'HITQuestion',
+... 'HITAssignmentSummary',])
+
+# this is a valid request
+>>> create_hit_rs.status
+True
+
+>>> len(create_hit_rs)
+1
+>>> hit = create_hit_rs[0]
+
+# for the requested hit type id
+# the HIT Type Id is a unicode string
+>>> hit_type_id = hit.HITTypeId
+>>> hit_type_id # doctest: +ELLIPSIS
+u'...'
+
+>>> hit.MaxAssignments
+u'2'
+
+>>> hit.AutoApprovalDelayInSeconds
+u'3600'
+
+# expiration should be very close to now + the lifetime
+>>> expected_datetime = datetime.datetime.utcnow() + lifetime
+>>> expiration_datetime = datetime.datetime.strptime(hit.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+>>> delta = expected_datetime - expiration_datetime
+>>> abs(delta).seconds < 5
+True
+
+# duration is as specified for the HIT type
+>>> hit.AssignmentDurationInSeconds
+u'360'
+
+# the reward has been set correctly (allow for float error here)
+>>> int(float(hit.Amount) * 100)
+23
+
+>>> hit.FormattedPrice
+u'$0.23'
+
+# only US currency supported at present
+>>> hit.CurrencyCode
+u'USD'
+
+# title is the HIT type title
+>>> hit.Title
+u'Boto create_hit title'
+
+# description is the HIT type description
+>>> hit.Description
+u'Boto create_hit description'
+
+# annotation is correct
+>>> hit.RequesterAnnotation
+u'An annotation from boto create_hit test'
+
+>>> hit.HITReviewStatus
+u'NotReviewed'
diff --git a/boto/mturk/test/create_hit_binary.doctest b/boto/mturk/test/create_hit_binary.doctest
new file mode 100644
index 0000000..3f0434e
--- /dev/null
+++ b/boto/mturk/test/create_hit_binary.doctest
@@ -0,0 +1,94 @@
+>>> import uuid
+>>> import datetime
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer, Binary
+
+>>> conn = MTurkConnection(host=mturk_host)
+
+# create content for a question
+>>> qn_content = QuestionContent()
+>>> qn_content.append_field('Title','Boto no hit type question content')
+>>> qn_content.append_field('Text', 'What is a boto binary hit type?')
+>>> binary_content = Binary('image', 'jpeg', 'http://www.example.com/test1.jpg', alt_text='image is missing')
+>>> qn_content.append(binary_content)
+
+# create the question specification
+>>> qn = Question(identifier=str(uuid.uuid4()),
+... content=qn_content,
+... answer_spec=AnswerSpecification(FreeTextAnswer()))
+
+# now, create the actual HIT for the question without using a HIT type
+# NOTE - the response_groups are specified to get back additional information for testing
+>>> keywords=['boto', 'test', 'doctest']
+>>> lifetime = datetime.timedelta(minutes=65)
+>>> create_hit_rs = conn.create_hit(question=qn,
+... lifetime=lifetime,
+... max_assignments=2,
+... title='Boto create_hit title',
+... description='Boto create_hit description',
+... keywords=keywords,
+... reward=0.23,
+... duration=60*6,
+... approval_delay=60*60,
+... annotation='An annotation from boto create_hit test',
+... response_groups=['Minimal',
+... 'HITDetail',
+... 'HITQuestion',
+... 'HITAssignmentSummary',])
+
+# this is a valid request
+>>> create_hit_rs.status
+True
+
+>>> len(create_hit_rs)
+1
+>>> hit = create_hit_rs[0]
+
+# for the requested hit type id
+# the HIT Type Id is a unicode string
+>>> hit_type_id = hit.HITTypeId
+>>> hit_type_id # doctest: +ELLIPSIS
+u'...'
+
+>>> hit.MaxAssignments
+u'2'
+
+>>> hit.AutoApprovalDelayInSeconds
+u'3600'
+
+# expiration should be very close to now + the lifetime
+>>> expected_datetime = datetime.datetime.utcnow() + lifetime
+>>> expiration_datetime = datetime.datetime.strptime(hit.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+>>> delta = expected_datetime - expiration_datetime
+>>> abs(delta).seconds < 5
+True
+
+# duration is as specified for the HIT type
+>>> hit.AssignmentDurationInSeconds
+u'360'
+
+# the reward has been set correctly (allow for float error here)
+>>> int(float(hit.Amount) * 100)
+23
+
+>>> hit.FormattedPrice
+u'$0.23'
+
+# only US currency supported at present
+>>> hit.CurrencyCode
+u'USD'
+
+# title is the HIT type title
+>>> hit.Title
+u'Boto create_hit title'
+
+# description is the HIT type description
+>>> hit.Description
+u'Boto create_hit description'
+
+# annotation is correct
+>>> hit.RequesterAnnotation
+u'An annotation from boto create_hit test'
+
+>>> hit.HITReviewStatus
+u'NotReviewed'
diff --git a/boto/mturk/test/create_hit_external.py b/boto/mturk/test/create_hit_external.py
new file mode 100644
index 0000000..9e955a6
--- /dev/null
+++ b/boto/mturk/test/create_hit_external.py
@@ -0,0 +1,17 @@
+import unittest
+import uuid
+import datetime
+from boto.mturk.question import ExternalQuestion
+
+from _init_environment import SetHostMTurkConnection, external_url
+
class Test(unittest.TestCase):
    # Smoke test: posting an ExternalQuestion HIT against the configured
    # host must yield a successful response status.
    def test_create_hit_external(self):
        q = ExternalQuestion(external_url=external_url, frame_height=800)
        conn = SetHostMTurkConnection()
        keywords=['boto', 'test', 'doctest']
        create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', response_groups=['Minimal','HITDetail','HITQuestion','HITAssignmentSummary',])
        assert(create_hit_rs.status == True)

if __name__ == "__main__":
    unittest.main()
diff --git a/boto/mturk/test/create_hit_from_hit_type.doctest b/boto/mturk/test/create_hit_from_hit_type.doctest
new file mode 100644
index 0000000..1b6d0f0
--- /dev/null
+++ b/boto/mturk/test/create_hit_from_hit_type.doctest
@@ -0,0 +1,103 @@
+>>> import uuid
+>>> import datetime
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer
+>>>
+>>> conn = MTurkConnection(host=mturk_host)
+>>> keywords=['boto', 'test', 'doctest']
+>>> hit_type_rs = conn.register_hit_type('Boto Test HIT type',
+... 'HIT Type for testing Boto',
+... 0.12,
+... 60*6,
+... keywords=keywords,
+... approval_delay=60*60)
+
+# this was a valid request
+>>> hit_type_rs.status
+True
+
+# the HIT Type Id is a unicode string
+>>> hit_type_id = hit_type_rs.HITTypeId
+>>> hit_type_id # doctest: +ELLIPSIS
+u'...'
+
+# create content for a question
+>>> qn_content = QuestionContent()
+>>> qn_content.append_field('Title', 'Boto question content create_hit_from_hit_type')
+>>> qn_content.append_field('Text', 'What is a boto create_hit_from_hit_type?')
+
+# create the question specification
+>>> qn = Question(identifier=str(uuid.uuid4()),
+... content=qn_content,
+... answer_spec=AnswerSpecification(FreeTextAnswer()))
+
+# now, create the actual HIT for the question using the HIT type
+# NOTE - the response_groups are specified to get back additional information for testing
+>>> create_hit_rs = conn.create_hit(hit_type=hit_type_rs.HITTypeId,
+... question=qn,
+... lifetime=60*65,
+... max_assignments=2,
+... annotation='An annotation from boto create_hit_from_hit_type test',
+... response_groups=['Minimal',
+... 'HITDetail',
+... 'HITQuestion',
+... 'HITAssignmentSummary',])
+
+# this is a valid request
+>>> create_hit_rs.status
+True
+
+>>> len(create_hit_rs)
+1
+
+>>> hit = create_hit_rs[0]
+
+# for the requested hit type id
+>>> hit.HITTypeId == hit_type_id
+True
+
+# with the correct number of maximum assignments
+>>> hit.MaxAssignments
+u'2'
+
+# and the approval delay
+>>> hit.AutoApprovalDelayInSeconds
+u'3600'
+
+# expiration should be very close to now + the lifetime in seconds
+>>> expected_datetime = datetime.datetime.utcnow() + datetime.timedelta(seconds=3900)
+>>> expiration_datetime = datetime.datetime.strptime(hit.Expiration, '%Y-%m-%dT%H:%M:%SZ')
+>>> delta = expected_datetime - expiration_datetime
+>>> abs(delta).seconds < 5
+True
+
+# duration is as specified for the HIT type
+>>> hit.AssignmentDurationInSeconds
+u'360'
+
+# the reward has been set correctly
+>>> float(hit.Amount) == 0.12
+True
+
+>>> hit.FormattedPrice
+u'$0.12'
+
+# only US currency supported at present
+>>> hit.CurrencyCode
+u'USD'
+
+# title is the HIT type title
+>>> hit.Title
+u'Boto Test HIT type'
+
+# description is the HIT type description
+>>> hit.Description
+u'HIT Type for testing Boto'
+
+# annotation is correct
+>>> hit.RequesterAnnotation
+u'An annotation from boto create_hit_from_hit_type test'
+
+# not reviewed yet
+>>> hit.HITReviewStatus
+u'NotReviewed'
diff --git a/boto/mturk/test/create_hit_test.py b/boto/mturk/test/create_hit_test.py
new file mode 100644
index 0000000..ea134b4
--- /dev/null
+++ b/boto/mturk/test/create_hit_test.py
@@ -0,0 +1,21 @@
+import unittest
+import os
+from boto.mturk.question import QuestionForm
+
+from common import MTurkCommon
+
class TestHITCreation(MTurkCommon):
    # Smoke tests: create_hit must accept both a bare Question and a
    # QuestionForm wrapper, with otherwise identical parameters.
    def testCallCreateHitWithOneQuestion(self):
        create_hit_rs = self.conn.create_hit(
            question=self.get_question(),
            **self.get_hit_params()
        )

    def testCallCreateHitWithQuestionForm(self):
        create_hit_rs = self.conn.create_hit(
            questions=QuestionForm([self.get_question()]),
            **self.get_hit_params()
        )

if __name__ == '__main__':
    unittest.main()
diff --git a/boto/mturk/test/create_hit_with_qualifications.py b/boto/mturk/test/create_hit_with_qualifications.py
new file mode 100644
index 0000000..9ef2bc5
--- /dev/null
+++ b/boto/mturk/test/create_hit_with_qualifications.py
@@ -0,0 +1,16 @@
+from boto.mturk.connection import MTurkConnection
+from boto.mturk.question import ExternalQuestion
+from boto.mturk.qualification import Qualifications, PercentAssignmentsApprovedRequirement
+
def test():
    """Create an external HIT gated on a >95% assignment-approval
    qualification and print the resulting HIT type id."""
    q = ExternalQuestion(external_url="http://websort.net/s/F3481C", frame_height=800)
    conn = MTurkConnection(host='mechanicalturk.sandbox.amazonaws.com')
    keywords=['boto', 'test', 'doctest']
    qualifications = Qualifications()
    qualifications.add(PercentAssignmentsApprovedRequirement(comparator="GreaterThan", integer_value="95"))
    create_hit_rs = conn.create_hit(question=q, lifetime=60*65,max_assignments=2,title="Boto External Question Test", keywords=keywords,reward = 0.05, duration=60*6,approval_delay=60*60, annotation='An annotation from boto external question test', qualifications=qualifications)
    assert(create_hit_rs.status == True)
    print create_hit_rs.HITTypeId

if __name__ == "__main__":
    test()
diff --git a/boto/mturk/test/hit_persistence.py b/boto/mturk/test/hit_persistence.py
new file mode 100644
index 0000000..04ebd0c
--- /dev/null
+++ b/boto/mturk/test/hit_persistence.py
@@ -0,0 +1,27 @@
+import unittest
+import pickle
+
+from common import MTurkCommon
+
class TestHITPersistence(MTurkCommon):
    """Regression tests: a create_hit result must survive pickling."""

    def create_hit_result(self):
        params = self.get_hit_params()
        return self.conn.create_hit(question=self.get_question(), **params)

    def _roundtrip(self, obj):
        # one pickle/unpickle cycle
        return pickle.loads(pickle.dumps(obj))

    def test_pickle_hit_result(self):
        self._roundtrip(self.create_hit_result())

    def test_pickle_deserialized_version(self):
        """
        It seems the technique used to store and reload the object must
        result in an equivalent object, or subsequent pickles may fail.
        This tests a double-pickle to elicit that error.
        """
        restored = self._roundtrip(self.create_hit_result())
        pickle.dumps(restored)

if __name__ == '__main__':
    unittest.main()
diff --git a/boto/mturk/test/mocks.py b/boto/mturk/test/mocks.py
new file mode 100644
index 0000000..0b2c52c
--- /dev/null
+++ b/boto/mturk/test/mocks.py
@@ -0,0 +1,11 @@
+from boto.mturk.connection import MTurkConnection as RealMTurkConnection
+
class MTurkConnection(RealMTurkConnection):
    """
    Mock MTurkConnection that doesn't connect, but instead just prepares
    the request and captures information about its usage.
    """

    def _process_request(self, *args, **kwargs):
        # Record the call instead of talking to AWS; setdefault keeps a
        # single capture dict per instance.
        captured = self.__dict__.setdefault('_mock_saved_args', dict())
        captured['_process_request'] = (args, kwargs)
diff --git a/boto/mturk/test/reviewable_hits.doctest b/boto/mturk/test/reviewable_hits.doctest
new file mode 100644
index 0000000..113a056
--- /dev/null
+++ b/boto/mturk/test/reviewable_hits.doctest
@@ -0,0 +1,129 @@
+>>> import uuid
+>>> import datetime
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> from boto.mturk.question import Question, QuestionContent, AnswerSpecification, FreeTextAnswer
+
+>>> conn = MTurkConnection(host=mturk_host)
+
+# create content for a question
+>>> qn_content = QuestionContent()
+>>> qn_content.append_field('Title', 'Boto no hit type question content')
+>>> qn_content.append_field('Text', 'What is a boto no hit type?')
+
+# create the question specification
+>>> qn = Question(identifier=str(uuid.uuid4()),
+... content=qn_content,
+... answer_spec=AnswerSpecification(FreeTextAnswer()))
+
+# now, create the actual HIT for the question without using a HIT type
+# NOTE - the response_groups are specified to get back additional information for testing
+>>> keywords=['boto', 'test', 'doctest']
+>>> create_hit_rs = conn.create_hit(question=qn,
+... lifetime=60*65,
+... max_assignments=1,
+... title='Boto Hit to be Reviewed',
+... description='Boto reviewable_hits description',
+... keywords=keywords,
+... reward=0.23,
+... duration=60*6,
+... approval_delay=60*60,
+... annotation='An annotation from boto create_hit test',
+... response_groups=['Minimal',
+... 'HITDetail',
+... 'HITQuestion',
+... 'HITAssignmentSummary',])
+
+# this is a valid request
+>>> create_hit_rs.status
+True
+
+>>> len(create_hit_rs)
+1
+>>> hit = create_hit_rs[0]
+
+# for the requested hit type id
+# the HIT Type Id is a unicode string
+>>> hit_type_id = hit.HITTypeId
+>>> hit_type_id # doctest: +ELLIPSIS
+u'...'
+
+>>> from selenium_support import complete_hit, has_selenium
+>>> if has_selenium(): complete_hit(hit_type_id, response='reviewable_hits_test')
+>>> import time
+
+Give mechanical turk some time to process the hit
+>>> if has_selenium(): time.sleep(10)
+
+# should have some reviewable HIT's returned, especially if returning all HIT type's
+# NOTE: but only if your account has existing HIT's in the reviewable state
+>>> reviewable_rs = conn.get_reviewable_hits()
+
+# this is a valid request
+>>> reviewable_rs.status
+True
+
+>>> len(reviewable_rs) >= 1
+True
+
+# should contain at least one HIT object
+>>> reviewable_rs # doctest: +ELLIPSIS
+[<boto.mturk.connection.HIT instance at ...]
+
+>>> hit_id = reviewable_rs[0].HITId
+
+# check that we can retrieve the assignments for a HIT
+>>> assignments_rs = conn.get_assignments(hit_id)
+
+# this is a valid request
+>>> assignments_rs.status
+True
+
+>>> int(assignments_rs.NumResults) >= 1
+True
+
+>>> len(assignments_rs) == int(assignments_rs.NumResults)
+True
+
+>>> assignments_rs.PageNumber
+u'1'
+
+>>> assignments_rs.TotalNumResults >= 1
+True
+
+# should contain at least one Assignment object
+>>> assignments_rs # doctest: +ELLIPSIS
+[<boto.mturk.connection.Assignment instance at ...]
+
+# should have returned assignments for the requested HIT id
+>>> assignment = assignments_rs[0]
+
+>>> assignment.HITId == hit_id
+True
+
+# should have a valid status
+>>> assignment.AssignmentStatus in ['Submitted', 'Approved', 'Rejected']
+True
+
+# should have returned at least one answer
+>>> len(assignment.answers) > 0
+True
+
+# should contain at least one set of QuestionFormAnswer objects
+>>> assignment.answers # doctest: +ELLIPSIS
+[[<boto.mturk.connection.QuestionFormAnswer instance at ...]]
+
+>>> answer = assignment.answers[0][0]
+
+# the answer should have exactly one field
+>>> len(answer.fields)
+1
+
+>>> qid, text = answer.fields[0]
+
+>>> text # doctest: +ELLIPSIS
+u'...'
+
+# question identifier should be a unicode string
+>>> qid # doctest: +ELLIPSIS
+u'...'
+
diff --git a/boto/mturk/test/run-doctest.py b/boto/mturk/test/run-doctest.py
new file mode 100644
index 0000000..251b7e0
--- /dev/null
+++ b/boto/mturk/test/run-doctest.py
@@ -0,0 +1,15 @@
from __future__ import print_function

import argparse
import doctest

# Run a single named .doctest transcript, stopping at the first failure.
parser = argparse.ArgumentParser(
    description="Run a test by name"
    )
parser.add_argument('test_name')
args = parser.parse_args()

doctest.testfile(
    args.test_name,
    optionflags=doctest.REPORT_ONLY_FIRST_FAILURE
    )
\ No newline at end of file
diff --git a/boto/mturk/test/search_hits.doctest b/boto/mturk/test/search_hits.doctest
new file mode 100644
index 0000000..a79bab7
--- /dev/null
+++ b/boto/mturk/test/search_hits.doctest
@@ -0,0 +1,16 @@
+>>> from _init_environment import MTurkConnection, mturk_host
+>>> conn = MTurkConnection(host=mturk_host)
+
+# should have some HIT's returned by a search (but only if your account has existing HIT's)
+>>> search_rs = conn.search_hits()
+
+# this is a valid request
+>>> search_rs.status
+True
+
+>>> len(search_rs) > 1
+True
+
+>>> search_rs # doctest: +ELLIPSIS
+[<boto.mturk.connection.HIT instance at ...]
+
diff --git a/boto/mturk/test/selenium_support.py b/boto/mturk/test/selenium_support.py
new file mode 100644
index 0000000..f1552cb
--- /dev/null
+++ b/boto/mturk/test/selenium_support.py
@@ -0,0 +1,61 @@
+from __future__ import absolute_import
+from boto.mturk.test.support import unittest
+
+sel_args = ('localhost', 4444, '*chrome', 'https://workersandbox.mturk.com')
+
+class SeleniumFailed(object):
+ def __init__(self, message):
+ self.message = message
+ def __nonzero__(self):
+ return False
+
+def has_selenium():
+ try:
+ from selenium import selenium
+ globals().update(selenium=selenium)
+ sel = selenium(*sel_args)
+ # a little trick to see if the server is responding
+ try:
+ sel.do_command('shutdown', '')
+ except Exception, e:
+ if not 'Server Exception' in str(e):
+ raise
+ result = True
+ except ImportError:
+ result = SeleniumFailed('selenium RC not installed')
+ except Exception:
+ msg = 'Error occurred initializing selenium: %s' % e
+ result = SeleniumFailed(msg)
+
+ # overwrite has_selenium, so the same result is returned every time
+ globals().update(has_selenium=lambda: result)
+ return result
+
+identity = lambda x: x
+
+def skip_unless_has_selenium():
+ res = has_selenium()
+ if not res:
+ return unittest.skip(res.message)
+ return identity
+
+def complete_hit(hit_type_id, response='Some Response'):
+ verificationErrors = []
+ sel = selenium(*sel_args)
+ sel.start()
+ sel.open("/mturk/welcome")
+ sel.click("lnkWorkerSignin")
+ sel.wait_for_page_to_load("30000")
+ sel.type("email", "boto.tester@example.com")
+ sel.type("password", "BotoTest")
+ sel.click("Continue")
+ sel.wait_for_page_to_load("30000")
+ sel.open("/mturk/preview?groupId={hit_type_id}".format(**vars()))
+ sel.click("/accept")
+ sel.wait_for_page_to_load("30000")
+ sel.type("Answer_1_FreeText", response)
+ sel.click("//div[5]/table/tbody/tr[2]/td[1]/input")
+ sel.wait_for_page_to_load("30000")
+ sel.click("link=Sign Out")
+ sel.wait_for_page_to_load("30000")
+ sel.stop()
diff --git a/boto/mturk/test/support.py b/boto/mturk/test/support.py
new file mode 100644
index 0000000..16b86e6
--- /dev/null
+++ b/boto/mturk/test/support.py
@@ -0,0 +1,8 @@
+
+import sys
+
+# use unittest2 under Python 2.6 and earlier.
+if sys.version_info >= (2,7):
+ import unittest
+else:
+ import unittest2 as unittest
diff --git a/boto/mturk/test/test_disable_hit.py b/boto/mturk/test/test_disable_hit.py
new file mode 100644
index 0000000..0913443
--- /dev/null
+++ b/boto/mturk/test/test_disable_hit.py
@@ -0,0 +1,11 @@
+from boto.mturk.test.support import unittest
+
+from common import MTurkCommon
+from boto.mturk.connection import MTurkRequestError
+
+class TestDisableHITs(MTurkCommon):
+ def test_disable_invalid_hit(self):
+ self.assertRaises(MTurkRequestError, self.conn.disable_hit, 'foo')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/boto/plugin.py b/boto/plugin.py
new file mode 100644
index 0000000..f8b592c
--- /dev/null
+++ b/boto/plugin.py
@@ -0,0 +1,90 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+"""
+Implements plugin related api.
+
+To define a new plugin just subclass Plugin, like this.
+
+class AuthPlugin(Plugin):
+ pass
+
+Then start creating subclasses of your new plugin.
+
+class MyFancyAuth(AuthPlugin):
+ capability = ['sign', 'vmac']
+
+The actual interface is duck typed.
+
+"""
+
+import glob
+import imp, os.path
+
+class Plugin(object):
+ """Base class for all plugins."""
+
+ capability = []
+
+ @classmethod
+ def is_capable(cls, requested_capability):
+ """Returns true if the requested capability is supported by this plugin
+ """
+ for c in requested_capability:
+ if not c in cls.capability:
+ return False
+ return True
+
+def get_plugin(cls, requested_capability=None):
+ if not requested_capability:
+ requested_capability = []
+ result = []
+ for handler in cls.__subclasses__():
+ if handler.is_capable(requested_capability):
+ result.append(handler)
+ return result
+
+def _import_module(filename):
+ (path, name) = os.path.split(filename)
+ (name, ext) = os.path.splitext(name)
+
+ (file, filename, data) = imp.find_module(name, [path])
+ try:
+ return imp.load_module(name, file, filename, data)
+ finally:
+ if file:
+ file.close()
+
+_plugin_loaded = False
+
+def load_plugins(config):
+ global _plugin_loaded
+ if _plugin_loaded:
+ return
+ _plugin_loaded = True
+
+ if not config.has_option('Plugin', 'plugin_directory'):
+ return
+ directory = config.get('Plugin', 'plugin_directory')
+ for file in glob.glob(os.path.join(directory, '*.py')):
+ _import_module(file)
+
diff --git a/boto/provider.py b/boto/provider.py
new file mode 100644
index 0000000..c1c8b59
--- /dev/null
+++ b/boto/provider.py
@@ -0,0 +1,208 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright 2010 Google Inc.
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+"""
+This class encapsulates the provider-specific header differences.
+"""
+
+import os
+import boto
+from boto import config
+from boto.gs.acl import ACL
+from boto.gs.acl import CannedACLStrings as CannedGSACLStrings
+from boto.s3.acl import CannedACLStrings as CannedS3ACLStrings
+from boto.s3.acl import Policy
+
+HEADER_PREFIX_KEY = 'header_prefix'
+METADATA_PREFIX_KEY = 'metadata_prefix'
+
+AWS_HEADER_PREFIX = 'x-amz-'
+GOOG_HEADER_PREFIX = 'x-goog-'
+
+ACL_HEADER_KEY = 'acl-header'
+AUTH_HEADER_KEY = 'auth-header'
+COPY_SOURCE_HEADER_KEY = 'copy-source-header'
+COPY_SOURCE_VERSION_ID_HEADER_KEY = 'copy-source-version-id-header'
+DELETE_MARKER_HEADER_KEY = 'delete-marker-header'
+DATE_HEADER_KEY = 'date-header'
+METADATA_DIRECTIVE_HEADER_KEY = 'metadata-directive-header'
+RESUMABLE_UPLOAD_HEADER_KEY = 'resumable-upload-header'
+SECURITY_TOKEN_HEADER_KEY = 'security-token-header'
+STORAGE_CLASS_HEADER_KEY = 'storage-class'
+MFA_HEADER_KEY = 'mfa-header'
+VERSION_ID_HEADER_KEY = 'version-id-header'
+
+STORAGE_COPY_ERROR = 'StorageCopyError'
+STORAGE_CREATE_ERROR = 'StorageCreateError'
+STORAGE_DATA_ERROR = 'StorageDataError'
+STORAGE_PERMISSIONS_ERROR = 'StoragePermissionsError'
+STORAGE_RESPONSE_ERROR = 'StorageResponseError'
+
+
+class Provider(object):
+
+ CredentialMap = {
+ 'aws' : ('aws_access_key_id', 'aws_secret_access_key'),
+ 'google' : ('gs_access_key_id', 'gs_secret_access_key'),
+ }
+
+ AclClassMap = {
+ 'aws' : Policy,
+ 'google' : ACL
+ }
+
+ CannedAclsMap = {
+ 'aws' : CannedS3ACLStrings,
+ 'google' : CannedGSACLStrings
+ }
+
+ HostKeyMap = {
+ 'aws' : 's3',
+ 'google' : 'gs'
+ }
+
+ HeaderInfoMap = {
+ 'aws' : {
+ HEADER_PREFIX_KEY : AWS_HEADER_PREFIX,
+ METADATA_PREFIX_KEY : AWS_HEADER_PREFIX + 'meta-',
+ ACL_HEADER_KEY : AWS_HEADER_PREFIX + 'acl',
+ AUTH_HEADER_KEY : 'AWS',
+ COPY_SOURCE_HEADER_KEY : AWS_HEADER_PREFIX + 'copy-source',
+ COPY_SOURCE_VERSION_ID_HEADER_KEY : AWS_HEADER_PREFIX +
+ 'copy-source-version-id',
+ DATE_HEADER_KEY : AWS_HEADER_PREFIX + 'date',
+ DELETE_MARKER_HEADER_KEY : AWS_HEADER_PREFIX + 'delete-marker',
+ METADATA_DIRECTIVE_HEADER_KEY : AWS_HEADER_PREFIX +
+ 'metadata-directive',
+ RESUMABLE_UPLOAD_HEADER_KEY : None,
+ SECURITY_TOKEN_HEADER_KEY : AWS_HEADER_PREFIX + 'security-token',
+ VERSION_ID_HEADER_KEY : AWS_HEADER_PREFIX + 'version-id',
+ STORAGE_CLASS_HEADER_KEY : AWS_HEADER_PREFIX + 'storage-class',
+ MFA_HEADER_KEY : AWS_HEADER_PREFIX + 'mfa',
+ },
+ 'google' : {
+ HEADER_PREFIX_KEY : GOOG_HEADER_PREFIX,
+ METADATA_PREFIX_KEY : GOOG_HEADER_PREFIX + 'meta-',
+ ACL_HEADER_KEY : GOOG_HEADER_PREFIX + 'acl',
+ AUTH_HEADER_KEY : 'GOOG1',
+ COPY_SOURCE_HEADER_KEY : GOOG_HEADER_PREFIX + 'copy-source',
+ COPY_SOURCE_VERSION_ID_HEADER_KEY : GOOG_HEADER_PREFIX +
+ 'copy-source-version-id',
+ DATE_HEADER_KEY : GOOG_HEADER_PREFIX + 'date',
+ DELETE_MARKER_HEADER_KEY : GOOG_HEADER_PREFIX + 'delete-marker',
+ METADATA_DIRECTIVE_HEADER_KEY : GOOG_HEADER_PREFIX +
+ 'metadata-directive',
+ RESUMABLE_UPLOAD_HEADER_KEY : GOOG_HEADER_PREFIX + 'resumable',
+ SECURITY_TOKEN_HEADER_KEY : GOOG_HEADER_PREFIX + 'security-token',
+ VERSION_ID_HEADER_KEY : GOOG_HEADER_PREFIX + 'version-id',
+ STORAGE_CLASS_HEADER_KEY : None,
+ MFA_HEADER_KEY : None,
+ }
+ }
+
+ ErrorMap = {
+ 'aws' : {
+ STORAGE_COPY_ERROR : boto.exception.S3CopyError,
+ STORAGE_CREATE_ERROR : boto.exception.S3CreateError,
+ STORAGE_DATA_ERROR : boto.exception.S3DataError,
+ STORAGE_PERMISSIONS_ERROR : boto.exception.S3PermissionsError,
+ STORAGE_RESPONSE_ERROR : boto.exception.S3ResponseError,
+ },
+ 'google' : {
+ STORAGE_COPY_ERROR : boto.exception.GSCopyError,
+ STORAGE_CREATE_ERROR : boto.exception.GSCreateError,
+ STORAGE_DATA_ERROR : boto.exception.GSDataError,
+ STORAGE_PERMISSIONS_ERROR : boto.exception.GSPermissionsError,
+ STORAGE_RESPONSE_ERROR : boto.exception.GSResponseError,
+ }
+ }
+
+ def __init__(self, name, access_key=None, secret_key=None):
+ self.host = None
+ self.access_key = access_key
+ self.secret_key = secret_key
+ self.name = name
+ self.acl_class = self.AclClassMap[self.name]
+ self.canned_acls = self.CannedAclsMap[self.name]
+ self.get_credentials(access_key, secret_key)
+ self.configure_headers()
+ self.configure_errors()
+ # allow config file to override default host
+ host_opt_name = '%s_host' % self.HostKeyMap[self.name]
+ if config.has_option('Credentials', host_opt_name):
+ self.host = config.get('Credentials', host_opt_name)
+
+ def get_credentials(self, access_key=None, secret_key=None):
+ access_key_name, secret_key_name = self.CredentialMap[self.name]
+ if access_key is not None:
+ self.access_key = access_key
+ elif os.environ.has_key(access_key_name.upper()):
+ self.access_key = os.environ[access_key_name.upper()]
+ elif config.has_option('Credentials', access_key_name):
+ self.access_key = config.get('Credentials', access_key_name)
+
+ if secret_key is not None:
+ self.secret_key = secret_key
+ elif os.environ.has_key(secret_key_name.upper()):
+ self.secret_key = os.environ[secret_key_name.upper()]
+ elif config.has_option('Credentials', secret_key_name):
+ self.secret_key = config.get('Credentials', secret_key_name)
+ if isinstance(self.secret_key, unicode):
+ # the secret key must be bytes and not unicode to work
+ # properly with hmac.new (see http://bugs.python.org/issue5285)
+ self.secret_key = str(self.secret_key)
+
+ def configure_headers(self):
+ header_info_map = self.HeaderInfoMap[self.name]
+ self.metadata_prefix = header_info_map[METADATA_PREFIX_KEY]
+ self.header_prefix = header_info_map[HEADER_PREFIX_KEY]
+ self.acl_header = header_info_map[ACL_HEADER_KEY]
+ self.auth_header = header_info_map[AUTH_HEADER_KEY]
+ self.copy_source_header = header_info_map[COPY_SOURCE_HEADER_KEY]
+ self.copy_source_version_id = header_info_map[
+ COPY_SOURCE_VERSION_ID_HEADER_KEY]
+ self.date_header = header_info_map[DATE_HEADER_KEY]
+ self.delete_marker = header_info_map[DELETE_MARKER_HEADER_KEY]
+ self.metadata_directive_header = (
+ header_info_map[METADATA_DIRECTIVE_HEADER_KEY])
+ self.security_token_header = header_info_map[SECURITY_TOKEN_HEADER_KEY]
+ self.resumable_upload_header = (
+ header_info_map[RESUMABLE_UPLOAD_HEADER_KEY])
+ self.storage_class_header = header_info_map[STORAGE_CLASS_HEADER_KEY]
+ self.version_id = header_info_map[VERSION_ID_HEADER_KEY]
+ self.mfa_header = header_info_map[MFA_HEADER_KEY]
+
+ def configure_errors(self):
+ error_map = self.ErrorMap[self.name]
+ self.storage_copy_error = error_map[STORAGE_COPY_ERROR]
+ self.storage_create_error = error_map[STORAGE_CREATE_ERROR]
+ self.storage_data_error = error_map[STORAGE_DATA_ERROR]
+ self.storage_permissions_error = error_map[STORAGE_PERMISSIONS_ERROR]
+ self.storage_response_error = error_map[STORAGE_RESPONSE_ERROR]
+
+ def get_provider_name(self):
+ return self.HostKeyMap[self.name]
+
+# Static utility method for getting default Provider.
+def get_default():
+ return Provider('aws')
diff --git a/boto/pyami/__init__.py b/boto/pyami/__init__.py
new file mode 100644
index 0000000..303dbb6
--- /dev/null
+++ b/boto/pyami/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
diff --git a/boto/pyami/bootstrap.py b/boto/pyami/bootstrap.py
new file mode 100644
index 0000000..c1441fd
--- /dev/null
+++ b/boto/pyami/bootstrap.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import os
+import boto
+from boto.utils import get_instance_metadata, get_instance_userdata
+from boto.pyami.config import Config, BotoConfigPath
+from boto.pyami.scriptbase import ScriptBase
+
+class Bootstrap(ScriptBase):
+ """
+ The Bootstrap class is instantiated and run as part of the PyAMI
+ instance initialization process. The methods in this class will
+ be run from the rc.local script of the instance and will be run
+ as the root user.
+
+ The main purpose of this class is to make sure the boto distribution
+ on the instance is the one required.
+ """
+
+ def __init__(self):
+ self.working_dir = '/mnt/pyami'
+ self.write_metadata()
+ ScriptBase.__init__(self)
+
+ def write_metadata(self):
+ fp = open(os.path.expanduser(BotoConfigPath), 'w')
+ fp.write('[Instance]\n')
+ inst_data = get_instance_metadata()
+ for key in inst_data:
+ fp.write('%s = %s\n' % (key, inst_data[key]))
+ user_data = get_instance_userdata()
+ fp.write('\n%s\n' % user_data)
+ fp.write('[Pyami]\n')
+ fp.write('working_dir = %s\n' % self.working_dir)
+ fp.close()
+ # This file has the AWS credentials, should we lock it down?
+ # os.chmod(BotoConfigPath, stat.S_IREAD | stat.S_IWRITE)
+ # now that we have written the file, read it into a pyami Config object
+ boto.config = Config()
+ boto.init_logging()
+
+ def create_working_dir(self):
+ boto.log.info('Working directory: %s' % self.working_dir)
+ if not os.path.exists(self.working_dir):
+ os.mkdir(self.working_dir)
+
+ def load_boto(self):
+ update = boto.config.get('Boto', 'boto_update', 'svn:HEAD')
+ if update.startswith('svn'):
+ if update.find(':') >= 0:
+ method, version = update.split(':')
+ version = '-r%s' % version
+ else:
+ version = '-rHEAD'
+ location = boto.config.get('Boto', 'boto_location', '/usr/local/boto')
+ self.run('svn update %s %s' % (version, location))
+ elif update.startswith('git'):
+ location = boto.config.get('Boto', 'boto_location', '/usr/share/python-support/python-boto/boto')
+ self.run('git pull', cwd=location)
+ if update.find(':') >= 0:
+ method, version = update.split(':')
+ else:
+ version = 'master'
+ self.run('git checkout %s' % version, cwd=location)
+ else:
+ # first remove the symlink needed when running from subversion
+ self.run('rm /usr/local/lib/python2.5/site-packages/boto')
+ self.run('easy_install %s' % update)
+
+ def fetch_s3_file(self, s3_file):
+ try:
+ from boto.utils import fetch_file
+ f = fetch_file(s3_file)
+ path = os.path.join(self.working_dir, s3_file.split("/")[-1])
+ open(path, "w").write(f.read())
+ except:
+ boto.log.exception('Problem Retrieving file: %s' % s3_file)
+ path = None
+ return path
+
+ def load_packages(self):
+ package_str = boto.config.get('Pyami', 'packages')
+ if package_str:
+ packages = package_str.split(',')
+ for package in packages:
+ package = package.strip()
+ if package.startswith('s3:'):
+ package = self.fetch_s3_file(package)
+ if package:
+ # if the "package" is really a .py file, it doesn't have to
+ # be installed, just being in the working dir is enough
+ if not package.endswith('.py'):
+ self.run('easy_install -Z %s' % package, exit_on_error=False)
+
+ def main(self):
+ self.create_working_dir()
+ self.load_boto()
+ self.load_packages()
+ self.notify('Bootstrap Completed for %s' % boto.config.get_instance('instance-id'))
+
+if __name__ == "__main__":
+ # because bootstrap starts before any logging configuration can be loaded from
+ # the boto config files, we will manually enable logging to /var/log/boto.log
+ boto.set_file_logger('bootstrap', '/var/log/boto.log')
+ bs = Bootstrap()
+ bs.main()
diff --git a/boto/pyami/config.py b/boto/pyami/config.py
new file mode 100644
index 0000000..f4613ab
--- /dev/null
+++ b/boto/pyami/config.py
@@ -0,0 +1,203 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import StringIO, os, re
+import ConfigParser
+import boto
+
+BotoConfigPath = '/etc/boto.cfg'
+BotoConfigLocations = [BotoConfigPath]
+if 'BOTO_CONFIG' in os.environ:
+ BotoConfigLocations = [os.path.expanduser(os.environ['BOTO_CONFIG'])]
+elif 'HOME' in os.environ:
+ UserConfigPath = os.path.expanduser('~/.boto')
+ BotoConfigLocations.append(UserConfigPath)
+else:
+ UserConfigPath = None
+
+class Config(ConfigParser.SafeConfigParser):
+
+ def __init__(self, path=None, fp=None, do_load=True):
+ ConfigParser.SafeConfigParser.__init__(self, {'working_dir' : '/mnt/pyami',
+ 'debug' : '0'})
+ if do_load:
+ if path:
+ self.load_from_path(path)
+ elif fp:
+ self.readfp(fp)
+ else:
+ self.read(BotoConfigLocations)
+ if "AWS_CREDENTIAL_FILE" in os.environ:
+ self.load_credential_file(os.path.expanduser(os.environ['AWS_CREDENTIAL_FILE']))
+
+ def load_credential_file(self, path):
+ """Load a credential file as is setup like the Java utilities"""
+ c_data = StringIO.StringIO()
+ c_data.write("[Credentials]\n")
+ for line in open(path, "r").readlines():
+ c_data.write(line.replace("AWSAccessKeyId", "aws_access_key_id").replace("AWSSecretKey", "aws_secret_access_key"))
+ c_data.seek(0)
+ self.readfp(c_data)
+
+ def load_from_path(self, path):
+ file = open(path)
+ for line in file.readlines():
+ match = re.match("^#import[\s\t]*([^\s^\t]*)[\s\t]*$", line)
+ if match:
+ extended_file = match.group(1)
+ (dir, file) = os.path.split(path)
+ self.load_from_path(os.path.join(dir, extended_file))
+ self.read(path)
+
+ def save_option(self, path, section, option, value):
+ """
+ Write the specified Section.Option to the config file specified by path.
+ Replace any previous value. If the path doesn't exist, create it.
+        Also add the option to the in-memory config.
+ """
+ config = ConfigParser.SafeConfigParser()
+ config.read(path)
+ if not config.has_section(section):
+ config.add_section(section)
+ config.set(section, option, value)
+ fp = open(path, 'w')
+ config.write(fp)
+ fp.close()
+ if not self.has_section(section):
+ self.add_section(section)
+ self.set(section, option, value)
+
+ def save_user_option(self, section, option, value):
+ self.save_option(UserConfigPath, section, option, value)
+
+ def save_system_option(self, section, option, value):
+ self.save_option(BotoConfigPath, section, option, value)
+
+ def get_instance(self, name, default=None):
+ try:
+ val = self.get('Instance', name)
+ except:
+ val = default
+ return val
+
+ def get_user(self, name, default=None):
+ try:
+ val = self.get('User', name)
+ except:
+ val = default
+ return val
+
+ def getint_user(self, name, default=0):
+ try:
+ val = self.getint('User', name)
+ except:
+ val = default
+ return val
+
+ def get_value(self, section, name, default=None):
+ return self.get(section, name, default)
+
+ def get(self, section, name, default=None):
+ try:
+ val = ConfigParser.SafeConfigParser.get(self, section, name)
+ except:
+ val = default
+ return val
+
+ def getint(self, section, name, default=0):
+ try:
+ val = ConfigParser.SafeConfigParser.getint(self, section, name)
+ except:
+ val = int(default)
+ return val
+
+ def getfloat(self, section, name, default=0.0):
+ try:
+ val = ConfigParser.SafeConfigParser.getfloat(self, section, name)
+ except:
+ val = float(default)
+ return val
+
+ def getbool(self, section, name, default=False):
+ if self.has_option(section, name):
+ val = self.get(section, name)
+ if val.lower() == 'true':
+ val = True
+ else:
+ val = False
+ else:
+ val = default
+ return val
+
+ def setbool(self, section, name, value):
+ if value:
+ self.set(section, name, 'true')
+ else:
+ self.set(section, name, 'false')
+
+ def dump(self):
+ s = StringIO.StringIO()
+ self.write(s)
+ print s.getvalue()
+
+ def dump_safe(self, fp=None):
+ if not fp:
+ fp = StringIO.StringIO()
+ for section in self.sections():
+ fp.write('[%s]\n' % section)
+ for option in self.options(section):
+ if option == 'aws_secret_access_key':
+ fp.write('%s = xxxxxxxxxxxxxxxxxx\n' % option)
+ else:
+ fp.write('%s = %s\n' % (option, self.get(section, option)))
+
+ def dump_to_sdb(self, domain_name, item_name):
+ import simplejson
+ sdb = boto.connect_sdb()
+ domain = sdb.lookup(domain_name)
+ if not domain:
+ domain = sdb.create_domain(domain_name)
+ item = domain.new_item(item_name)
+ item.active = False
+ for section in self.sections():
+ d = {}
+ for option in self.options(section):
+ d[option] = self.get(section, option)
+ item[section] = simplejson.dumps(d)
+ item.save()
+
+ def load_from_sdb(self, domain_name, item_name):
+ import simplejson
+ sdb = boto.connect_sdb()
+ domain = sdb.lookup(domain_name)
+ item = domain.get_item(item_name)
+ for section in item.keys():
+ if not self.has_section(section):
+ self.add_section(section)
+ d = simplejson.loads(item[section])
+ for attr_name in d.keys():
+ attr_value = d[attr_name]
+ if attr_value == None:
+ attr_value = 'None'
+ if isinstance(attr_value, bool):
+ self.setbool(section, attr_name, attr_value)
+ else:
+ self.set(section, attr_name, attr_value)
diff --git a/boto/pyami/copybot.cfg b/boto/pyami/copybot.cfg
new file mode 100644
index 0000000..cbfdc5a
--- /dev/null
+++ b/boto/pyami/copybot.cfg
@@ -0,0 +1,60 @@
+#
+# Your AWS Credentials
+#
+[Credentials]
+aws_access_key_id = <AWS Access Key Here>
+aws_secret_access_key = <AWS Secret Key Here>
+
+#
+# If you want to use a separate set of credentials when writing
+# to the destination bucket, put them here
+#dest_aws_access_key_id = <AWS Access Key Here>
+#dest_aws_secret_access_key = <AWS Secret Key Here>
+
+#
+# Fill out this section if you want emails from CopyBot
+# when it starts and stops
+#
+[Notification]
+#smtp_host = <your smtp host>
+#smtp_user = <your smtp username, if necessary>
+#smtp_pass = <your smtp password, if necessary>
+#smtp_from = <email address for From: field>
+#smtp_to = <email address for To: field>
+
+#
+# If you leave this section as is, it will automatically
+# update boto from subversion upon start up.
+# If you don't want that to happen, comment this out
+#
+[Boto]
+boto_location = /usr/local/boto
+boto_update = svn:HEAD
+
+#
+# This tells the Pyami code in boto what scripts
+# to run during startup
+#
+[Pyami]
+scripts = boto.pyami.copybot.CopyBot
+
+#
+# Source bucket and Destination Bucket, obviously.
+# If the Destination bucket does not exist, it will
+# attempt to create it.
+# If exit_on_completion is false, the instance
+# will keep running after the copy operation is
+# complete which might be handy for debugging.
+# If copy_acls is false, the ACL's will not be
+# copied with the objects to the new bucket.
+# If replace_dst is false, copybot
+# will only store the source file in the dest if
+# that file does not already exist. If it's true
+# it will replace it even if it does exist.
+#
+[CopyBot]
+src_bucket = <your source bucket name>
+dst_bucket = <your destination bucket name>
+exit_on_completion = true
+copy_acls = true
+replace_dst = true
diff --git a/boto/pyami/copybot.py b/boto/pyami/copybot.py
new file mode 100644
index 0000000..ed397cb
--- /dev/null
+++ b/boto/pyami/copybot.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto
+from boto.pyami.scriptbase import ScriptBase
+import os, StringIO
+
+class CopyBot(ScriptBase):
+
+ def __init__(self):
+ ScriptBase.__init__(self)
+ self.wdir = boto.config.get('Pyami', 'working_dir')
+ self.log_file = '%s.log' % self.instance_id
+ self.log_path = os.path.join(self.wdir, self.log_file)
+ boto.set_file_logger(self.name, self.log_path)
+ self.src_name = boto.config.get(self.name, 'src_bucket')
+ self.dst_name = boto.config.get(self.name, 'dst_bucket')
+ self.replace = boto.config.getbool(self.name, 'replace_dst', True)
+ s3 = boto.connect_s3()
+ self.src = s3.lookup(self.src_name)
+ if not self.src:
+ boto.log.error('Source bucket does not exist: %s' % self.src_name)
+ dest_access_key = boto.config.get(self.name, 'dest_aws_access_key_id', None)
+ if dest_access_key:
+ dest_secret_key = boto.config.get(self.name, 'dest_aws_secret_access_key', None)
+ s3 = boto.connect(dest_access_key, dest_secret_key)
+ self.dst = s3.lookup(self.dst_name)
+ if not self.dst:
+ self.dst = s3.create_bucket(self.dst_name)
+
+ def copy_bucket_acl(self):
+ if boto.config.get(self.name, 'copy_acls', True):
+ acl = self.src.get_xml_acl()
+ self.dst.set_xml_acl(acl)
+
+ def copy_key_acl(self, src, dst):
+ if boto.config.get(self.name, 'copy_acls', True):
+ acl = src.get_xml_acl()
+ dst.set_xml_acl(acl)
+
+ def copy_keys(self):
+ boto.log.info('src=%s' % self.src.name)
+ boto.log.info('dst=%s' % self.dst.name)
+ try:
+ for key in self.src:
+ if not self.replace:
+ exists = self.dst.lookup(key.name)
+ if exists:
+ boto.log.info('key=%s already exists in %s, skipping' % (key.name, self.dst.name))
+ continue
+ boto.log.info('copying %d bytes from key=%s' % (key.size, key.name))
+ prefix, base = os.path.split(key.name)
+ path = os.path.join(self.wdir, base)
+ key.get_contents_to_filename(path)
+ new_key = self.dst.new_key(key.name)
+ new_key.set_contents_from_filename(path)
+ self.copy_key_acl(key, new_key)
+ os.unlink(path)
+ except:
+ boto.log.exception('Error copying key: %s' % key.name)
+
+ def copy_log(self):
+ key = self.dst.new_key(self.log_file)
+ key.set_contents_from_filename(self.log_path)
+
+ def main(self):
+ fp = StringIO.StringIO()
+ boto.config.dump_safe(fp)
+ self.notify('%s (%s) Starting' % (self.name, self.instance_id), fp.getvalue())
+ if self.src and self.dst:
+ self.copy_keys()
+ if self.dst:
+ self.copy_log()
+ self.notify('%s (%s) Stopping' % (self.name, self.instance_id),
+ 'Copy Operation Complete')
+ if boto.config.getbool(self.name, 'exit_on_completion', True):
+ ec2 = boto.connect_ec2()
+ ec2.terminate_instances([self.instance_id])
+
diff --git a/boto/pyami/helloworld.py b/boto/pyami/helloworld.py
new file mode 100644
index 0000000..680873c
--- /dev/null
+++ b/boto/pyami/helloworld.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
class HelloWorld(ScriptBase):
    """Minimal pyami demo script: logs a greeting when run."""

    def main(self):
        # ScriptBase.log sends the message to the boto log.
        self.log('Hello World!!!')
+
diff --git a/boto/pyami/installers/__init__.py b/boto/pyami/installers/__init__.py
new file mode 100644
index 0000000..cc68926
--- /dev/null
+++ b/boto/pyami/installers/__init__.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.scriptbase import ScriptBase
+
+
class Installer(ScriptBase):
    """
    Abstract base class for installers.

    Concrete platform installers (e.g. the Ubuntu installer) must
    override every method below; each one raises NotImplementedError
    here so a missing override fails loudly instead of silently doing
    nothing.
    """

    def add_cron(self, name, minute, hour, mday, month, wday, who, command, env=None):
        """
        Add an entry to the system crontab.
        """
        raise NotImplementedError

    def add_init_script(self, file):
        """
        Add this file to the init.d directory
        """
        # This method used to be an accidental no-op (docstring only), so a
        # subclass that forgot to override it silently skipped installing
        # the init script.  Raise like every other abstract method here.
        raise NotImplementedError

    def add_env(self, key, value):
        """
        Add an environment variable
        """
        raise NotImplementedError

    def stop(self, service_name):
        """
        Stop a service.
        """
        raise NotImplementedError

    def start(self, service_name):
        """
        Start a service.
        """
        raise NotImplementedError

    def install(self):
        """
        Do whatever is necessary to "install" the package.
        """
        raise NotImplementedError
+
diff --git a/boto/pyami/installers/ubuntu/__init__.py b/boto/pyami/installers/ubuntu/__init__.py
new file mode 100644
index 0000000..60ee658
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/__init__.py
@@ -0,0 +1,22 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
diff --git a/boto/pyami/installers/ubuntu/apache.py b/boto/pyami/installers/ubuntu/apache.py
new file mode 100644
index 0000000..febc2df
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/apache.py
@@ -0,0 +1,43 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+
class Apache(Installer):
    """
    Install apache2, mod_python, and libapache2-svn
    """

    def install(self):
        # Refresh the package index, then install apache and mod_python.
        self.run("apt-get update")
        self.run('apt-get -y install apache2', notify=True, exit_on_error=True)
        self.run('apt-get -y install libapache2-mod-python', notify=True, exit_on_error=True)
        # Enable the apache modules we rely on (same order as before).
        for module in ('rewrite', 'ssl', 'proxy', 'proxy_ajp'):
            self.run('a2enmod %s' % module, notify=True, exit_on_error=True)

        # Hard reboot the apache2 server to enable these module
        self.stop("apache2")
        self.start("apache2")

    def main(self):
        self.install()
diff --git a/boto/pyami/installers/ubuntu/ebs.py b/boto/pyami/installers/ubuntu/ebs.py
new file mode 100644
index 0000000..204c9b1
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/ebs.py
@@ -0,0 +1,220 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+Automated installer to attach, format and mount an EBS volume.
+This installer assumes that you want the volume formatted as
+an XFS file system. To drive this installer, you need the
+following section in the boto config passed to the new instance.
+You also need to install dateutil by listing python-dateutil
+in the list of packages to be installed in the Pyami section
+of your boto config file.
+
+If there is already a device mounted at the specified mount point,
+the installer assumes that it is the ephemeral drive and unmounts
+it, remounts it as /tmp and chmods it to 777.
+
+Config file section::
+
+ [EBS]
+ volume_id = <the id of the EBS volume, should look like vol-xxxxxxxx>
+ logical_volume_name = <the name of the logical volume that contains
+ a reference to the physical volume to be mounted. If this parameter
+ is supplied, it overrides the volume_id setting.>
+ device = <the linux device the EBS volume should be mounted on>
+ mount_point = <directory to mount device, defaults to /ebs>
+
+"""
+import boto
+from boto.manage.volume import Volume
+import os, time
+from boto.pyami.installers.ubuntu.installer import Installer
+from string import Template
+
# Template for the generated /usr/local/bin/ebs_backup script.  The ${...}
# placeholders are filled in by string.Template.substitute in
# EBSInstaller.create_backup_script.  The generated script freezes the XFS
# file system, snapshots the EBS volume, notifies on failure, and always
# thaws the file system again.
# NOTE: the original template contained two identical consecutive
# ``except Exception`` clauses (the second was unreachable) and logged
# "Snapshot created" inside the failure handler; both removed.
BackupScriptTemplate = """#!/usr/bin/env python
# Backup EBS volume
import boto
from boto.pyami.scriptbase import ScriptBase
import traceback

class Backup(ScriptBase):

    def main(self):
        try:
            ec2 = boto.connect_ec2()
            self.run("/usr/sbin/xfs_freeze -f ${mount_point}")
            snapshot = ec2.create_snapshot('${volume_id}')
            boto.log.info("Snapshot created: %s " % snapshot)
        except Exception, e:
            self.notify(subject="${instance_id} Backup Failed", body=traceback.format_exc())
        finally:
            self.run("/usr/sbin/xfs_freeze -u ${mount_point}")

if __name__ == "__main__":
    b = Backup()
    b.main()
"""
+
# Stand-alone script installed verbatim as /usr/local/bin/ebs_backup_cleanup
# by EBSInstaller.create_backup_cleanup_script; run from cron, it trims old
# snapshots for every registered Volume.
BackupCleanupScript= """#!/usr/bin/env python
import boto
from boto.manage.volume import Volume

# Cleans Backups of EBS volumes

for v in Volume.all():
    v.trim_snapshots(True)
"""
+
+class EBSInstaller(Installer):
+ """
+ Set up the EBS stuff
+ """
+
+ def __init__(self, config_file=None):
+ Installer.__init__(self, config_file)
+ self.instance_id = boto.config.get('Instance', 'instance-id')
+ self.device = boto.config.get('EBS', 'device', '/dev/sdp')
+ self.volume_id = boto.config.get('EBS', 'volume_id')
+ self.logical_volume_name = boto.config.get('EBS', 'logical_volume_name')
+ self.mount_point = boto.config.get('EBS', 'mount_point', '/ebs')
+
+ def attach(self):
+ ec2 = boto.connect_ec2()
+ if self.logical_volume_name:
+ # if a logical volume was specified, override the specified volume_id
+ # (if there was one) with the current AWS volume for the logical volume:
+ logical_volume = Volume.find(name = self.logical_volume_name).next()
+ self.volume_id = logical_volume._volume_id
+ volume = ec2.get_all_volumes([self.volume_id])[0]
+ # wait for the volume to be available. The volume may still be being created
+ # from a snapshot.
+ while volume.update() != 'available':
+ boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status))
+ time.sleep(5)
+ instance = ec2.get_all_instances([self.instance_id])[0].instances[0]
+ attempt_attach = True
+ while attempt_attach:
+ try:
+ ec2.attach_volume(self.volume_id, self.instance_id, self.device)
+ attempt_attach = False
+ except EC2ResponseError, e:
+ if e.error_code != 'IncorrectState':
+ # if there's an EC2ResonseError with the code set to IncorrectState, delay a bit for ec2
+ # to realize the instance is running, then try again. Otherwise, raise the error:
+ boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.' % (self.volume_id, self.instance_id, e.errors))
+ time.sleep(2)
+ else:
+ raise e
+ boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device))
+ # now wait for the volume device to appear
+ while not os.path.exists(self.device):
+ boto.log.info('%s still does not exist, waiting 2 seconds' % self.device)
+ time.sleep(2)
+
+ def make_fs(self):
+ boto.log.info('make_fs...')
+ has_fs = self.run('fsck %s' % self.device)
+ if has_fs != 0:
+ self.run('mkfs -t xfs %s' % self.device)
+
+ def create_backup_script(self):
+ t = Template(BackupScriptTemplate)
+ s = t.substitute(volume_id=self.volume_id, instance_id=self.instance_id,
+ mount_point=self.mount_point)
+ fp = open('/usr/local/bin/ebs_backup', 'w')
+ fp.write(s)
+ fp.close()
+ self.run('chmod +x /usr/local/bin/ebs_backup')
+
+ def create_backup_cleanup_script(self):
+ fp = open('/usr/local/bin/ebs_backup_cleanup', 'w')
+ fp.write(BackupCleanupScript)
+ fp.close()
+ self.run('chmod +x /usr/local/bin/ebs_backup_cleanup')
+
+ def handle_mount_point(self):
+ boto.log.info('handle_mount_point')
+ if not os.path.isdir(self.mount_point):
+ boto.log.info('making directory')
+ # mount directory doesn't exist so create it
+ self.run("mkdir %s" % self.mount_point)
+ else:
+ boto.log.info('directory exists already')
+ self.run('mount -l')
+ lines = self.last_command.output.split('\n')
+ for line in lines:
+ t = line.split()
+ if t and t[2] == self.mount_point:
+ # something is already mounted at the mount point
+ # unmount that and mount it as /tmp
+ if t[0] != self.device:
+ self.run('umount %s' % self.mount_point)
+ self.run('mount %s /tmp' % t[0])
+ break
+ self.run('chmod 777 /tmp')
+ # Mount up our new EBS volume onto mount_point
+ self.run("mount %s %s" % (self.device, self.mount_point))
+ self.run('xfs_growfs %s' % self.mount_point)
+
+ def update_fstab(self):
+ f = open("/etc/fstab", "a")
+ f.write('%s\t%s\txfs\tdefaults 0 0\n' % (self.device, self.mount_point))
+ f.close()
+
+ def install(self):
+ # First, find and attach the volume
+ self.attach()
+
+ # Install the xfs tools
+ self.run('apt-get -y install xfsprogs xfsdump')
+
+ # Check to see if the filesystem was created or not
+ self.make_fs()
+
+ # create the /ebs directory for mounting
+ self.handle_mount_point()
+
+ # create the backup script
+ self.create_backup_script()
+
+ # Set up the backup script
+ minute = boto.config.get('EBS', 'backup_cron_minute', '0')
+ hour = boto.config.get('EBS', 'backup_cron_hour', '4,16')
+ self.add_cron("ebs_backup", "/usr/local/bin/ebs_backup", minute=minute, hour=hour)
+
+ # Set up the backup cleanup script
+ minute = boto.config.get('EBS', 'backup_cleanup_cron_minute')
+ hour = boto.config.get('EBS', 'backup_cleanup_cron_hour')
+ if (minute != None) and (hour != None):
+ self.create_backup_cleanup_script();
+ self.add_cron("ebs_backup_cleanup", "/usr/local/bin/ebs_backup_cleanup", minute=minute, hour=hour)
+
+ # Set up the fstab
+ self.update_fstab()
+
+ def main(self):
+ if not os.path.exists(self.device):
+ self.install()
+ else:
+ boto.log.info("Device %s is already attached, skipping EBS Installer" % self.device)
diff --git a/boto/pyami/installers/ubuntu/installer.py b/boto/pyami/installers/ubuntu/installer.py
new file mode 100644
index 0000000..370d63f
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/installer.py
@@ -0,0 +1,96 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import boto.pyami.installers
+import os
+import os.path
+import stat
+import boto
+import random
+from pwd import getpwnam
+
class Installer(boto.pyami.installers.Installer):
    """
    Base Installer class for Ubuntu-based AMI's
    """

    def add_cron(self, name, command, minute="*", hour="*", mday="*", month="*", wday="*", who="root", env=None):
        """
        Write a file to /etc/cron.d to schedule a command
        env is a dict containing environment variables you want to set in the file
        name will be used as the name of the file
        """
        # 'random' spreads scheduled jobs across the hour/day so many
        # instances don't all fire at once.
        if minute == 'random':
            minute = str(random.randrange(60))
        if hour == 'random':
            hour = str(random.randrange(24))
        cron_file = open('/etc/cron.d/%s' % name, "w")
        if env:
            for key, value in env.items():
                cron_file.write('%s=%s\n' % (key, value))
        cron_file.write('%s %s %s %s %s %s %s\n' % (minute, hour, mday, month, wday, who, command))
        cron_file.close()

    def add_init_script(self, file, name):
        """
        Add this file to the init.d directory
        """
        script_path = os.path.join("/etc/init.d", name)
        script = open(script_path, "w")
        script.write(file)
        script.close()
        # Owner read/write/execute, then register with update-rc.d.
        os.chmod(script_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
        self.run("/usr/sbin/update-rc.d %s defaults" % name)

    def add_env(self, key, value):
        """
        Add an environment variable.
        For Ubuntu, the best place is /etc/environment. Values placed here do
        not need to be exported.
        """
        boto.log.info('Adding env variable: %s=%s' % (key, value))
        # Keep a pristine copy of /etc/environment the first time we touch it.
        if not os.path.exists("/etc/environment.orig"):
            self.run('cp /etc/environment /etc/environment.orig', notify=False, exit_on_error=False)
        env_file = open('/etc/environment', 'a')
        env_file.write('\n%s="%s"' % (key, value))
        env_file.close()
        os.environ[key] = value

    def stop(self, service_name):
        """Stop a System-V service via its init script."""
        self.run('/etc/init.d/%s stop' % service_name)

    def start(self, service_name):
        """Start a System-V service via its init script."""
        self.run('/etc/init.d/%s start' % service_name)

    def create_user(self, user):
        """
        Create a user on the local system
        """
        self.run("useradd -m %s" % user)
        return getpwnam(user)

    def install(self):
        """
        This is the only method you need to override
        """
        raise NotImplementedError
+
diff --git a/boto/pyami/installers/ubuntu/mysql.py b/boto/pyami/installers/ubuntu/mysql.py
new file mode 100644
index 0000000..490e5db
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/mysql.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+"""
+This installer will install mysql-server on an Ubuntu machine.
+In addition to the normal installation done by apt-get, it will
+also configure the new MySQL server to store it's data files in
+a different location. By default, this is /mnt but that can be
+configured in the [MySQL] section of the boto config file passed
+to the instance.
+"""
+from boto.pyami.installers.ubuntu.installer import Installer
+import os
+import boto
+from boto.utils import ShellCommand
+from ConfigParser import SafeConfigParser
+import time
+
+ConfigSection = """
+[MySQL]
+root_password = <will be used as MySQL root password, default none>
+data_dir = <new data dir for MySQL, default is /mnt>
+"""
+
class MySQL(Installer):
    """
    Install mysql-server via apt-get and relocate its data directory.

    change_data_dir assumes the MySQL root account has no password,
    which is the state a fresh apt-get install leaves it in.
    """

    def install(self):
        # Refresh the package index, then install the server itself.
        self.run('apt-get update')
        self.run('apt-get -y install mysql-server', notify=True, exit_on_error=True)

#    def set_root_password(self, password=None):
#        if not password:
#            password = boto.config.get('MySQL', 'root_password')
#        if password:
#            self.run('mysqladmin -u root password %s' % password)
#        return password

    def change_data_dir(self, password=None):
        # Move MySQL's datadir to [MySQL] data_dir (default /mnt),
        # typically the large ephemeral volume.
        # NOTE(review): the ``password`` parameter is never read - the else
        # branch below rebinds it from /etc/mysql/debian.cnf.
        data_dir = boto.config.get('MySQL', 'data_dir', '/mnt')
        fresh_install = False;
        # 'mysqladmin ping' exits 0 iff a mysql server is answering.
        is_mysql_running_command = ShellCommand('mysqladmin ping') # exit status 0 if mysql is running
        is_mysql_running_command.run()
        if is_mysql_running_command.getStatus() == 0:
            # mysql is running. This is the state apt-get will leave it in. If it isn't running,
            # that means mysql was already installed on the AMI and there's no need to stop it,
            # saving 40 seconds on instance startup.
            time.sleep(10) #trying to stop mysql immediately after installing it fails
            # We need to wait until mysql creates the root account before we kill it
            # or bad things will happen
            i = 0
            while self.run("echo 'quit' | mysql -u root") != 0 and i<5:
                time.sleep(5)
                i = i + 1
            self.run('/etc/init.d/mysql stop')
            self.run("pkill -9 mysql")

        mysql_path = os.path.join(data_dir, 'mysql')
        if not os.path.exists(mysql_path):
            self.run('mkdir %s' % mysql_path)
            fresh_install = True;
        self.run('chown -R mysql:mysql %s' % mysql_path)
        # Drop-in config pointing mysqld at the relocated datadir.
        fp = open('/etc/mysql/conf.d/use_mnt.cnf', 'w')
        fp.write('# created by pyami\n')
        fp.write('# use the %s volume for data\n' % data_dir)
        fp.write('[mysqld]\n')
        fp.write('datadir = %s\n' % mysql_path)
        fp.write('log_bin = %s\n' % os.path.join(mysql_path, 'mysql-bin.log'))
        fp.close()
        if fresh_install:
            # Seed the new datadir from the stock one, then start mysqld.
            self.run('cp -pr /var/lib/mysql/* %s/' % mysql_path)
            self.start('mysql')
        else:
            #get the password ubuntu expects to use:
            config_parser = SafeConfigParser()
            config_parser.read('/etc/mysql/debian.cnf')
            password = config_parser.get('client', 'password')
            # start the mysql daemon, then mysql with the required grant statement piped into it:
            self.start('mysql')
            time.sleep(10) #time for mysql to start
            grant_command = "echo \"GRANT ALL PRIVILEGES ON *.* TO 'debian-sys-maint'@'localhost' IDENTIFIED BY '%s' WITH GRANT OPTION;\" | mysql" % password
            # Retry until mysqld is up and the grant succeeds.
            while self.run(grant_command) != 0:
                time.sleep(5)
            # leave mysqld running

    def main(self):
        self.install()
        # change_data_dir runs 'mysql -u root' which assumes there is no mysql password, i
        # and changing that is too ugly to be worth it:
        #self.set_root_password()
        self.change_data_dir()
+
diff --git a/boto/pyami/installers/ubuntu/trac.py b/boto/pyami/installers/ubuntu/trac.py
new file mode 100644
index 0000000..ef83af7
--- /dev/null
+++ b/boto/pyami/installers/ubuntu/trac.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2008 Chris Moyer http://coredumped.org
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.pyami.installers.ubuntu.installer import Installer
+import boto
+import os
+
class Trac(Installer):
    """
    Install Trac and DAV-SVN
    Sets up a Vhost pointing to [Trac]->home
    Using the config parameter [Trac]->hostname
    Sets up a trac environment for every directory found under [Trac]->data_dir

    [Trac]
    name = My Foo Server
    hostname = trac.foo.com
    home = /mnt/sites/trac
    data_dir = /mnt/trac
    svn_dir = /mnt/subversion
    server_admin = root@foo.com
    sdb_auth_domain = users
    # Optional
    SSLCertificateFile = /mnt/ssl/foo.crt
    SSLCertificateKeyFile = /mnt/ssl/foo.key
    SSLCertificateChainFile = /mnt/ssl/FooCA.crt

    """

    def install(self):
        # Trac itself plus the Apache DAV-SVN module.
        self.run('apt-get -y install trac', notify=True, exit_on_error=True)
        self.run('apt-get -y install libapache2-svn', notify=True, exit_on_error=True)
        # Apache modules required by the vhost written in setup_vhost.
        self.run("a2enmod ssl")
        self.run("a2enmod mod_python")
        self.run("a2enmod dav_svn")
        self.run("a2enmod rewrite")
        # Make sure that boto.log is writable by everyone so that subversion post-commit hooks can
        # write to it.
        self.run("touch /var/log/boto.log")
        self.run("chmod a+w /var/log/boto.log")

    def setup_vhost(self):
        # Generate an Apache vhost for [Trac]->hostname and enable it.
        # When SSLCertificateFile is configured, port 80 becomes a
        # redirect to https and the real site is served on *:443.
        domain = boto.config.get("Trac", "hostname").strip()
        if domain:
            domain_info = domain.split('.')
            # The sites-available file is named after the leftmost DNS label.
            cnf = open("/etc/apache2/sites-available/%s" % domain_info[0], "w")
            cnf.write("NameVirtualHost *:80\n")
            if boto.config.get("Trac", "SSLCertificateFile"):
                cnf.write("NameVirtualHost *:443\n\n")
                cnf.write("<VirtualHost *:80>\n")
                cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
                cnf.write("\tServerName %s\n" % domain)
                cnf.write("\tRewriteEngine On\n")
                cnf.write("\tRewriteRule ^(.*)$ https://%s$1\n" % domain)
                cnf.write("</VirtualHost>\n\n")

                cnf.write("<VirtualHost *:443>\n")
            else:
                cnf.write("<VirtualHost *:80>\n")

            cnf.write("\tServerAdmin %s\n" % boto.config.get("Trac", "server_admin").strip())
            cnf.write("\tServerName %s\n" % domain)
            cnf.write("\tDocumentRoot %s\n" % boto.config.get("Trac", "home").strip())

            cnf.write("\t<Directory %s>\n" % boto.config.get("Trac", "home").strip())
            cnf.write("\t\tOptions FollowSymLinks Indexes MultiViews\n")
            cnf.write("\t\tAllowOverride All\n")
            cnf.write("\t\tOrder allow,deny\n")
            cnf.write("\t\tallow from all\n")
            cnf.write("\t</Directory>\n")

            # Basic-auth protect the whole site.
            cnf.write("\t<Location />\n")
            cnf.write("\t\tAuthType Basic\n")
            cnf.write("\t\tAuthName \"%s\"\n" % boto.config.get("Trac", "name"))
            cnf.write("\t\tRequire valid-user\n")
            cnf.write("\t\tAuthUserFile /mnt/apache/passwd/passwords\n")
            cnf.write("\t</Location>\n")

            # One mod_python Trac handler per environment directory
            # (hidden dot-directories are skipped).
            data_dir = boto.config.get("Trac", "data_dir")
            for env in os.listdir(data_dir):
                if(env[0] != "."):
                    cnf.write("\t<Location /trac/%s>\n" % env)
                    cnf.write("\t\tSetHandler mod_python\n")
                    cnf.write("\t\tPythonInterpreter main_interpreter\n")
                    cnf.write("\t\tPythonHandler trac.web.modpython_frontend\n")
                    cnf.write("\t\tPythonOption TracEnv %s/%s\n" % (data_dir, env))
                    cnf.write("\t\tPythonOption TracUriRoot /trac/%s\n" % env)
                    cnf.write("\t</Location>\n")

            # One DAV-SVN location per repository directory.
            svn_dir = boto.config.get("Trac", "svn_dir")
            for env in os.listdir(svn_dir):
                if(env[0] != "."):
                    cnf.write("\t<Location /svn/%s>\n" % env)
                    cnf.write("\t\tDAV svn\n")
                    cnf.write("\t\tSVNPath %s/%s\n" % (svn_dir, env))
                    cnf.write("\t</Location>\n")

            cnf.write("\tErrorLog /var/log/apache2/error.log\n")
            cnf.write("\tLogLevel warn\n")
            cnf.write("\tCustomLog /var/log/apache2/access.log combined\n")
            cnf.write("\tServerSignature On\n")
            # Optional SSL material; key/chain only matter when a
            # certificate is configured.
            SSLCertificateFile = boto.config.get("Trac", "SSLCertificateFile")
            if SSLCertificateFile:
                cnf.write("\tSSLEngine On\n")
                cnf.write("\tSSLCertificateFile %s\n" % SSLCertificateFile)

                SSLCertificateKeyFile = boto.config.get("Trac", "SSLCertificateKeyFile")
                if SSLCertificateKeyFile:
                    cnf.write("\tSSLCertificateKeyFile %s\n" % SSLCertificateKeyFile)

                SSLCertificateChainFile = boto.config.get("Trac", "SSLCertificateChainFile")
                if SSLCertificateChainFile:
                    cnf.write("\tSSLCertificateChainFile %s\n" % SSLCertificateChainFile)
            cnf.write("</VirtualHost>\n")
            cnf.close()
            self.run("a2ensite %s" % domain_info[0])
            self.run("/etc/init.d/apache2 force-reload")

    def main(self):
        self.install()
        self.setup_vhost()
diff --git a/boto/pyami/launch_ami.py b/boto/pyami/launch_ami.py
new file mode 100755
index 0000000..243d56d
--- /dev/null
+++ b/boto/pyami/launch_ami.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import getopt
+import sys
+import imp
+import time
+import boto
+
# Help text printed by usage().  Fixed to use one consistent name for the
# -s option ("script_name", matching the option parser) and to show the
# -i/-o queue options in the SYNOPSIS, since they are documented below.
usage_string = """
SYNOPSIS
    launch_ami.py -a ami_id [-b script_bucket] [-s script_name]
                  [-m module] [-c class_name] [-r]
                  [-g group] [-k key_name] [-n num_instances]
                  [-i input_queue_name] [-o output_queue_name]
                  [-w] [extra_data]
    Where:
        ami_id - the id of the AMI you wish to launch
        module - The name of the Python module containing the class you
                 want to run when the instance is started.  If you use this
                 option the Python module must already be stored on the
                 instance in a location that is on the Python path.
        script_name - The name of a local Python module that you would like
                      to have copied to S3 and then run on the instance
                      when it is started.  The specified module must be
                      import'able (i.e. in your local Python path).  It
                      will then be copied to the specified bucket in S3
                      (see the -b option).  Once the new instance(s)
                      start up the script will be copied from S3 and then
                      run locally on the instance.
        class_name - The name of the class to be instantiated within the
                     module or script file specified.
        script_bucket - the name of the bucket in which the script will be
                        stored
        group - the name of the security group the instance will run in
        key_name - the name of the keypair to use when launching the AMI
        num_instances - how many instances of the AMI to launch (default 1)
        input_queue_name - Name of SQS to read input messages from
        output_queue_name - Name of SQS to write output messages to
        extra_data - additional name-value pairs that will be passed as
                     userdata to the newly launched instance.  These should
                     be of the form "name=value"
        The -r option reloads the Python module to S3 without launching
        another instance.  This can be useful during debugging to allow
        you to test a new version of your script without shutting down
        your instance and starting up another one.
        The -w option tells the script to run synchronously, meaning to
        wait until the instance is actually up and running.  It then prints
        the IP address and internal and external DNS names before exiting.
"""
+
def usage():
    """Print the command line help text and exit the process."""
    print usage_string
    sys.exit()
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'a:b:c:g:hi:k:m:n:o:rs:w',
+ ['ami', 'bucket', 'class', 'group', 'help',
+ 'inputqueue', 'keypair', 'module',
+ 'numinstances', 'outputqueue',
+ 'reload', 'script_name', 'wait'])
+ except:
+ usage()
+ params = {'module_name' : None,
+ 'script_name' : None,
+ 'class_name' : None,
+ 'script_bucket' : None,
+ 'group' : 'default',
+ 'keypair' : None,
+ 'ami' : None,
+ 'num_instances' : 1,
+ 'input_queue_name' : None,
+ 'output_queue_name' : None}
+ reload = None
+ wait = None
+ for o, a in opts:
+ if o in ('-a', '--ami'):
+ params['ami'] = a
+ if o in ('-b', '--bucket'):
+ params['script_bucket'] = a
+ if o in ('-c', '--class'):
+ params['class_name'] = a
+ if o in ('-g', '--group'):
+ params['group'] = a
+ if o in ('-h', '--help'):
+ usage()
+ if o in ('-i', '--inputqueue'):
+ params['input_queue_name'] = a
+ if o in ('-k', '--keypair'):
+ params['keypair'] = a
+ if o in ('-m', '--module'):
+ params['module_name'] = a
+ if o in ('-n', '--num_instances'):
+ params['num_instances'] = int(a)
+ if o in ('-o', '--outputqueue'):
+ params['output_queue_name'] = a
+ if o in ('-r', '--reload'):
+ reload = True
+ if o in ('-s', '--script'):
+ params['script_name'] = a
+ if o in ('-w', '--wait'):
+ wait = True
+
+ # check required fields
+ required = ['ami']
+ for pname in required:
+ if not params.get(pname, None):
+ print '%s is required' % pname
+ usage()
+ if params['script_name']:
+ # first copy the desired module file to S3 bucket
+ if reload:
+ print 'Reloading module %s to S3' % params['script_name']
+ else:
+ print 'Copying module %s to S3' % params['script_name']
+ l = imp.find_module(params['script_name'])
+ c = boto.connect_s3()
+ bucket = c.get_bucket(params['script_bucket'])
+ key = bucket.new_key(params['script_name']+'.py')
+ key.set_contents_from_file(l[0])
+ params['script_md5'] = key.md5
+ # we have everything we need, now build userdata string
+ l = []
+ for k, v in params.items():
+ if v:
+ l.append('%s=%s' % (k, v))
+ c = boto.connect_ec2()
+ l.append('aws_access_key_id=%s' % c.aws_access_key_id)
+ l.append('aws_secret_access_key=%s' % c.aws_secret_access_key)
+ for kv in args:
+ l.append(kv)
+ s = '|'.join(l)
+ if not reload:
+ rs = c.get_all_images([params['ami']])
+ img = rs[0]
+ r = img.run(user_data=s, key_name=params['keypair'],
+ security_groups=[params['group']],
+ max_count=params.get('num_instances', 1))
+ print 'AMI: %s - %s (Started)' % (params['ami'], img.location)
+ print 'Reservation %s contains the following instances:' % r.id
+ for i in r.instances:
+ print '\t%s' % i.id
+ if wait:
+ running = False
+ while not running:
+ time.sleep(30)
+ [i.update() for i in r.instances]
+ status = [i.state for i in r.instances]
+ print status
+ if status.count('running') == len(r.instances):
+ running = True
+ for i in r.instances:
+ print 'Instance: %s' % i.ami_launch_index
+ print 'Public DNS Name: %s' % i.public_dns_name
+ print 'Private DNS Name: %s' % i.private_dns_name
+
+if __name__ == "__main__":
+ main()
+
diff --git a/boto/pyami/scriptbase.py b/boto/pyami/scriptbase.py
new file mode 100644
index 0000000..90522ca
--- /dev/null
+++ b/boto/pyami/scriptbase.py
@@ -0,0 +1,44 @@
+import os
+import sys
+from boto.utils import ShellCommand, get_ts
+import boto
+import boto.utils
+
class ScriptBase:
    """Common base class for pyami scripts run on an instance.

    Provides boto-config-driven setup plus small helpers for
    notification, directory creation, unmounting and shell commands.
    Subclasses override main().
    """

    def __init__(self, config_file=None):
        # instance-id comes from the boto config written at boot;
        # falls back to 'default' when not running on an instance.
        self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
        self.name = self.__class__.__name__
        self.ts = get_ts()
        if config_file:
            boto.config.read(config_file)

    def notify(self, subject, body=''):
        """Send a notification (delegates to boto.utils.notify)."""
        boto.utils.notify(subject, body)

    def mkdir(self, path):
        """Create *path* if it does not already exist.

        NOTE(review): the bare except deliberately makes this best-effort;
        failures are only logged, never raised.
        """
        if not os.path.isdir(path):
            try:
                os.mkdir(path)
            except:
                boto.log.error('Error creating directory: %s' % path)

    def umount(self, path):
        """Unmount *path* if it is currently a mount point."""
        if os.path.ismount(path):
            self.run('umount %s' % path)

    def run(self, command, notify=True, exit_on_error=False, cwd=None):
        """Run *command* through ShellCommand and return its exit status.

        On non-zero status the error is logged, optionally sent via
        notify(), and with exit_on_error=True the process exits(-1).
        The ShellCommand is kept on self.last_command for inspection.
        """
        self.last_command = ShellCommand(command, cwd=cwd)
        if self.last_command.status != 0:
            boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
            if notify:
                self.notify('Error encountered', \
                            'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
                            (command, self.last_command.output))
            if exit_on_error:
                sys.exit(-1)
        return self.last_command.status

    def main(self):
        """Override in subclasses; the startup machinery calls this."""
        pass
+
diff --git a/boto/pyami/startup.py b/boto/pyami/startup.py
new file mode 100644
index 0000000..2093151
--- /dev/null
+++ b/boto/pyami/startup.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+import sys
+import boto
+from boto.utils import find_class
+from boto import config
+from boto.pyami.scriptbase import ScriptBase
+
+
class Startup(ScriptBase):
    """Pyami boot-time runner: instantiates and runs each script listed in
    the [Pyami] 'scripts' config option, then sends a completion notice."""

    def run_scripts(self):
        # 'scripts' is a comma-separated list of dotted "module.Class" names.
        scripts = config.get('Pyami', 'scripts')
        if scripts:
            for script in scripts.split(','):
                script = script.strip(" ")
                try:
                    # Split "package.module.Class" on the last dot into
                    # module path and class name.
                    pos = script.rfind('.')
                    if pos > 0:
                        mod_name = script[0:pos]
                        cls_name = script[pos+1:]
                        cls = find_class(mod_name, cls_name)
                        boto.log.info('Running Script: %s' % script)
                        s = cls()
                        s.main()
                    else:
                        boto.log.warning('Trouble parsing script: %s' % script)
                except Exception, e:
                    # Any failure aborts the whole startup sequence so later
                    # scripts don't run against a half-configured instance.
                    boto.log.exception('Problem Running Script: %s. Startup process halting.' % script)
                    raise e

    def main(self):
        self.run_scripts()
        self.notify('Startup Completed for %s' % config.get('Instance', 'instance-id'))
+
+if __name__ == "__main__":
+ if not config.has_section('loggers'):
+ boto.set_file_logger('startup', '/var/log/boto.log')
+ sys.path.append(config.get('Pyami', 'working_dir'))
+ su = Startup()
+ su.main()
diff --git a/boto/rds/__init__.py b/boto/rds/__init__.py
new file mode 100644
index 0000000..940815d
--- /dev/null
+++ b/boto/rds/__init__.py
@@ -0,0 +1,972 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import boto.utils
+import urllib
+from boto.connection import AWSQueryConnection
+from boto.rds.dbinstance import DBInstance
+from boto.rds.dbsecuritygroup import DBSecurityGroup
+from boto.rds.parametergroup import ParameterGroup
+from boto.rds.dbsnapshot import DBSnapshot
+from boto.rds.event import Event
+from boto.rds.regioninfo import RDSRegionInfo
+
def regions():
    """
    Get all available regions for the RDS service.

    Note: this list is hardcoded at this revision rather than discovered
    from the service.

    :rtype: list
    :return: A list of :class:`boto.rds.regioninfo.RDSRegionInfo`
    """
    return [RDSRegionInfo(name='us-east-1',
                          endpoint='rds.amazonaws.com'),
            RDSRegionInfo(name='eu-west-1',
                          endpoint='eu-west-1.rds.amazonaws.com'),
            RDSRegionInfo(name='us-west-1',
                          endpoint='us-west-1.rds.amazonaws.com'),
            RDSRegionInfo(name='ap-southeast-1',
                          endpoint='ap-southeast-1.rds.amazonaws.com')
            ]
+
def connect_to_region(region_name):
    """Connect to the RDS endpoint for *region_name*.

    :type region_name: str
    :param region_name: Name of a region from :func:`regions`
        (e.g. 'us-east-1').

    :return: An RDS connection for that region, or ``None`` if the
        name matches no known region.
    """
    for region in regions():
        if region.name == region_name:
            return region.connect()
    return None
+
+#boto.set_stream_logger('rds')
+
+class RDSConnection(AWSQueryConnection):
+
+ DefaultRegionName = 'us-east-1'
+ DefaultRegionEndpoint = 'rds.amazonaws.com'
+ APIVersion = '2009-10-16'
+
    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None, debug=0,
                 https_connection_factory=None, region=None, path='/'):
        """Create an RDS query connection.

        If *region* is not given, defaults to us-east-1
        (DefaultRegionName/DefaultRegionEndpoint); all other arguments
        are passed through to AWSQueryConnection.
        """
        if not region:
            region = RDSRegionInfo(self, self.DefaultRegionName,
                                   self.DefaultRegionEndpoint)
        self.region = region
        AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
                                    is_secure, port, proxy, proxy_port, proxy_user,
                                    proxy_pass, self.region.endpoint, debug,
                                    https_connection_factory, path)
+
    def _required_auth_capability(self):
        # Tells the auth framework which request-signing handler this
        # connection requires.
        return ['rds']
+
+ # DB Instance methods
+
    def get_all_dbinstances(self, instance_id=None, max_records=None,
                            marker=None):
        """
        Retrieve all the DBInstances in your account.

        :type instance_id: str
        :param instance_id: DB Instance identifier.  If supplied, only
                            information about this instance will be returned.
                            Otherwise, info about all DB Instances will
                            be returned.

        :type max_records: int
        :param max_records: The maximum number of records to be returned.
                            If more results are available, a MoreToken will
                            be returned in the response that can be used to
                            retrieve additional records.  Default is 100.

        :type marker: str
        :param marker: The marker provided by a previous request.

        :rtype: list
        :return: A list of :class:`boto.rds.dbinstance.DBInstance`
        """
        params = {}
        if instance_id:
            params['DBInstanceIdentifier'] = instance_id
        if max_records:
            params['MaxRecords'] = max_records
        if marker:
            params['Marker'] = marker
        return self.get_list('DescribeDBInstances', params, [('DBInstance', DBInstance)])
+
    def create_dbinstance(self, id, allocated_storage, instance_class,
                          master_username, master_password, port=3306,
                          engine='MySQL5.1', db_name=None, param_group=None,
                          security_groups=None, availability_zone=None,
                          preferred_maintenance_window=None,
                          backup_retention_period=None,
                          preferred_backup_window=None,
                          multi_az=False,
                          engine_version=None,
                          auto_minor_version_upgrade=True):
        """
        Create a new DBInstance.

        :type id: str
        :param id: Unique identifier for the new instance.
                   Must contain 1-63 alphanumeric characters.
                   First character must be a letter.
                   May not end with a hyphen or contain two consecutive hyphens

        :type allocated_storage: int
        :param allocated_storage: Initially allocated storage size, in GBs.
                                  Valid values are [5-1024]

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the DBInstance.

                               Valid values are:

                               * db.m1.small
                               * db.m1.large
                               * db.m1.xlarge
                               * db.m2.xlarge
                               * db.m2.2xlarge
                               * db.m2.4xlarge

        :type engine: str
        :param engine: Name of database engine. Must be MySQL5.1 for now.

        :type master_username: str
        :param master_username: Name of master user for the DBInstance.
                                Must be 1-15 alphanumeric characters, first
                                must be a letter.

        :type master_password: str
        :param master_password: Password of master user for the DBInstance.
                                Must be 4-16 alphanumeric characters.

        :type port: int
        :param port: Port number on which database accepts connections.
                     Valid values [1115-65535]. Defaults to 3306.

        :type db_name: str
        :param db_name: Name of a database to create when the DBInstance
                        is created. Default is to create no databases.

        :type param_group: str
        :param param_group: Name of DBParameterGroup to associate with
                            this DBInstance. If no groups are specified
                            no parameter groups will be used.

        :type security_groups: list of str or list of DBSecurityGroup objects
        :param security_groups: List of names of DBSecurityGroup to authorize on
                                this DBInstance.

        :type availability_zone: str
        :param availability_zone: Name of the availability zone to place
                                  DBInstance into.

        :type preferred_maintenance_window: str
        :param preferred_maintenance_window: The weekly time range (in UTC)
                                             during which maintenance can occur.
                                             Default is Sun:05:00-Sun:09:00

        :type backup_retention_period: int
        :param backup_retention_period: The number of days for which automated
                                        backups are retained.  Setting this to
                                        zero disables automated backups.

        :type preferred_backup_window: str
        :param preferred_backup_window: The daily time range during which
                                        automated backups are created (if
                                        enabled).  Must be in h24:mi-hh24:mi
                                        format (UTC).

        :type multi_az: bool
        :param multi_az: If True, specifies the DB Instance will be
                         deployed in multiple availability zones.

        :type engine_version: str
        :param engine_version: Version number of the database engine to use.

        :type auto_minor_version_upgrade: bool
        :param auto_minor_version_upgrade: Indicates that minor engine
                                           upgrades will be applied
                                           automatically to the Read Replica
                                           during the maintenance window.
                                           Default is True.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The new db instance.
        """
        params = {'DBInstanceIdentifier' : id,
                  'AllocatedStorage' : allocated_storage,
                  'DBInstanceClass' : instance_class,
                  'Engine' : engine,
                  'MasterUsername' : master_username,
                  'MasterUserPassword' : master_password}
        # Optional request parameters are only included when supplied.
        if port:
            params['Port'] = port
        if db_name:
            params['DBName'] = db_name
        if param_group:
            params['DBParameterGroupName'] = param_group
        if security_groups:
            # Accept either group names or DBSecurityGroup objects.
            l = []
            for group in security_groups:
                if isinstance(group, DBSecurityGroup):
                    l.append(group.name)
                else:
                    l.append(group)
            self.build_list_params(params, l, 'DBSecurityGroups.member')
        if availability_zone:
            params['AvailabilityZone'] = availability_zone
        if preferred_maintenance_window:
            params['PreferredMaintenanceWindow'] = preferred_maintenance_window
        if backup_retention_period:
            params['BackupRetentionPeriod'] = backup_retention_period
        if preferred_backup_window:
            params['PreferredBackupWindow'] = preferred_backup_window
        if multi_az:
            params['MultiAZ'] = 'true'
        if engine_version:
            params['EngineVersion'] = engine_version
        # The parameter default is True, so the flag is only sent when
        # the caller explicitly disables auto upgrades.
        if auto_minor_version_upgrade is False:
            params['AutoMinorVersionUpgrade'] = 'false'

        return self.get_object('CreateDBInstance', params, DBInstance)
+
    def create_dbinstance_read_replica(self, id, source_id,
                                       instance_class=None,
                                       port=3306,
                                       availability_zone=None,
                                       auto_minor_version_upgrade=None):
        """
        Create a new DBInstance Read Replica.

        :type id: str
        :param id: Unique identifier for the new instance.
                   Must contain 1-63 alphanumeric characters.
                   First character must be a letter.
                   May not end with a hyphen or contain two consecutive hyphens

        :type source_id: str
        :param source_id: Unique identifier for the DB Instance for which this
                          DB Instance will act as a Read Replica.

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the
                               DBInstance.  Default is to inherit from
                               the source DB Instance.

                               Valid values are:

                               * db.m1.small
                               * db.m1.large
                               * db.m1.xlarge
                               * db.m2.xlarge
                               * db.m2.2xlarge
                               * db.m2.4xlarge

        :type port: int
        :param port: Port number on which database accepts connections.
                     Default is to inherit from source DB Instance.
                     Valid values [1115-65535]. Defaults to 3306.

        :type availability_zone: str
        :param availability_zone: Name of the availability zone to place
                                  DBInstance into.

        :type auto_minor_version_upgrade: bool
        :param auto_minor_version_upgrade: Indicates that minor engine
                                           upgrades will be applied
                                           automatically to the Read Replica
                                           during the maintenance window.
                                           Default is to inherit this value
                                           from the source DB Instance.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The new db instance.
        """
        params = {'DBInstanceIdentifier' : id,
                  'SourceDBInstanceIdentifier' : source_id}
        if instance_class:
            params['DBInstanceClass'] = instance_class
        if port:
            params['Port'] = port
        if availability_zone:
            params['AvailabilityZone'] = availability_zone
        # Three-state flag: None means "inherit from source", so only
        # True/False are serialized into the request.
        if auto_minor_version_upgrade is not None:
            if auto_minor_version_upgrade is True:
                params['AutoMinorVersionUpgrade'] = 'true'
            else:
                params['AutoMinorVersionUpgrade'] = 'false'

        return self.get_object('CreateDBInstanceReadReplica',
                               params, DBInstance)
+
    def modify_dbinstance(self, id, param_group=None, security_groups=None,
                          preferred_maintenance_window=None,
                          master_password=None, allocated_storage=None,
                          instance_class=None,
                          backup_retention_period=None,
                          preferred_backup_window=None,
                          multi_az=False,
                          apply_immediately=False):
        """
        Modify an existing DBInstance.

        :type id: str
        :param id: Unique identifier for the new instance.

        :type param_group: str
        :param param_group: Name of DBParameterGroup to associate with
                            this DBInstance.

        :type security_groups: list of str or list of DBSecurityGroup objects
        :param security_groups: List of names of DBSecurityGroup to authorize on
                                this DBInstance.

        :type preferred_maintenance_window: str
        :param preferred_maintenance_window: The weekly time range (in UTC)
                                             during which maintenance can
                                             occur.
                                             Default is Sun:05:00-Sun:09:00

        :type master_password: str
        :param master_password: Password of master user for the DBInstance.
                                Must be 4-15 alphanumeric characters.

        :type allocated_storage: int
        :param allocated_storage: The new allocated storage size, in GBs.
                                  Valid values are [5-1024]

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the
                               DBInstance.  Changes will be applied at
                               next maintenance window unless
                               apply_immediately is True.

                               Valid values are:

                               * db.m1.small
                               * db.m1.large
                               * db.m1.xlarge
                               * db.m2.xlarge
                               * db.m2.2xlarge
                               * db.m2.4xlarge

        :type apply_immediately: bool
        :param apply_immediately: If true, the modifications will be applied
                                  as soon as possible rather than waiting for
                                  the next preferred maintenance window.

        :type backup_retention_period: int
        :param backup_retention_period: The number of days for which automated
                                        backups are retained.  Setting this to
                                        zero disables automated backups.

        :type preferred_backup_window: str
        :param preferred_backup_window: The daily time range during which
                                        automated backups are created (if
                                        enabled).  Must be in h24:mi-hh24:mi
                                        format (UTC).

        :type multi_az: bool
        :param multi_az: If True, specifies the DB Instance will be
                         deployed in multiple availability zones.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The modified db instance.
        """
        params = {'DBInstanceIdentifier' : id}
        if param_group:
            params['DBParameterGroupName'] = param_group
        if security_groups:
            # Accept either group names or DBSecurityGroup objects.
            l = []
            for group in security_groups:
                if isinstance(group, DBSecurityGroup):
                    l.append(group.name)
                else:
                    l.append(group)
            self.build_list_params(params, l, 'DBSecurityGroups.member')
        if preferred_maintenance_window:
            params['PreferredMaintenanceWindow'] = preferred_maintenance_window
        if master_password:
            params['MasterUserPassword'] = master_password
        if allocated_storage:
            params['AllocatedStorage'] = allocated_storage
        if instance_class:
            params['DBInstanceClass'] = instance_class
        if backup_retention_period:
            params['BackupRetentionPeriod'] = backup_retention_period
        if preferred_backup_window:
            params['PreferredBackupWindow'] = preferred_backup_window
        # Boolean flags are only serialized when True; the service default
        # applies otherwise.
        if multi_az:
            params['MultiAZ'] = 'true'
        if apply_immediately:
            params['ApplyImmediately'] = 'true'

        return self.get_object('ModifyDBInstance', params, DBInstance)
+
    def delete_dbinstance(self, id, skip_final_snapshot=False,
                          final_snapshot_id=''):
        """
        Delete an existing DBInstance.

        :type id: str
        :param id: Unique identifier for the new instance.

        :type skip_final_snapshot: bool
        :param skip_final_snapshot: This parameter determines whether a final
                                    db snapshot is created before the instance
                                    is deleted.  If True, no snapshot is created.
                                    If False, a snapshot is created before
                                    deleting the instance.

        :type final_snapshot_id: str
        :param final_snapshot_id: If a final snapshot is requested, this
                                  is the identifier used for that snapshot.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The deleted db instance.
        """
        params = {'DBInstanceIdentifier' : id}
        if skip_final_snapshot:
            params['SkipFinalSnapshot'] = 'true'
        else:
            # A snapshot identifier is only sent when a snapshot will be taken.
            params['SkipFinalSnapshot'] = 'false'
            params['FinalDBSnapshotIdentifier'] = final_snapshot_id
        return self.get_object('DeleteDBInstance', params, DBInstance)
+
+
    def reboot_dbinstance(self, id):
        """
        Reboot DBInstance.

        :type id: str
        :param id: Unique identifier of the instance.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The rebooting db instance.
        """
        params = {'DBInstanceIdentifier' : id}
        return self.get_object('RebootDBInstance', params, DBInstance)
+
+ # DBParameterGroup methods
+
    def get_all_dbparameter_groups(self, groupname=None, max_records=None,
                                   marker=None):
        """
        Get all parameter groups associated with your account in a region.

        :type groupname: str
        :param groupname: The name of the DBParameter group to retrieve.
                          If not provided, all DBParameter groups will be returned.

        :type max_records: int
        :param max_records: The maximum number of records to be returned.
                            If more results are available, a MoreToken will
                            be returned in the response that can be used to
                            retrieve additional records.  Default is 100.

        :type marker: str
        :param marker: The marker provided by a previous request.

        :rtype: list
        :return: A list of :class:`boto.rds.parametergroup.ParameterGroup`
        """
        params = {}
        if groupname:
            params['DBParameterGroupName'] = groupname
        if max_records:
            params['MaxRecords'] = max_records
        if marker:
            params['Marker'] = marker
        return self.get_list('DescribeDBParameterGroups', params,
                             [('DBParameterGroup', ParameterGroup)])
+
    def get_all_dbparameters(self, groupname, source=None,
                             max_records=None, marker=None):
        """
        Get all parameters associated with a ParameterGroup

        :type groupname: str
        :param groupname: The name of the DBParameter group to retrieve.

        :type source: str
        :param source: Specifies which parameters to return.
                       If not specified, all parameters will be returned.
                       Valid values are: user|system|engine-default

        :type max_records: int
        :param max_records: The maximum number of records to be returned.
                            If more results are available, a MoreToken will
                            be returned in the response that can be used to
                            retrieve additional records.  Default is 100.

        :type marker: str
        :param marker: The marker provided by a previous request.

        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
        :return: The ParameterGroup
        """
        params = {'DBParameterGroupName' : groupname}
        if source:
            params['Source'] = source
        if max_records:
            params['MaxRecords'] = max_records
        if marker:
            params['Marker'] = marker
        pg = self.get_object('DescribeDBParameters', params, ParameterGroup)
        # The DescribeDBParameters response doesn't echo the group name,
        # so it is filled in from the request argument here.
        pg.name = groupname
        return pg
+
    def create_parameter_group(self, name, engine='MySQL5.1', description=''):
        """
        Create a new dbparameter group for your account.

        :type name: string
        :param name: The name of the new dbparameter group

        :type engine: str
        :param engine: Name of database engine. Must be MySQL5.1 for now.

        :type description: string
        :param description: The description of the new dbparameter group

        :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
        :return: The newly created ParameterGroup
        """
        params = {'DBParameterGroupName': name,
                  'Engine': engine,
                  'Description' : description}
        return self.get_object('CreateDBParameterGroup', params, ParameterGroup)
+
+ def modify_parameter_group(self, name, parameters=None):
+ """
+ Modify a parameter group for your account.
+
+ :type name: string
+ :param name: The name of the new parameter group
+
+ :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
+ :param parameters: The new parameters
+
+ :rtype: :class:`boto.rds.parametergroup.ParameterGroup`
+ :return: The newly created ParameterGroup
+ """
+ params = {'DBParameterGroupName': name}
+ for i in range(0, len(parameters)):
+ parameter = parameters[i]
+ parameter.merge(params, i+1)
+ return self.get_list('ModifyDBParameterGroup', params, ParameterGroup)
+
+ def reset_parameter_group(self, name, reset_all_params=False, parameters=None):
+ """
+ Resets some or all of the parameters of a ParameterGroup to the
+ default value
+
+ :type key_name: string
+ :param key_name: The name of the ParameterGroup to reset
+
+ :type parameters: list of :class:`boto.rds.parametergroup.Parameter`
+ :param parameters: The parameters to reset. If not supplied, all parameters
+ will be reset.
+ """
+ params = {'DBParameterGroupName':name}
+ if reset_all_params:
+ params['ResetAllParameters'] = 'true'
+ else:
+ params['ResetAllParameters'] = 'false'
+ for i in range(0, len(parameters)):
+ parameter = parameters[i]
+ parameter.merge(params, i+1)
+ return self.get_status('ResetDBParameterGroup', params)
+
    def delete_parameter_group(self, name):
        """
        Delete a DBParameterGroup from your account.

        :type name: string
        :param name: The name of the DBParameterGroup to delete
        """
        params = {'DBParameterGroupName':name}
        return self.get_status('DeleteDBParameterGroup', params)
+
+ # DBSecurityGroup methods
+
    def get_all_dbsecurity_groups(self, groupname=None, max_records=None,
                                  marker=None):
        """
        Get all security groups associated with your account in a region.

        :type groupname: str
        :param groupname: The name of the security group to retrieve.
                          If not provided, all security groups will be returned.

        :type max_records: int
        :param max_records: The maximum number of records to be returned.
                            If more results are available, a MoreToken will
                            be returned in the response that can be used to
                            retrieve additional records.  Default is 100.

        :type marker: str
        :param marker: The marker provided by a previous request.

        :rtype: list
        :return: A list of :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
        """
        params = {}
        if groupname:
            params['DBSecurityGroupName'] = groupname
        if max_records:
            params['MaxRecords'] = max_records
        if marker:
            params['Marker'] = marker
        return self.get_list('DescribeDBSecurityGroups', params,
                             [('DBSecurityGroup', DBSecurityGroup)])
+
    def create_dbsecurity_group(self, name, description=None):
        """
        Create a new security group for your account.
        This will create the security group within the region you
        are currently connected to.

        :type name: string
        :param name: The name of the new security group

        :type description: string
        :param description: The description of the new security group

        :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
        :return: The newly created DBSecurityGroup
        """
        params = {'DBSecurityGroupName':name}
        if description:
            params['DBSecurityGroupDescription'] = description
        group = self.get_object('CreateDBSecurityGroup', params, DBSecurityGroup)
        # Fill in the fields from the request arguments rather than relying
        # on the parsed response to populate them.
        group.name = name
        group.description = description
        return group
+
    def delete_dbsecurity_group(self, name):
        """
        Delete a DBSecurityGroup from your account.

        :type name: string
        :param name: The name of the DBSecurityGroup to delete
        """
        params = {'DBSecurityGroupName':name}
        return self.get_status('DeleteDBSecurityGroup', params)
+
    def authorize_dbsecurity_group(self, group_name, cidr_ip=None,
                                   ec2_security_group_name=None,
                                   ec2_security_group_owner_id=None):
        """
        Add a new rule to an existing security group.
        You need to pass in either ec2_security_group_name and
        ec2_security_group_owner_id OR a CIDR block but not both.

        :type group_name: string
        :param group_name: The name of the security group you are adding
                           the rule to.

        :type ec2_security_group_name: string
        :param ec2_security_group_name: The name of the EC2 security group you are
                                        granting access to.

        :type ec2_security_group_owner_id: string
        :param ec2_security_group_owner_id: The ID of the owner of the EC2 security
                                            group you are granting access to.

        :type cidr_ip: string
        :param cidr_ip: The CIDR block you are providing access to.
                        See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing

        :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
        :return: The updated DBSecurityGroup.
        """
        params = {'DBSecurityGroupName':group_name}
        if ec2_security_group_name:
            params['EC2SecurityGroupName'] = ec2_security_group_name
        if ec2_security_group_owner_id:
            params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
        if cidr_ip:
            # URL-encode the CIDR (it contains a '/') before it is placed
            # in the query string.
            params['CIDRIP'] = urllib.quote(cidr_ip)
        return self.get_object('AuthorizeDBSecurityGroupIngress', params, DBSecurityGroup)
+
+ def revoke_dbsecurity_group(self, group_name, ec2_security_group_name=None,
+ ec2_security_group_owner_id=None, cidr_ip=None):
+ """
+ Remove an existing rule from an existing security group.
+ You need to pass in either ec2_security_group_name and
+ ec2_security_group_owner_id OR a CIDR block.
+
+ :type group_name: string
+ :param group_name: The name of the security group you are removing
+ the rule from.
+
+ :type ec2_security_group_name: string
+ :param ec2_security_group_name: The name of the EC2 security group from which
+ you are removing access.
+
+ :type ec2_security_group_owner_id: string
+ :param ec2_security_group_owner_id: The ID of the owner of the EC2 security
+ from which you are removing access.
+
+ :type cidr_ip: string
+ :param cidr_ip: The CIDR block from which you are removing access.
+ See http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
+
+ :rtype: bool
+ :return: True if successful.
+ """
+ params = {'DBSecurityGroupName':group_name}
+ if ec2_security_group_name:
+ params['EC2SecurityGroupName'] = ec2_security_group_name
+ if ec2_security_group_owner_id:
+ params['EC2SecurityGroupOwnerId'] = ec2_security_group_owner_id
+ if cidr_ip:
+ params['CIDRIP'] = cidr_ip
+ return self.get_object('RevokeDBSecurityGroupIngress', params, DBSecurityGroup)
+
+ # For backwards compatibility. This method was improperly named
+ # in previous versions. I have renamed it to match the others.
+ revoke_security_group = revoke_dbsecurity_group
+
+ # DBSnapshot methods
+
+ def get_all_dbsnapshots(self, snapshot_id=None, instance_id=None,
+ max_records=None, marker=None):
+ """
+ Get information about DB Snapshots.
+
+ :type snapshot_id: str
+ :param snapshot_id: The unique identifier of an RDS snapshot.
+ If not provided, all RDS snapshots will be returned.
+
+ :type instance_id: str
+ :param instance_id: The identifier of a DBInstance. If provided,
+ only the DBSnapshots related to that instance will
+ be returned.
+ If not provided, all RDS snapshots will be returned.
+
+ :type max_records: int
+ :param max_records: The maximum number of records to be returned.
+ If more results are available, a MoreToken will
+ be returned in the response that can be used to
+ retrieve additional records. Default is 100.
+
+ :type marker: str
+ :param marker: The marker provided by a previous request.
+
+ :rtype: list
+ :return: A list of :class:`boto.rds.dbsnapshot.DBSnapshot`
+ """
+ params = {}
+ if snapshot_id:
+ params['DBSnapshotIdentifier'] = snapshot_id
+ if instance_id:
+ params['DBInstanceIdentifier'] = instance_id
+ if max_records:
+ params['MaxRecords'] = max_records
+ if marker:
+ params['Marker'] = marker
+ return self.get_list('DescribeDBSnapshots', params,
+ [('DBSnapshot', DBSnapshot)])
+
+ def create_dbsnapshot(self, snapshot_id, dbinstance_id):
+ """
+ Create a new DB snapshot.
+
+ :type snapshot_id: string
+ :param snapshot_id: The identifier for the DBSnapshot
+
+ :type dbinstance_id: string
+ :param dbinstance_id: The source identifier for the RDS instance from
+ which the snapshot is created.
+
+ :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
+ :return: The newly created DBSnapshot
+ """
+ params = {'DBSnapshotIdentifier' : snapshot_id,
+ 'DBInstanceIdentifier' : dbinstance_id}
+ return self.get_object('CreateDBSnapshot', params, DBSnapshot)
+
+ def delete_dbsnapshot(self, identifier):
+ """
+ Delete a DBSnapshot
+
+ :type identifier: string
+ :param identifier: The identifier of the DBSnapshot to delete
+ """
+ params = {'DBSnapshotIdentifier' : identifier}
+ return self.get_object('DeleteDBSnapshot', params, DBSnapshot)
+
    def restore_dbinstance_from_dbsnapshot(self, identifier, instance_id,
                                           instance_class, port=None,
                                           availability_zone=None):

        """
        Create a new DBInstance from a DB snapshot.

        :type identifier: string
        :param identifier: The identifier for the DBSnapshot to restore from.

        :type instance_id: string
        :param instance_id: The identifier for the new DBInstance that will
                            be created from the snapshot (sent as
                            DBInstanceIdentifier).

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the DBInstance.
                               Valid values are:
                               db.m1.small | db.m1.large | db.m1.xlarge |
                               db.m2.2xlarge | db.m2.4xlarge

        :type port: int
        :param port: Port number on which database accepts connections.
                     Valid values [1150-65535]. Defaults to 3306.

        :type availability_zone: str
        :param availability_zone: Name of the availability zone to place
                                  DBInstance into.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The newly created DBInstance
        """
        params = {'DBSnapshotIdentifier' : identifier,
                  'DBInstanceIdentifier' : instance_id,
                  'DBInstanceClass' : instance_class}
        if port:
            params['Port'] = port
        if availability_zone:
            params['AvailabilityZone'] = availability_zone
        return self.get_object('RestoreDBInstanceFromDBSnapshot',
                               params, DBInstance)
+
    def restore_dbinstance_from_point_in_time(self, source_instance_id,
                                              target_instance_id,
                                              use_latest=False,
                                              restore_time=None,
                                              dbinstance_class=None,
                                              port=None,
                                              availability_zone=None):

        """
        Create a new DBInstance from a point in time.

        :type source_instance_id: string
        :param source_instance_id: The identifier for the source DBInstance.

        :type target_instance_id: string
        :param target_instance_id: The identifier of the new DBInstance.

        :type use_latest: bool
        :param use_latest: If True, the latest snapshot available will
                           be used.

        :type restore_time: datetime
        :param restore_time: The date and time to restore from.  Only
                             used if use_latest is False.

        :type dbinstance_class: str
        :param dbinstance_class: The compute and memory capacity of the
                                 DBInstance.  Valid values are:
                                 db.m1.small | db.m1.large | db.m1.xlarge |
                                 db.m2.2xlarge | db.m2.4xlarge

        :type port: int
        :param port: Port number on which database accepts connections.
                     Valid values [1150-65535]. Defaults to 3306.

        :type availability_zone: str
        :param availability_zone: Name of the availability zone to place
                                  DBInstance into.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The newly created DBInstance
        """
        params = {'SourceDBInstanceIdentifier' : source_instance_id,
                  'TargetDBInstanceIdentifier' : target_instance_id}
        # use_latest takes precedence; restore_time is only consulted
        # when use_latest is False.
        if use_latest:
            params['UseLatestRestorableTime'] = 'true'
        elif restore_time:
            params['RestoreTime'] = restore_time.isoformat()
        if dbinstance_class:
            params['DBInstanceClass'] = dbinstance_class
        if port:
            params['Port'] = port
        if availability_zone:
            params['AvailabilityZone'] = availability_zone
        return self.get_object('RestoreDBInstanceToPointInTime',
                               params, DBInstance)
+
+ # Events
+
    def get_all_events(self, source_identifier=None, source_type=None,
                       start_time=None, end_time=None,
                       max_records=None, marker=None):
        """
        Get information about events related to your DBInstances,
        DBSecurityGroups and DBParameterGroups.

        :type source_identifier: str
        :param source_identifier: If supplied, the events returned will be
                                  limited to those that apply to the identified
                                  source.  The value of this parameter depends
                                  on the value of source_type.  If neither
                                  parameter is specified, all events in the time
                                  span will be returned.  Note that this is only
                                  sent to the service when source_type is also
                                  supplied.

        :type source_type: str
        :param source_type: Specifies how the source_identifier should
                            be interpreted.  Valid values are:
                            db-instance | db-security-group |
                            db-parameter-group | db-snapshot

        :type start_time: datetime
        :param start_time: The beginning of the time interval for events.
                           If not supplied, all available events will
                           be returned.

        :type end_time: datetime
        :param end_time: The ending of the time interval for events.
                         If not supplied, all available events will
                         be returned.

        :type max_records: int
        :param max_records: The maximum number of records to be returned.
                            If more results are available, a marker will
                            be returned in the response that can be used to
                            retrieve additional records.  Default is 100.

        :type marker: str
        :param marker: The marker provided by a previous request.

        :rtype: list
        :return: A list of :class:`boto.rds.event.Event`
        """
        params = {}
        # The identifier is only meaningful together with its type, so
        # the pair is sent as a unit.
        if source_identifier and source_type:
            params['SourceIdentifier'] = source_identifier
            params['SourceType'] = source_type
        if start_time:
            params['StartTime'] = start_time.isoformat()
        if end_time:
            params['EndTime'] = end_time.isoformat()
        if max_records:
            params['MaxRecords'] = max_records
        if marker:
            params['Marker'] = marker
        return self.get_list('DescribeEvents', params, [('Event', Event)])
+
+
diff --git a/boto/rds/dbinstance.py b/boto/rds/dbinstance.py
new file mode 100644
index 0000000..02f9af6
--- /dev/null
+++ b/boto/rds/dbinstance.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.rds.dbsecuritygroup import DBSecurityGroup
+from boto.rds.parametergroup import ParameterGroup
+
class DBInstance(object):
    """
    Represents a RDS DBInstance.

    Instances are normally populated by the SAX-style response parser
    through :meth:`startElement` / :meth:`endElement`, then offer
    convenience methods that delegate to the owning connection.
    """

    def __init__(self, connection=None, id=None):
        self.connection = connection
        self.id = id
        self.create_time = None
        self.engine = None
        self.status = None
        self.allocated_storage = None
        self.endpoint = None
        self.instance_class = None
        self.master_username = None
        self.parameter_group = None
        self.security_group = None
        self.availability_zone = None
        self.backup_retention_period = None
        self.preferred_backup_window = None
        self.preferred_maintenance_window = None
        self.latest_restorable_time = None
        self.multi_az = False
        self.pending_modified_values = None
        # Parser scratch state for the nested <Endpoint> element; the
        # (address, port) pair is published as self.endpoint when the
        # element closes.
        self._in_endpoint = False
        self._port = None
        self._address = None

    def __repr__(self):
        return 'DBInstance:%s' % self.id

    def startElement(self, name, attrs, connection):
        """
        SAX hook: return a child object to delegate parsing of a nested
        element to, or None to keep handling events on this object.
        """
        if name == 'Endpoint':
            self._in_endpoint = True
        elif name == 'DBParameterGroup':
            self.parameter_group = ParameterGroup(self.connection)
            return self.parameter_group
        elif name == 'DBSecurityGroup':
            self.security_group = DBSecurityGroup(self.connection)
            return self.security_group
        elif name == 'PendingModifiedValues':
            self.pending_modified_values = PendingModifiedValues()
            return self.pending_modified_values
        return None

    def endElement(self, name, value, connection):
        """
        SAX hook: store the text of a closed element on the matching
        attribute.  Unrecognized elements are kept verbatim via setattr.
        """
        if name == 'DBInstanceIdentifier':
            self.id = value
        elif name == 'DBInstanceStatus':
            # Bug fix: this test previously appeared twice in the
            # chain; the duplicate (dead) branch has been removed.
            self.status = value
        elif name == 'InstanceCreateTime':
            self.create_time = value
        elif name == 'Engine':
            self.engine = value
        elif name == 'AllocatedStorage':
            self.allocated_storage = int(value)
        elif name == 'DBInstanceClass':
            self.instance_class = value
        elif name == 'MasterUsername':
            self.master_username = value
        elif name == 'Port':
            # Port/Address only matter inside an <Endpoint> element.
            if self._in_endpoint:
                self._port = int(value)
        elif name == 'Address':
            if self._in_endpoint:
                self._address = value
        elif name == 'Endpoint':
            self.endpoint = (self._address, self._port)
            self._in_endpoint = False
        elif name == 'AvailabilityZone':
            self.availability_zone = value
        elif name == 'BackupRetentionPeriod':
            self.backup_retention_period = value
        elif name == 'LatestRestorableTime':
            self.latest_restorable_time = value
        elif name == 'PreferredMaintenanceWindow':
            self.preferred_maintenance_window = value
        elif name == 'PreferredBackupWindow':
            self.preferred_backup_window = value
        elif name == 'MultiAZ':
            if value.lower() == 'true':
                self.multi_az = True
        else:
            setattr(self, name, value)

    def snapshot(self, snapshot_id):
        """
        Create a new DB snapshot of this DBInstance.

        :type snapshot_id: string
        :param snapshot_id: The identifier for the DBSnapshot

        :rtype: :class:`boto.rds.dbsnapshot.DBSnapshot`
        :return: The newly created DBSnapshot
        """
        return self.connection.create_dbsnapshot(snapshot_id, self.id)

    def reboot(self):
        """
        Reboot this DBInstance.

        :return: The result of the connection's reboot_dbinstance call.
                 NOTE(review): the original docstring claimed a
                 DBSnapshot was returned, which looks like a
                 copy/paste error -- confirm against the connection.
        """
        return self.connection.reboot_dbinstance(self.id)

    def update(self, validate=False):
        """
        Update the DB instance's status information by making a call to
        fetch the current instance attributes from the service.

        :type validate: bool
        :param validate: By default, if the service returns no data about
                         the instance the update method returns quietly.
                         If the validate param is True, however, it will
                         raise a ValueError exception if no data is
                         returned from the service.

        :return: The (possibly refreshed) value of self.status.
        """
        rs = self.connection.get_all_dbinstances(self.id)
        if len(rs) > 0:
            for i in rs:
                if i.id == self.id:
                    self.__dict__.update(i.__dict__)
        elif validate:
            raise ValueError('%s is not a valid Instance ID' % self.id)
        return self.status

    def stop(self, skip_final_snapshot=False, final_snapshot_id=''):
        """
        Delete this DBInstance.

        :type skip_final_snapshot: bool
        :param skip_final_snapshot: This parameter determines whether a final
                                    db snapshot is created before the instance
                                    is deleted.  If True, no snapshot is created.
                                    If False, a snapshot is created before
                                    deleting the instance.

        :type final_snapshot_id: str
        :param final_snapshot_id: If a final snapshot is requested, this
                                  is the identifier used for that snapshot.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The deleted db instance.
        """
        return self.connection.delete_dbinstance(self.id,
                                                 skip_final_snapshot,
                                                 final_snapshot_id)

    def modify(self, param_group=None, security_groups=None,
               preferred_maintenance_window=None,
               master_password=None, allocated_storage=None,
               instance_class=None,
               backup_retention_period=None,
               preferred_backup_window=None,
               multi_az=False,
               apply_immediately=False):
        """
        Modify this DBInstance.

        :type param_group: str
        :param param_group: The name of the DBParameterGroup to apply.

        :type security_groups: list of str or list of DBSecurityGroup objects
        :param security_groups: List of names of DBSecurityGroup to authorize on
                                this DBInstance.

        :type preferred_maintenance_window: str
        :param preferred_maintenance_window: The weekly time range (in UTC)
                                             during which maintenance can
                                             occur.
                                             Default is Sun:05:00-Sun:09:00

        :type master_password: str
        :param master_password: Password of master user for the DBInstance.
                                Must be 4-15 alphanumeric characters.

        :type allocated_storage: int
        :param allocated_storage: The new allocated storage size, in GBs.
                                  Valid values are [5-1024]

        :type instance_class: str
        :param instance_class: The compute and memory capacity of the
                               DBInstance.  Changes will be applied at
                               next maintenance window unless
                               apply_immediately is True.

                               Valid values are:

                               * db.m1.small
                               * db.m1.large
                               * db.m1.xlarge
                               * db.m2.xlarge
                               * db.m2.2xlarge
                               * db.m2.4xlarge

        :type backup_retention_period: int
        :param backup_retention_period: The number of days for which automated
                                        backups are retained.  Setting this to
                                        zero disables automated backups.

        :type preferred_backup_window: str
        :param preferred_backup_window: The daily time range during which
                                        automated backups are created (if
                                        enabled).  Must be in hh24:mi-hh24:mi
                                        format (UTC).

        :type multi_az: bool
        :param multi_az: If True, specifies the DB Instance will be
                         deployed in multiple availability zones.

        :type apply_immediately: bool
        :param apply_immediately: If true, the modifications will be applied
                                  as soon as possible rather than waiting for
                                  the next preferred maintenance window.

        :rtype: :class:`boto.rds.dbinstance.DBInstance`
        :return: The modified db instance.
        """
        return self.connection.modify_dbinstance(self.id,
                                                 param_group,
                                                 security_groups,
                                                 preferred_maintenance_window,
                                                 master_password,
                                                 allocated_storage,
                                                 instance_class,
                                                 backup_retention_period,
                                                 preferred_backup_window,
                                                 multi_az,
                                                 apply_immediately)
+
class PendingModifiedValues(dict):
    """
    A dict of requested-but-not-yet-applied DBInstance modifications,
    keyed by the XML element name from the response.
    """

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Record every child element; skip the enclosing tag itself.
        if name == 'PendingModifiedValues':
            return
        self[name] = value
+
diff --git a/boto/rds/dbsecuritygroup.py b/boto/rds/dbsecuritygroup.py
new file mode 100644
index 0000000..1555ca0
--- /dev/null
+++ b/boto/rds/dbsecuritygroup.py
@@ -0,0 +1,160 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
"""
Represents a DBSecurityGroup
"""
+from boto.ec2.securitygroup import SecurityGroup
+
class DBSecurityGroup(object):
    """
    Represents an RDS database security group: the EC2 security groups
    and CIDR IP ranges that have been granted access to a DB instance.
    """

    def __init__(self, connection=None, owner_id=None,
                 name=None, description=None):
        self.connection = connection
        self.owner_id = owner_id
        self.name = name
        self.description = description
        self.ec2_groups = []
        self.ip_ranges = []

    def __repr__(self):
        return 'DBSecurityGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        # Delegate parsing of nested rule entries to child objects.
        if name == 'IPRange':
            ip_range = IPRange(self)
            self.ip_ranges.append(ip_range)
            return ip_range
        if name == 'EC2SecurityGroup':
            group = EC2SecurityGroup(self)
            self.ec2_groups.append(group)
            return group
        return None

    def endElement(self, name, value, connection):
        if name == 'OwnerId':
            self.owner_id = value
        elif name == 'DBSecurityGroupName':
            self.name = value
        elif name == 'DBSecurityGroupDescription':
            self.description = value
        elif name == 'IPRanges':
            # Container element; its children were handled already.
            pass
        else:
            setattr(self, name, value)

    def delete(self):
        return self.connection.delete_dbsecurity_group(self.name)

    def authorize(self, cidr_ip=None, ec2_group=None):
        """
        Add a new rule to this DBSecurity group.
        You need to pass in either a CIDR block to authorize or
        an EC2 SecurityGroup.

        :type cidr_ip: string
        :param cidr_ip: A valid CIDR IP range to authorize

        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
        :param ec2_group: An EC2 security group to grant access to

        :rtype: bool
        :return: True if successful.
        """
        group_name = group_owner_id = None
        if isinstance(ec2_group, SecurityGroup):
            group_name = ec2_group.name
            group_owner_id = ec2_group.owner_id
        return self.connection.authorize_dbsecurity_group(self.name,
                                                          cidr_ip,
                                                          group_name,
                                                          group_owner_id)

    def revoke(self, cidr_ip=None, ec2_group=None):
        """
        Revoke access to a CIDR range or EC2 SecurityGroup.
        You need to pass in either a CIDR block or
        an EC2 SecurityGroup from which to revoke access.

        :type cidr_ip: string
        :param cidr_ip: A valid CIDR IP range to revoke

        :type ec2_group: :class:`boto.ec2.securitygroup.SecurityGroup`
        :param ec2_group: An EC2 security group whose access is revoked

        :rtype: bool
        :return: True if successful.
        """
        if isinstance(ec2_group, SecurityGroup):
            return self.connection.revoke_dbsecurity_group(
                self.name,
                ec2_security_group_name=ec2_group.name,
                ec2_security_group_owner_id=ec2_group.owner_id)
        # Otherwise revoke by CIDR IP range.
        return self.connection.revoke_dbsecurity_group(
            self.name, cidr_ip=cidr_ip)
+
class IPRange(object):
    """
    A CIDR IP range entry within a DBSecurityGroup, along with the
    authorization status reported by the service.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.cidr_ip = None
        self.status = None

    def __repr__(self):
        return 'IPRange:%s' % self.cidr_ip

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Map the known element names onto attributes; keep anything
        # else verbatim, like the original if/elif/else chain did.
        attr = {'CIDRIP': 'cidr_ip', 'Status': 'status'}.get(name, name)
        setattr(self, attr, value)
+
class EC2SecurityGroup(object):
    """
    An EC2 security group entry within a DBSecurityGroup.
    """

    def __init__(self, parent=None):
        self.parent = parent
        self.name = None
        self.owner_id = None

    def __repr__(self):
        return 'EC2SecurityGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Map the known element names onto attributes; keep anything
        # else verbatim, like the original if/elif/else chain did.
        attr = {'EC2SecurityGroupName': 'name',
                'EC2SecurityGroupOwnerId': 'owner_id'}.get(name, name)
        setattr(self, attr, value)
+
diff --git a/boto/rds/dbsnapshot.py b/boto/rds/dbsnapshot.py
new file mode 100644
index 0000000..78d0230
--- /dev/null
+++ b/boto/rds/dbsnapshot.py
@@ -0,0 +1,74 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class DBSnapshot(object):
    """
    Represents a RDS DB Snapshot.

    Populated by the SAX-style response parser via endElement.
    """

    def __init__(self, connection=None, id=None):
        self.connection = connection
        self.id = id
        self.engine = None
        self.snapshot_create_time = None
        self.instance_create_time = None
        self.port = None
        self.status = None
        # Bug fix: availability_zone was previously initialized twice;
        # the duplicate assignment has been removed.
        self.availability_zone = None
        self.master_username = None
        self.allocated_storage = None
        self.instance_id = None
        # Bug fix: 'time' was set by endElement (SnapshotTime) but never
        # initialized, so it is now given a default like the others.
        self.time = None

    def __repr__(self):
        return 'DBSnapshot:%s' % self.id

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Store the text of a closed element on the matching attribute;
        # unrecognized elements are kept verbatim via setattr.
        if name == 'Engine':
            self.engine = value
        elif name == 'InstanceCreateTime':
            self.instance_create_time = value
        elif name == 'SnapshotCreateTime':
            self.snapshot_create_time = value
        elif name == 'DBInstanceIdentifier':
            self.instance_id = value
        elif name == 'DBSnapshotIdentifier':
            self.id = value
        elif name == 'Port':
            self.port = int(value)
        elif name == 'Status':
            self.status = value
        elif name == 'AvailabilityZone':
            self.availability_zone = value
        elif name == 'MasterUsername':
            self.master_username = value
        elif name == 'AllocatedStorage':
            self.allocated_storage = int(value)
        elif name == 'SnapshotTime':
            self.time = value
        else:
            setattr(self, name, value)
+
+
+
diff --git a/boto/rds/event.py b/boto/rds/event.py
new file mode 100644
index 0000000..a91f8f0
--- /dev/null
+++ b/boto/rds/event.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Event(object):
    """
    An event reported by the RDS service for a source (instance,
    security group, parameter group or snapshot).
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.message = None
        self.source_identifier = None
        self.source_type = None
        self.engine = None
        self.date = None

    def __repr__(self):
        return '"%s"' % self.message

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        return None

    def endElement(self, name, value, connection):
        # Map the known element names onto attributes; keep anything
        # else verbatim, like the original if/elif/else chain did.
        field = {'SourceIdentifier': 'source_identifier',
                 'SourceType': 'source_type',
                 'Message': 'message',
                 'Date': 'date'}.get(name, name)
        setattr(self, field, value)
+
diff --git a/boto/rds/parametergroup.py b/boto/rds/parametergroup.py
new file mode 100644
index 0000000..44d00e2
--- /dev/null
+++ b/boto/rds/parametergroup.py
@@ -0,0 +1,201 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class ParameterGroup(dict):
    """
    Represents an RDS parameter group: a dict mapping parameter names
    to :class:`Parameter` objects.
    """

    def __init__(self, connection=None):
        dict.__init__(self)
        self.connection = connection
        self.name = None
        self.description = None
        self.engine = None
        self._current_param = None

    def __repr__(self):
        return 'ParameterGroup:%s' % self.name

    def startElement(self, name, attrs, connection):
        if name == 'Parameter':
            # Commit the previously-parsed parameter before starting a
            # new one.  NOTE(review): the final parameter of a response
            # is never committed here; presumably a caller handles it --
            # confirm against the connection code.
            if self._current_param:
                self[self._current_param.name] = self._current_param
            self._current_param = Parameter(self)
            return self._current_param

    def endElement(self, name, value, connection):
        if name == 'DBParameterGroupName':
            self.name = value
        elif name == 'Description':
            self.description = value
        elif name == 'Engine':
            self.engine = value
        else:
            setattr(self, name, value)

    def modifiable(self):
        """
        Return a list of the parameters in this group that are
        flagged as modifiable.
        """
        return [self[key] for key in self if self[key].is_modifiable]

    def get_params(self):
        """
        Fetch this group's parameters from the service and merge them
        into this dict.
        """
        pg = self.connection.get_all_dbparameters(self.name)
        self.update(pg)

    def add_param(self, name, value, apply_method):
        """
        Add (or replace) a parameter in this group.

        Bug fix: this previously appended to ``self.params``, which does
        not exist (the group *is* the dict), raising AttributeError; the
        parameter is now stored under its name like parsed parameters.

        :type name: str
        :param name: The name of the parameter

        :param value: The value for the parameter

        :type apply_method: str
        :param apply_method: 'immediate' or 'pending-reboot'
        """
        param = Parameter(self, name)
        # Set the raw value directly: the Parameter.value property
        # validates against the type name parsed from DataType, which a
        # hand-built Parameter does not have.
        param._value = value
        param.apply_method = apply_method
        self[name] = param
+
class Parameter(object):
    """
    Represents a RDS Parameter
    """

    # Maps the DataType reported by the service to a Python type.
    ValidTypes = {'integer' : int,
                  'string' : str,
                  'boolean' : bool}
    ValidSources = ['user', 'system', 'engine-default']
    ValidApplyTypes = ['static', 'dynamic']
    ValidApplyMethods = ['immediate', 'pending-reboot']

    def __init__(self, group=None, name=None):
        self.group = group
        self.name = name
        self._value = None
        # NOTE(review): this default is the builtin str *type*, but
        # set_value/get_value compare self.type against the strings
        # 'string'/'integer'/'boolean' (the values endElement stores
        # from DataType).  A Parameter whose DataType was never parsed
        # therefore raises TypeError on value access -- confirm intent.
        self.type = str
        self.source = None
        self.is_modifiable = True
        self.description = None
        self.apply_method = None
        self.allowed_values = None

    def __repr__(self):
        return 'Parameter:%s' % self.name

    def startElement(self, name, attrs, connection):
        # No nested elements to delegate to.
        pass

    def endElement(self, name, value, connection):
        # Store the text of a closed element; values from restricted
        # vocabularies are only stored when recognized.
        if name == 'ParameterName':
            self.name = value
        elif name == 'ParameterValue':
            # Raw assignment: bypasses the typed 'value' property.
            self._value = value
        elif name == 'DataType':
            if value in self.ValidTypes:
                self.type = value
        elif name == 'Source':
            if value in self.ValidSources:
                self.source = value
        elif name == 'IsModifiable':
            if value.lower() == 'true':
                self.is_modifiable = True
            else:
                self.is_modifiable = False
        elif name == 'Description':
            self.description = value
        elif name == 'ApplyType':
            if value in self.ValidApplyTypes:
                self.apply_type = value
        elif name == 'AllowedValues':
            self.allowed_values = value
        else:
            setattr(self, name, value)

    def merge(self, d, i):
        # Serialize this parameter into request dict d as member i of
        # the Parameters list.
        prefix = 'Parameters.member.%d.' % i
        if self.name:
            d[prefix+'ParameterName'] = self.name
        if self._value:
            d[prefix+'ParameterValue'] = self._value
        # NOTE(review): this tests self.apply_type but writes
        # self.apply_method.  apply_type is only ever set by endElement,
        # so a hand-built parameter raises AttributeError here; the test
        # probably should be on apply_method -- confirm.
        if self.apply_type:
            d[prefix+'ApplyMethod'] = self.apply_method

    def _set_string_value(self, value):
        # NOTE(review): operator precedence makes this read as
        # (not isinstance(value, str)) or isinstance(value, unicode),
        # i.e. unicode values are *rejected*; the intent was probably
        # isinstance(value, (str, unicode)) -- confirm.
        if not isinstance(value, str) or isinstance(value, unicode):
            raise ValueError, 'value must be of type str'
        if self.allowed_values:
            # AllowedValues for strings is a comma-separated choice list.
            choices = self.allowed_values.split(',')
            if value not in choices:
                raise ValueError, 'value must be in %s' % self.allowed_values
        self._value = value

    def _set_integer_value(self, value):
        # Accept numeric strings by converting them first.
        if isinstance(value, str) or isinstance(value, unicode):
            value = int(value)
        if isinstance(value, int) or isinstance(value, long):
            if self.allowed_values:
                # AllowedValues for integers is a 'min-max' range.
                # NOTE: min/max shadow the builtins within this scope.
                min, max = self.allowed_values.split('-')
                if value < int(min) or value > int(max):
                    raise ValueError, 'range is %s' % self.allowed_values
            self._value = value
        else:
            raise ValueError, 'value must be integer'

    def _set_boolean_value(self, value):
        if isinstance(value, bool):
            self._value = value
        elif isinstance(value, str) or isinstance(value, unicode):
            # Any string other than 'true' (case-insensitive) is False.
            if value.lower() == 'true':
                self._value = True
            else:
                self._value = False
        else:
            raise ValueError, 'value must be boolean'

    def set_value(self, value):
        # Dispatch on the DataType name parsed from the response (see
        # the NOTE in __init__ about the default).
        if self.type == 'string':
            self._set_string_value(value)
        elif self.type == 'integer':
            self._set_integer_value(value)
        elif self.type == 'boolean':
            self._set_boolean_value(value)
        else:
            raise TypeError, 'unknown type (%s)' % self.type

    def get_value(self):
        if self._value == None:
            return self._value
        if self.type == 'string':
            return self._value
        elif self.type == 'integer':
            # Coerce lazily: raw values from endElement are strings.
            if not isinstance(self._value, int) and not isinstance(self._value, long):
                self._set_integer_value(self._value)
            return self._value
        elif self.type == 'boolean':
            if not isinstance(self._value, bool):
                self._set_boolean_value(self._value)
            return self._value
        else:
            raise TypeError, 'unknown type (%s)' % self.type

    # Typed accessor around _value; validates and coerces on use.
    value = property(get_value, set_value, 'The value of the parameter')

    def apply(self, immediate=False):
        # Push this single parameter's change to the service, either
        # immediately or at the next reboot.
        if immediate:
            self.apply_method = 'immediate'
        else:
            self.apply_method = 'pending-reboot'
        self.group.connection.modify_parameter_group(self.group.name, [self])
+
diff --git a/boto/rds/regioninfo.py b/boto/rds/regioninfo.py
new file mode 100644
index 0000000..7d186ae
--- /dev/null
+++ b/boto/rds/regioninfo.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
class RDSRegionInfo(RegionInfo):
    """A RegionInfo whose endpoints are reached via RDSConnection."""

    def __init__(self, connection=None, name=None, endpoint=None):
        # Imported here rather than at module level to avoid a circular
        # import with boto.rds.
        from boto.rds import RDSConnection
        RegionInfo.__init__(self, connection=connection, name=name,
                            endpoint=endpoint, connection_cls=RDSConnection)
diff --git a/boto/regioninfo.py b/boto/regioninfo.py
new file mode 100644
index 0000000..907385f
--- /dev/null
+++ b/boto/regioninfo.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class RegionInfo(object):
    """
    Represents an AWS Region
    """

    def __init__(self, connection=None, name=None, endpoint=None,
                 connection_cls=None):
        self.connection = connection
        self.name = name
        self.endpoint = endpoint
        self.connection_cls = connection_cls

    def __repr__(self):
        return 'RegionInfo:%s' % self.name

    def startElement(self, name, attrs, connection):
        # No nested elements are delegated to child objects.
        return None

    def endElement(self, name, value, connection):
        # Map the two well-known region elements onto our attributes;
        # anything unrecognized is stored verbatim.
        if name == 'regionName':
            self.name = value
            return
        if name == 'regionEndpoint':
            self.endpoint = value
            return
        setattr(self, name, value)

    def connect(self, **kw_params):
        """
        Connect to this Region's endpoint. Returns an connection
        object pointing to the endpoint associated with this region.
        You may pass any of the arguments accepted by the connection
        class's constructor as keyword arguments and they will be
        passed along to the connection object.

        :rtype: Connection object
        :return: The connection to this regions endpoint
        """
        if not self.connection_cls:
            return None
        return self.connection_cls(region=self, **kw_params)
+
+
diff --git a/boto/resultset.py b/boto/resultset.py
new file mode 100644
index 0000000..075fc5e
--- /dev/null
+++ b/boto/resultset.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class ResultSet(list):
    """
    The ResultSet is used to pass results back from the Amazon services
    to the client. It is light wrapper around Python's :py:class:`list` class,
    with some additional methods for parsing XML results from AWS.
    Because I don't really want any dependencies on external libraries,
    I'm using the standard SAX parser that comes with Python. The good news is
    that it's quite fast and efficient but it makes some things rather
    difficult.

    You can pass in, as the marker_elem parameter, a list of tuples.
    Each tuple contains a string as the first element which represents
    the XML element that the resultset needs to be on the lookout for
    and a Python class as the second element of the tuple. Each time the
    specified element is found in the XML, a new instance of the class
    will be created and popped onto the stack.

    :ivar str next_token: A hash used to assist in paging through very long
        result sets. In most cases, passing this value to certain methods
        will give you another 'page' of results.
    """
    def __init__(self, marker_elem=None):
        """
        :param marker_elem: optional list of (element_name, class) tuples;
            see the class docstring. Anything that is not a list is
            treated as "no markers".
        """
        list.__init__(self)
        if isinstance(marker_elem, list):
            self.markers = marker_elem
        else:
            self.markers = []
        # Paging/bookkeeping fields, filled in by endElement as the
        # corresponding XML elements are encountered.
        self.marker = None
        self.key_marker = None
        self.next_key_marker = None
        self.next_version_id_marker = None
        self.version_id_marker = None
        self.is_truncated = False
        self.next_token = None
        self.status = True

    def startElement(self, name, attrs, connection):
        """SAX hook: if ``name`` matches a registered marker, create the
        associated object, append it to this list and return it so it
        takes over parsing of the sub-element; otherwise return None."""
        for t in self.markers:
            if name == t[0]:
                obj = t[1](connection)
                self.append(obj)
                return obj
        return None

    def to_boolean(self, value, true_value='true'):
        """Return True iff ``value`` equals ``true_value``."""
        return value == true_value

    def endElement(self, name, value, connection):
        """SAX hook: map well-known AWS response elements onto attributes;
        anything unrecognized is stored verbatim via setattr."""
        if name == 'IsTruncated':
            self.is_truncated = self.to_boolean(value)
        elif name == 'Marker':
            self.marker = value
        elif name == 'KeyMarker':
            self.key_marker = value
        elif name == 'NextKeyMarker':
            self.next_key_marker = value
        elif name == 'VersionIdMarker':
            self.version_id_marker = value
        elif name == 'NextVersionIdMarker':
            self.next_version_id_marker = value
        elif name == 'UploadIdMarker':
            self.upload_id_marker = value
        elif name == 'NextUploadIdMarker':
            self.next_upload_id_marker = value
        elif name == 'Bucket':
            self.bucket = value
        elif name == 'MaxUploads':
            self.max_uploads = int(value)
        elif name == 'Prefix':
            self.prefix = value
        elif name == 'return':
            self.status = self.to_boolean(value)
        elif name == 'StatusCode':
            self.status = self.to_boolean(value, 'Success')
        elif name == 'ItemName':
            self.append(value)
        elif name == 'NextToken':
            self.next_token = value
        elif name == 'BoxUsage':
            # Best-effort accumulation of SimpleDB box usage on the
            # connection; connections that don't track it (or non-numeric
            # values) are deliberately ignored. Narrowed from a bare
            # except so real errors are not silently swallowed.
            try:
                connection.box_usage += float(value)
            except (AttributeError, TypeError, ValueError):
                pass
        elif name == 'IsValid':
            self.status = self.to_boolean(value, 'True')
        else:
            setattr(self, name, value)
+
class BooleanResult(object):
    """Parses an AWS response that boils down to a single success flag.

    ``status`` becomes the object's truth value; ``request_id`` and
    ``box_usage`` are captured when present in the response.
    """

    def __init__(self, marker_elem=None):
        # marker_elem is accepted for signature compatibility with
        # ResultSet but is unused here.
        self.status = True
        self.request_id = None
        self.box_usage = None

    def __repr__(self):
        if self.status:
            return 'True'
        else:
            return 'False'

    def __nonzero__(self):
        return self.status

    # Python 3 name for the truth-value hook.
    __bool__ = __nonzero__

    def startElement(self, name, attrs, connection):
        return None

    def to_boolean(self, value, true_value='true'):
        """Return True iff ``value`` equals ``true_value``."""
        if value == true_value:
            return True
        else:
            return False

    def endElement(self, name, value, connection):
        if name == 'return':
            self.status = self.to_boolean(value)
        elif name == 'StatusCode':
            self.status = self.to_boolean(value, 'Success')
        elif name == 'IsValid':
            self.status = self.to_boolean(value, 'True')
        elif name == 'RequestId':
            self.request_id = value
        elif name == 'requestId':
            self.request_id = value
        elif name == 'BoxUsage':
            # Bug fix: this previously overwrote request_id instead of
            # storing the box usage.
            self.box_usage = value
        else:
            setattr(self, name, value)
+
diff --git a/boto/route53/__init__.py b/boto/route53/__init__.py
new file mode 100644
index 0000000..d404bc7
--- /dev/null
+++ b/boto/route53/__init__.py
@@ -0,0 +1,26 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+# this is here for backward compatibility
+# originally, the Route53Connection class was defined here
+from connection import Route53Connection
diff --git a/boto/route53/connection.py b/boto/route53/connection.py
new file mode 100644
index 0000000..bbd218c
--- /dev/null
+++ b/boto/route53/connection.py
@@ -0,0 +1,285 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+import xml.sax
+import time
+import uuid
+import urllib
+import boto
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.resultset import ResultSet
+import boto.jsonresponse
+import exception
+import hostedzone
+
# Template for the CreateHostedZoneRequest XML payload; filled in via
# %-interpolation by Route53Connection.create_hosted_zone.
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
  <Name>%(name)s</Name>
  <CallerReference>%(caller_ref)s</CallerReference>
  <HostedZoneConfig>
    <Comment>%(comment)s</Comment>
  </HostedZoneConfig>
</CreateHostedZoneRequest>"""
+
+#boto.set_stream_logger('dns')
+
class Route53Connection(AWSAuthConnection):
    """Connection to Amazon's Route53 DNS service.

    Route53 is a REST-style API: each method builds a URI under
    /<Version>/ and issues a plain HTTP request, then parses the XML
    response body (via boto.jsonresponse) into a Python data structure.
    """

    DefaultHost = 'route53.amazonaws.com'
    # API version string embedded in every request URI.
    Version = '2010-10-01'
    # XML namespace used when building request bodies (see HZXML).
    XMLNameSpace = 'https://route53.amazonaws.com/doc/2010-10-01/'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 port=None, proxy=None, proxy_port=None,
                 host=DefaultHost, debug=0):
        # NOTE(review): the positional True presumably selects HTTPS
        # (is_secure) -- confirm against AWSAuthConnection's signature.
        AWSAuthConnection.__init__(self, host,
                aws_access_key_id, aws_secret_access_key,
                True, port, proxy, proxy_port, debug=debug)

    def _required_auth_capability(self):
        # Selects the Route53-specific request signing scheme.
        return ['route53']

    def make_request(self, action, path, headers=None, data='', params=None):
        """Issue an HTTP request, first appending any non-None entries of
        ``params`` to ``path`` as a URL-encoded query string."""
        if params:
            pairs = []
            for key, val in params.iteritems():
                if val is None: continue
                pairs.append(key + '=' + urllib.quote(str(val)))
            path += '?' + '&'.join(pairs)
        return AWSAuthConnection.make_request(self, action, path, headers, data)

    # Hosted Zones

    def get_all_hosted_zones(self):
        """
        Returns a Python data structure with information about all
        Hosted Zones defined for the AWS account.
        """
        response = self.make_request('GET', '/%s/hostedzone' % self.Version)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='HostedZones',
                                      item_marker=('HostedZone',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e

    def get_hosted_zone(self, hosted_zone_id):
        """
        Get detailed information about a particular Hosted Zone.

        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone

        """
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element(list_marker='NameServers',
                                      item_marker=('NameServer',))
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e

    def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):
        """
        Create a new Hosted Zone. Returns a Python data structure with
        information about the newly created Hosted Zone.

        :type domain_name: str
        :param domain_name: The name of the domain. This should be a
                            fully-specified domain, and should end with
                            a final period as the last label indication.
                            If you omit the final period, Amazon Route 53
                            assumes the domain is relative to the root.
                            This is the name you have registered with your
                            DNS registrar. It is also the name you will
                            delegate from your registrar to the Amazon
                            Route 53 delegation servers returned in
                            response to this request.

        :type caller_ref: str
        :param caller_ref: A unique string that identifies the request
                           and that allows failed CreateHostedZone requests
                           to be retried without the risk of executing the
                           operation twice.
                           If you don't provide a value for this, boto will
                           generate a Type 4 UUID and use that.

        :type comment: str
        :param comment: Any comments you want to include about the hosted
                        zone.

        """
        if caller_ref is None:
            caller_ref = str(uuid.uuid4())
        params = {'name' : domain_name,
                  'caller_ref' : caller_ref,
                  'comment' : comment,
                  'xmlns' : self.XMLNameSpace}
        xml = HZXML % params
        uri = '/%s/hostedzone' % self.Version
        response = self.make_request('POST', uri,
                                     {'Content-Type' : 'text/xml'}, xml)
        body = response.read()
        boto.log.debug(body)
        # 201 Created is the only success status for this call.
        if response.status == 201:
            e = boto.jsonresponse.Element(list_marker='NameServers',
                                          item_marker=('NameServer',))
            h = boto.jsonresponse.XmlHandler(e, None)
            h.parse(body)
            return e
        else:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)

    def delete_hosted_zone(self, hosted_zone_id):
        """Delete the Hosted Zone identified by ``hosted_zone_id``.
        Returns a Python data structure describing the change."""
        uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
        response = self.make_request('DELETE', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status not in (200, 204):
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e

    # Resource Record Sets

    def get_all_rrsets(self, hosted_zone_id, type=None,
                       name=None, maxitems=None):
        """
        Retrieve the Resource Record Sets defined for this Hosted Zone.
        Returns the raw XML data returned by the Route53 call.

        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone

        :type type: str
        :param type: The type of resource record set to begin the record
                     listing from. Valid choices are:

                     * A
                     * AAAA
                     * CNAME
                     * MX
                     * NS
                     * PTR
                     * SOA
                     * SPF
                     * SRV
                     * TXT

        :type name: str
        :param name: The first name in the lexicographic ordering of domain
                     names to be retrieved

        :type maxitems: int
        :param maxitems: The maximum number of records

        """
        from boto.route53.record import ResourceRecordSets
        # None-valued entries are dropped by make_request.
        params = {'type': type, 'name': name, 'maxitems': maxitems}
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('GET', uri, params=params)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs

    def change_rrsets(self, hosted_zone_id, xml_body):
        """
        Create or change the authoritative DNS information for this
        Hosted Zone.
        Returns a Python data structure with information about the set of
        changes, including the Change ID.

        :type hosted_zone_id: str
        :param hosted_zone_id: The unique identifier for the Hosted Zone

        :type xml_body: str
        :param xml_body: The list of changes to be made, defined in the
                         XML schema defined by the Route53 service.

        """
        uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
        response = self.make_request('POST', uri,
                                     {'Content-Type' : 'text/xml'},
                                     xml_body)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e

    def get_change(self, change_id):
        """
        Get information about a proposed set of changes, as submitted
        by the change_rrsets method.
        Returns a Python data structure with status information about the
        changes.

        :type change_id: str
        :param change_id: The unique identifier for the set of changes.
                          This ID is returned in the response to the
                          change_rrsets method.

        """
        uri = '/%s/change/%s' % (self.Version, change_id)
        response = self.make_request('GET', uri)
        body = response.read()
        boto.log.debug(body)
        if response.status >= 300:
            raise exception.DNSServerError(response.status,
                                           response.reason,
                                           body)
        e = boto.jsonresponse.Element()
        h = boto.jsonresponse.XmlHandler(e, None)
        h.parse(body)
        return e
diff --git a/boto/route53/exception.py b/boto/route53/exception.py
new file mode 100644
index 0000000..ba41285
--- /dev/null
+++ b/boto/route53/exception.py
@@ -0,0 +1,27 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import BotoServerError
+
class DNSServerError(BotoServerError):
    """Raised when the Route53 service returns an error response."""

    pass
diff --git a/boto/route53/hostedzone.py b/boto/route53/hostedzone.py
new file mode 100644
index 0000000..66b79b8
--- /dev/null
+++ b/boto/route53/hostedzone.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
class HostedZone(object):
    """A Route53 Hosted Zone, populated from a parsed XML response."""

    def __init__(self, id=None, name=None, owner=None, version=None,
                 caller_reference=None, config=None):
        self.id = id
        self.name = name
        self.owner = owner
        self.version = version
        self.caller_reference = caller_reference
        self.config = config

    def startElement(self, name, attrs, connection):
        # SAX hook: return a child object to take over parsing of a
        # sub-element, or None to keep parsing at this level.
        if name == 'Config':
            # NOTE(review): ``Config`` is neither defined nor imported in
            # this module, so encountering a <Config> element will raise
            # NameError at runtime -- confirm where Config should come from.
            self.config = Config()
            return self.config
        else:
            return None

    def endElement(self, name, value, connection):
        # Map the well-known hosted-zone elements onto attributes;
        # anything unrecognized is stored verbatim.
        if name == 'Id':
            self.id = value
        elif name == 'Name':
            self.name = value
        elif name == 'Owner':
            self.owner = value
        elif name == 'Version':
            self.version = value
        elif name == 'CallerReference':
            self.caller_reference = value
        else:
            setattr(self, name, value)
+
diff --git a/boto/route53/record.py b/boto/route53/record.py
new file mode 100644
index 0000000..24f0482
--- /dev/null
+++ b/boto/route53/record.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+RECORD_TYPES = ['A', 'AAAA', 'TXT', 'CNAME', 'MX', 'PTR', 'SRV', 'SPF']
+
+from boto.resultset import ResultSet
class ResourceRecordSets(ResultSet):
    """A collection of resource record sets for a hosted zone, plus a
    batch of pending changes that can be committed to Route53 in a
    single ChangeResourceRecordSets request."""

    ChangeResourceRecordSetsBody = """<?xml version="1.0" encoding="UTF-8"?>
    <ChangeResourceRecordSetsRequest xmlns="https://route53.amazonaws.com/doc/2010-10-01/">
            <ChangeBatch>
                <Comment>%(comment)s</Comment>
                <Changes>%(changes)s</Changes>
            </ChangeBatch>
        </ChangeResourceRecordSetsRequest>"""

    ChangeXML = """<Change>
        <Action>%(action)s</Action>
        %(record)s
    </Change>"""


    def __init__(self, connection=None, hosted_zone_id=None, comment=None):
        self.connection = connection
        self.hosted_zone_id = hosted_zone_id
        self.comment = comment
        # Pending [action, Record] pairs, serialized by to_xml().
        self.changes = []
        # Paging state captured from the response by endElement().
        self.next_record_name = None
        self.next_record_type = None
        ResultSet.__init__(self, [('ResourceRecordSet', Record)])

    def __repr__(self):
        return '<ResourceRecordSets: %s>' % self.hosted_zone_id

    def add_change(self, action, name, type, ttl=600):
        """Queue a change request and return the new Record so the
        caller can attach values to it."""
        record = Record(name, type, ttl)
        self.changes.append([action, record])
        return record

    def to_xml(self):
        """Render the queued changes as a ChangeResourceRecordSetsRequest
        document."""
        rendered = [self.ChangeXML % {'action': action,
                                      'record': record.to_xml()}
                    for action, record in self.changes]
        return self.ChangeResourceRecordSetsBody % {
            'comment': self.comment,
            'changes': ''.join(rendered),
        }

    def commit(self):
        """Send the queued changes to Route53, creating a default
        connection first if none was supplied."""
        if not self.connection:
            import boto
            self.connection = boto.connect_route53()
        return self.connection.change_rrsets(self.hosted_zone_id, self.to_xml())

    def endElement(self, name, value, connection):
        """Capture the paging elements here; everything else is handled
        by ResultSet."""
        if name == 'NextRecordName':
            self.next_record_name = value
            return None
        if name == 'NextRecordType':
            self.next_record_type = value
            return None
        return ResultSet.endElement(self, name, value, connection)

    def __iter__(self):
        """Yield every record, transparently fetching additional pages
        while the current result set is marked truncated."""
        page = ResultSet.__iter__(self)
        while page:
            for record in page:
                yield record
            if self.is_truncated:
                self.is_truncated = False
                page = self.connection.get_all_rrsets(
                    self.hosted_zone_id,
                    name=self.next_record_name,
                    type=self.next_record_type)
            else:
                page = None
+
+
+
class Record(object):
    """An individual ResourceRecordSet"""

    XMLBody = """<ResourceRecordSet>
        <Name>%(name)s</Name>
        <Type>%(type)s</Type>
        <TTL>%(ttl)s</TTL>
        <ResourceRecords>%(records)s</ResourceRecords>
    </ResourceRecordSet>"""

    ResourceRecordBody = """<ResourceRecord>
        <Value>%s</Value>
    </ResourceRecord>"""


    def __init__(self, name=None, type=None, ttl=600, resource_records=None):
        """
        :param name: fully qualified domain name of the record set
        :param type: record type (e.g. 'A', 'CNAME')
        :param ttl: time-to-live in seconds
        :param resource_records: optional initial list of record values
        """
        self.name = name
        self.type = type
        self.ttl = ttl
        # ``is None`` (not ==) and a fresh list per instance so values
        # are never shared between Record objects.
        if resource_records is None:
            resource_records = []
        self.resource_records = resource_records

    def add_value(self, value):
        """Add a resource record value"""
        self.resource_records.append(value)

    def to_xml(self):
        """Spit this resource record set out as XML"""
        records = ""
        for r in self.resource_records:
            records += self.ResourceRecordBody % r
        params = {
            "name": self.name,
            "type": self.type,
            "ttl": self.ttl,
            "records": records
        }
        return self.XMLBody % params

    def endElement(self, name, value, connection):
        # SAX hook: capture the record fields; values accumulate in
        # resource_records.
        if name == 'Name':
            self.name = value
        elif name == 'Type':
            self.type = value
        elif name == 'TTL':
            self.ttl = value
        elif name == 'Value':
            self.resource_records.append(value)

    def startElement(self, name, attrs, connection):
        # No sub-elements are delegated to child objects.
        return None
diff --git a/boto/s3/__init__.py b/boto/s3/__init__.py
new file mode 100644
index 0000000..f3f4c1e
--- /dev/null
+++ b/boto/s3/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
diff --git a/boto/s3/acl.py b/boto/s3/acl.py
new file mode 100644
index 0000000..2640499
--- /dev/null
+++ b/boto/s3/acl.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
+
# Canned ACL names accepted by S3 (sent in the provider's ACL header).
# Each name expands server-side to a fixed set of grants.
CannedACLStrings = ['private', 'public-read',
                    'public-read-write', 'authenticated-read',
                    'bucket-owner-read', 'bucket-owner-full-control']
+
+
class Policy:
    """An S3 AccessControlPolicy document: the resource owner plus an ACL.

    Instances are normally filled in by boto's SAX handler while parsing
    a ``GET ?acl`` response, via startElement/endElement.
    """

    def __init__(self, parent=None):
        self.parent = parent
        # Populated with an ACL instance when the AccessControlList
        # element is encountered during parsing.
        self.acl = None

    def __repr__(self):
        # Describe each grant as "grantee = permission", tagging the
        # grant that belongs to the owning account.
        descriptions = []
        for grant in self.acl.grants:
            if grant.id == self.owner.id:
                descriptions.append("%s (owner) = %s"
                                    % (grant.display_name, grant.permission))
                continue
            if grant.type == 'CanonicalUser':
                grantee = grant.display_name
            elif grant.type == 'Group':
                grantee = grant.uri
            else:
                # NOTE(review): reads ``email`` (not ``email_address``);
                # preserved from the original implementation.
                grantee = grant.email
            descriptions.append("%s = %s" % (grantee, grant.permission))
        return "<Policy: %s>" % ", ".join(descriptions)

    def startElement(self, name, attrs, connection):
        # Delegate nested Owner / AccessControlList elements to dedicated
        # parser objects; everything else is handled by endElement.
        if name == 'AccessControlList':
            self.acl = ACL(self)
            return self.acl
        if name == 'Owner':
            self.owner = User(self)
            return self.owner
        return None

    def endElement(self, name, value, connection):
        # Owner/AccessControlList were fully consumed by child handlers;
        # any other element becomes a plain attribute.
        if name not in ('Owner', 'AccessControlList'):
            setattr(self, name, value)

    def to_xml(self):
        """Serialize the owner and ACL back into AccessControlPolicy XML."""
        return ''.join(['<AccessControlPolicy>',
                        self.owner.to_xml(),
                        self.acl.to_xml(),
                        '</AccessControlPolicy>'])
+
class ACL:
    """An ordered collection of Grant objects (one AccessControlList)."""

    def __init__(self, policy=None):
        self.policy = policy
        self.grants = []

    def add_grant(self, grant):
        """Append an already-constructed Grant object."""
        self.grants.append(grant)

    def add_email_grant(self, permission, email_address):
        """Grant *permission* to the AWS account registered under
        *email_address* (AmazonCustomerByEmail grantee)."""
        self.add_grant(Grant(permission=permission,
                             type='AmazonCustomerByEmail',
                             email_address=email_address))

    def add_user_grant(self, permission, user_id):
        """Grant *permission* to the canonical user *user_id*."""
        self.add_grant(Grant(permission=permission, type='CanonicalUser',
                             id=user_id))

    def startElement(self, name, attrs, connection):
        if name != 'Grant':
            return None
        # NOTE(review): the positional argument lands in Grant's
        # ``permission`` parameter; it is overwritten when the nested
        # Permission element closes.  Preserved from the original.
        grant = Grant(self)
        self.grants.append(grant)
        return grant

    def endElement(self, name, value, connection):
        # Grant elements were consumed by the child Grant handler;
        # anything else becomes a plain attribute.
        if name != 'Grant':
            setattr(self, name, value)

    def to_xml(self):
        """Serialize every grant inside an AccessControlList element."""
        parts = ['<AccessControlList>']
        for grant in self.grants:
            parts.append(grant.to_xml())
        parts.append('</AccessControlList>')
        return ''.join(parts)
+
class Grant:
    """A single (grantee, permission) pair inside an ACL.

    The grantee flavor is selected by ``type``: CanonicalUser
    (id/display_name), Group (uri) or AmazonCustomerByEmail
    (email_address).
    """

    # xsi namespace declaration emitted on every Grantee element.
    NameSpace = 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'

    # Child element name -> attribute it populates (see endElement).
    _SimpleFields = {'ID': 'id', 'DisplayName': 'display_name',
                     'URI': 'uri', 'EmailAddress': 'email_address',
                     'Permission': 'permission'}

    def __init__(self, permission=None, type=None, id=None,
                 display_name=None, uri=None, email_address=None):
        self.permission = permission
        self.id = id
        self.display_name = display_name
        self.uri = uri
        self.email_address = email_address
        self.type = type

    def startElement(self, name, attrs, connection):
        # The grantee flavor is carried on the Grantee element itself.
        if name == 'Grantee':
            self.type = attrs['xsi:type']
        return None

    def endElement(self, name, value, connection):
        if name == 'Grantee':
            return
        field = self._SimpleFields.get(name)
        if field is None:
            setattr(self, name, value)
        else:
            setattr(self, field, value)

    def to_xml(self):
        """Serialize this grant, choosing grantee children by type."""
        pieces = ['<Grant>',
                  '<Grantee %s xsi:type="%s">' % (self.NameSpace, self.type)]
        if self.type == 'CanonicalUser':
            pieces.append('<ID>%s</ID>' % self.id)
            pieces.append('<DisplayName>%s</DisplayName>' % self.display_name)
        elif self.type == 'Group':
            pieces.append('<URI>%s</URI>' % self.uri)
        else:
            pieces.append('<EmailAddress>%s</EmailAddress>'
                          % self.email_address)
        pieces.append('</Grantee>')
        pieces.append('<Permission>%s</Permission>' % self.permission)
        pieces.append('</Grant>')
        return ''.join(pieces)
+
diff --git a/boto/s3/bucket.py b/boto/s3/bucket.py
new file mode 100644
index 0000000..c1b38e9
--- /dev/null
+++ b/boto/s3/bucket.py
@@ -0,0 +1,1030 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto import handler
+from boto.provider import Provider
+from boto.resultset import ResultSet
+from boto.s3.acl import ACL, Policy, CannedACLStrings, Grant
+from boto.s3.key import Key
+from boto.s3.prefix import Prefix
+from boto.s3.deletemarker import DeleteMarker
+from boto.s3.user import User
+from boto.s3.multipart import MultiPartUpload
+from boto.s3.multipart import CompleteMultiPartUpload
+from boto.s3.bucketlistresultset import BucketListResultSet
+from boto.s3.bucketlistresultset import VersionedBucketListResultSet
+from boto.s3.bucketlistresultset import MultiPartUploadListResultSet
+import boto.jsonresponse
+import boto.utils
+import xml.sax
+import urllib
+import re
+from collections import defaultdict
+
# as per http://goo.gl/BDuud (02/19/2011)
class S3WebsiteEndpointTranslate:
    """Map an S3 location constraint to its website endpoint host prefix.

    Regions not explicitly listed fall back to the classic
    (US Standard) endpoint prefix.
    """

    trans_region = defaultdict(lambda: 's3-website-us-east-1')
    trans_region['EU'] = 's3-website-eu-west-1'
    trans_region['us-west-1'] = 's3-website-us-west-1'
    trans_region['ap-southeast-1'] = 's3-website-ap-southeast-1'

    @classmethod
    def translate_region(cls, reg):
        """Return the website endpoint prefix for region *reg*.

        :type reg: string
        :param reg: an S3 LocationConstraint value ('' for US Standard)
        """
        # The first parameter of a classmethod is the class object; it
        # was misleadingly named ``self`` before.
        return cls.trans_region[reg]
+
+S3Permissions = ['READ', 'WRITE', 'READ_ACP', 'WRITE_ACP', 'FULL_CONTROL']
+
class Bucket(object):
    """Represents an S3 bucket and the operations available on it:
    key listing and CRUD, ACLs, logging, versioning, request payment
    and website configuration."""

    # XML template for PUT ?logging: enable delivery of access logs
    # to a target bucket/prefix (two positional %s).
    BucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
       <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <LoggingEnabled>
           <TargetBucket>%s</TargetBucket>
           <TargetPrefix>%s</TargetPrefix>
         </LoggingEnabled>
       </BucketLoggingStatus>"""

    # XML template for PUT ?logging that turns logging off.
    EmptyBucketLoggingBody = """<?xml version="1.0" encoding="UTF-8"?>
       <BucketLoggingStatus xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
       </BucketLoggingStatus>"""

    # Group URI that must hold WRITE/READ_ACP on a log target bucket.
    LoggingGroup = 'http://acs.amazonaws.com/groups/s3/LogDelivery'

    # XML template for PUT ?requestPayment (%s = BucketOwner|Requester).
    BucketPaymentBody = """<?xml version="1.0" encoding="UTF-8"?>
       <RequestPaymentConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Payer>%s</Payer>
       </RequestPaymentConfiguration>"""

    # XML template for PUT ?versioning (%s, %s = Status, MfaDelete).
    VersioningBody = """<?xml version="1.0" encoding="UTF-8"?>
       <VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
         <Status>%s</Status>
         <MfaDelete>%s</MfaDelete>
       </VersioningConfiguration>"""

    # XML template for PUT ?website (%s, %s = index suffix, optional
    # WebsiteErrorFragment below).
    WebsiteBody = """<?xml version="1.0" encoding="UTF-8"?>
      <WebsiteConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
        <IndexDocument><Suffix>%s</Suffix></IndexDocument>
        %s
      </WebsiteConfiguration>"""

    # Optional error-document element spliced into WebsiteBody.
    WebsiteErrorFragment = """<ErrorDocument><Key>%s</Key></ErrorDocument>"""

    # Regexes used to extract Status/MfaDelete from ?versioning responses.
    VersionRE = '<Status>([A-Za-z]+)</Status>'
    MFADeleteRE = '<MfaDelete>([A-Za-z]+)</MfaDelete>'
+
+ def __init__(self, connection=None, name=None, key_class=Key):
+ self.name = name
+ self.connection = connection
+ self.key_class = key_class
+
+ def __repr__(self):
+ return '<Bucket: %s>' % self.name
+
+ def __iter__(self):
+ return iter(BucketListResultSet(self))
+
+ def __contains__(self, key_name):
+ return not (self.get_key(key_name) is None)
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Name':
+ self.name = value
+ elif name == 'CreationDate':
+ self.creation_date = value
+ else:
+ setattr(self, name, value)
+
+ def set_key_class(self, key_class):
+ """
+ Set the Key class associated with this bucket. By default, this
+ would be the boto.s3.key.Key class but if you want to subclass that
+ for some reason this allows you to associate your new class with a
+ bucket so that when you call bucket.new_key() or when you get a listing
+ of keys in the bucket you will get an instances of your key class
+ rather than the default.
+
+ :type key_class: class
+ :param key_class: A subclass of Key that can be more specific
+ """
+ self.key_class = key_class
+
+ def lookup(self, key_name, headers=None):
+ """
+ Deprecated: Please use get_key method.
+
+ :type key_name: string
+ :param key_name: The name of the key to retrieve
+
+ :rtype: :class:`boto.s3.key.Key`
+ :returns: A Key object from this bucket.
+ """
+ return self.get_key(key_name, headers=headers)
+
    def get_key(self, key_name, headers=None, version_id=None):
        """
        Check to see if a particular key exists within the bucket.  This
        method uses a HEAD request to check for the existance of the key.
        Returns: An instance of a Key object or None

        :type key_name: string
        :param key_name: The name of the key to retrieve

        :type version_id: string
        :param version_id: If supplied, HEAD this specific version.

        :rtype: :class:`boto.s3.key.Key`
        :returns: A Key object from this bucket, or None on a 404.
        """
        if version_id:
            query_args = 'versionId=%s' % version_id
        else:
            query_args = None
        response = self.connection.make_request('HEAD', self.name, key_name,
                                                headers=headers,
                                                query_args=query_args)
        # Allow any success status (2xx) - for example this lets us
        # support Range gets, which return status 206:
        if response.status/100 == 2:
            # Drain the (empty) HEAD body so the connection can be reused.
            response.read()
            k = self.key_class(self)
            provider = self.connection.provider
            # Extract user metadata (provider metadata headers) from the
            # response headers.
            k.metadata = boto.utils.get_aws_metadata(response.msg, provider)
            k.etag = response.getheader('etag')
            k.content_type = response.getheader('content-type')
            k.content_encoding = response.getheader('content-encoding')
            k.last_modified = response.getheader('last-modified')
            # NOTE(review): assumes Content-Length is always present on a
            # 2xx HEAD response; int(None) would raise TypeError -- confirm.
            k.size = int(response.getheader('content-length'))
            k.cache_control = response.getheader('cache-control')
            k.name = key_name
            k.handle_version_headers(response)
            return k
        else:
            if response.status == 404:
                # Key does not exist; drain and signal "not found".
                response.read()
                return None
            else:
                # The body has not been read here, so pass an empty string.
                raise self.connection.provider.storage_response_error(
                    response.status, response.reason, '')
+
+ def list(self, prefix='', delimiter='', marker='', headers=None):
+ """
+ List key objects within a bucket. This returns an instance of an
+ BucketListResultSet that automatically handles all of the result
+ paging, etc. from S3. You just need to keep iterating until
+ there are no more results.
+
+ Called with no arguments, this will return an iterator object across
+ all keys within the bucket.
+
+ The Key objects returned by the iterator are obtained by parsing
+ the results of a GET on the bucket, also known as the List Objects
+ request. The XML returned by this request contains only a subset
+ of the information about each key. Certain metadata fields such
+ as Content-Type and user metadata are not available in the XML.
+ Therefore, if you want these additional metadata fields you will
+ have to do a HEAD request on the Key in the bucket.
+
+ :type prefix: string
+ :param prefix: allows you to limit the listing to a particular
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle
+ through the keys that begin with the string '/foo/'.
+
+ :type delimiter: string
+ :param delimiter: can be used in conjunction with the prefix
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
+ for more details.
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
+ :return: an instance of a BucketListResultSet that handles paging, etc
+ """
+ return BucketListResultSet(self, prefix, delimiter, marker, headers)
+
+ def list_versions(self, prefix='', delimiter='', key_marker='',
+ version_id_marker='', headers=None):
+ """
+ List version objects within a bucket. This returns an instance of an
+ VersionedBucketListResultSet that automatically handles all of the result
+ paging, etc. from S3. You just need to keep iterating until
+ there are no more results.
+ Called with no arguments, this will return an iterator object across
+ all keys within the bucket.
+
+ :type prefix: string
+ :param prefix: allows you to limit the listing to a particular
+ prefix. For example, if you call the method with
+ prefix='/foo/' then the iterator will only cycle
+ through the keys that begin with the string '/foo/'.
+
+ :type delimiter: string
+ :param delimiter: can be used in conjunction with the prefix
+ to allow you to organize and browse your keys
+ hierarchically. See:
+ http://docs.amazonwebservices.com/AmazonS3/2006-03-01/
+ for more details.
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
+ :return: an instance of a BucketListResultSet that handles paging, etc
+ """
+ return VersionedBucketListResultSet(self, prefix, delimiter, key_marker,
+ version_id_marker, headers)
+
+ def list_multipart_uploads(self, key_marker='',
+ upload_id_marker='',
+ headers=None):
+ """
+ List multipart upload objects within a bucket. This returns an
+ instance of an MultiPartUploadListResultSet that automatically
+ handles all of the result paging, etc. from S3. You just need
+ to keep iterating until there are no more results.
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :rtype: :class:`boto.s3.bucketlistresultset.BucketListResultSet`
+ :return: an instance of a BucketListResultSet that handles paging, etc
+ """
+ return MultiPartUploadListResultSet(self, key_marker,
+ upload_id_marker,
+ headers)
+
    def _get_all(self, element_map, initial_query_string='',
                 headers=None, **params):
        # Shared implementation behind get_all_keys / get_all_versions /
        # get_all_multipart_uploads: build a query string from keyword
        # args, GET the bucket and parse the XML into a ResultSet.
        l = []
        for k,v in params.items():
            # Python keyword args use '_' where the S3 API expects '-'.
            k = k.replace('_', '-')
            if k == 'maxkeys':
                k = 'max-keys'
            # Encode Python 2 ``unicode`` values to UTF-8 bytes before
            # URL-quoting them.
            if isinstance(v, unicode):
                v = v.encode('utf-8')
            # Skip empty/None values entirely rather than sending them.
            if v is not None and v != '':
                l.append('%s=%s' % (urllib.quote(k), urllib.quote(str(v))))
        if len(l):
            s = initial_query_string + '&' + '&'.join(l)
        else:
            s = initial_query_string
        response = self.connection.make_request('GET', self.name,
                                                headers=headers,
                                                query_args=s)
        body = response.read()
        boto.log.debug(body)
        if response.status == 200:
            # element_map tells the SAX handler which XML elements map to
            # which result classes.
            rs = ResultSet(element_map)
            h = handler.XmlHandler(rs, self)
            xml.sax.parseString(body, h)
            return rs
        else:
            raise self.connection.provider.storage_response_error(
                response.status, response.reason, body)
+
+ def get_all_keys(self, headers=None, **params):
+ """
+ A lower-level method for listing contents of a bucket.
+ This closely models the actual S3 API and requires you to manually
+ handle the paging of results. For a higher-level method
+ that handles the details of paging for you, you can use the list method.
+
+ :type max_keys: int
+ :param max_keys: The maximum number of keys to retrieve
+
+ :type prefix: string
+ :param prefix: The prefix of the keys you want to retrieve
+
+ :type marker: string
+ :param marker: The "marker" of where you are in the result set
+
+ :type delimiter: string
+ :param delimiter: If this optional, Unicode string parameter
+ is included with your request, then keys that
+ contain the same string between the prefix and
+ the first occurrence of the delimiter will be
+ rolled up into a single result element in the
+ CommonPrefixes collection. These rolled-up keys
+ are not returned elsewhere in the response.
+
+ :rtype: ResultSet
+ :return: The result from S3 listing the keys requested
+
+ """
+ return self._get_all([('Contents', self.key_class),
+ ('CommonPrefixes', Prefix)],
+ '', headers, **params)
+
+ def get_all_versions(self, headers=None, **params):
+ """
+ A lower-level, version-aware method for listing contents of a bucket.
+ This closely models the actual S3 API and requires you to manually
+ handle the paging of results. For a higher-level method
+ that handles the details of paging for you, you can use the list method.
+
+ :type max_keys: int
+ :param max_keys: The maximum number of keys to retrieve
+
+ :type prefix: string
+ :param prefix: The prefix of the keys you want to retrieve
+
+ :type key_marker: string
+ :param key_marker: The "marker" of where you are in the result set
+ with respect to keys.
+
+ :type version_id_marker: string
+ :param version_id_marker: The "marker" of where you are in the result
+ set with respect to version-id's.
+
+ :type delimiter: string
+ :param delimiter: If this optional, Unicode string parameter
+ is included with your request, then keys that
+ contain the same string between the prefix and
+ the first occurrence of the delimiter will be
+ rolled up into a single result element in the
+ CommonPrefixes collection. These rolled-up keys
+ are not returned elsewhere in the response.
+
+ :rtype: ResultSet
+ :return: The result from S3 listing the keys requested
+
+ """
+ return self._get_all([('Version', self.key_class),
+ ('CommonPrefixes', Prefix),
+ ('DeleteMarker', DeleteMarker)],
+ 'versions', headers, **params)
+
+ def get_all_multipart_uploads(self, headers=None, **params):
+ """
+ A lower-level, version-aware method for listing active
+ MultiPart uploads for a bucket. This closely models the
+ actual S3 API and requires you to manually handle the paging
+ of results. For a higher-level method that handles the
+ details of paging for you, you can use the list method.
+
+ :type max_uploads: int
+ :param max_uploads: The maximum number of uploads to retrieve.
+ Default value is 1000.
+
+ :type key_marker: string
+ :param key_marker: Together with upload_id_marker, this parameter
+ specifies the multipart upload after which listing
+ should begin. If upload_id_marker is not specified,
+ only the keys lexicographically greater than the
+ specified key_marker will be included in the list.
+
+ If upload_id_marker is specified, any multipart
+ uploads for a key equal to the key_marker might
+ also be included, provided those multipart uploads
+ have upload IDs lexicographically greater than the
+ specified upload_id_marker.
+
+ :type upload_id_marker: string
+ :param upload_id_marker: Together with key-marker, specifies
+ the multipart upload after which listing
+ should begin. If key_marker is not specified,
+ the upload_id_marker parameter is ignored.
+ Otherwise, any multipart uploads for a key
+ equal to the key_marker might be included
+ in the list only if they have an upload ID
+ lexicographically greater than the specified
+ upload_id_marker.
+
+
+ :rtype: ResultSet
+ :return: The result from S3 listing the uploads requested
+
+ """
+ return self._get_all([('Upload', MultiPartUpload)],
+ 'uploads', headers, **params)
+
+ def new_key(self, key_name=None):
+ """
+ Creates a new key
+
+ :type key_name: string
+ :param key_name: The name of the key to create
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ return self.key_class(self, key_name)
+
+ def generate_url(self, expires_in, method='GET',
+ headers=None, force_http=False):
+ return self.connection.generate_url(expires_in, method, self.name,
+ headers=headers,
+ force_http=force_http)
+
+ def delete_key(self, key_name, headers=None,
+ version_id=None, mfa_token=None):
+ """
+ Deletes a key from the bucket. If a version_id is provided,
+ only that version of the key will be deleted.
+
+ :type key_name: string
+ :param key_name: The key name to delete
+
+ :type version_id: string
+ :param version_id: The version ID (optional)
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: A tuple or list consisting of the serial number
+ from the MFA device and the current value of
+ the six-digit token associated with the device.
+ This value is required anytime you are
+ deleting versioned objects from a bucket
+ that has the MFADelete option on the bucket.
+ """
+ provider = self.connection.provider
+ if version_id:
+ query_args = 'versionId=%s' % version_id
+ else:
+ query_args = None
+ if mfa_token:
+ if not headers:
+ headers = {}
+ headers[provider.mfa_header] = ' '.join(mfa_token)
+ response = self.connection.make_request('DELETE', self.name, key_name,
+ headers=headers,
+ query_args=query_args)
+ body = response.read()
+ if response.status != 204:
+ raise provider.storage_response_error(response.status,
+ response.reason, body)
+
+ def copy_key(self, new_key_name, src_bucket_name,
+ src_key_name, metadata=None, src_version_id=None,
+ storage_class='STANDARD', preserve_acl=False):
+ """
+ Create a new key in the bucket by copying another existing key.
+
+ :type new_key_name: string
+ :param new_key_name: The name of the new key
+
+ :type src_bucket_name: string
+ :param src_bucket_name: The name of the source bucket
+
+ :type src_key_name: string
+ :param src_key_name: The name of the source key
+
+ :type src_version_id: string
+ :param src_version_id: The version id for the key. This param
+ is optional. If not specified, the newest
+ version of the key will be copied.
+
+ :type metadata: dict
+ :param metadata: Metadata to be associated with new key.
+ If metadata is supplied, it will replace the
+ metadata of the source key being copied.
+ If no metadata is supplied, the source key's
+ metadata will be copied to the new key.
+
+ :type storage_class: string
+ :param storage_class: The storage class of the new key.
+ By default, the new key will use the
+ standard storage class. Possible values are:
+ STANDARD | REDUCED_REDUNDANCY
+
+ :type preserve_acl: bool
+ :param preserve_acl: If True, the ACL from the source key
+ will be copied to the destination
+ key. If False, the destination key
+ will have the default ACL.
+ Note that preserving the ACL in the
+ new key object will require two
+ additional API calls to S3, one to
+ retrieve the current ACL and one to
+ set that ACL on the new object. If
+ you don't care about the ACL, a value
+ of False will be significantly more
+ efficient.
+
+ :rtype: :class:`boto.s3.key.Key` or subclass
+ :returns: An instance of the newly created key object
+ """
+ if preserve_acl:
+ acl = self.get_xml_acl(src_key_name)
+ src = '%s/%s' % (src_bucket_name, urllib.quote(src_key_name))
+ if src_version_id:
+ src += '?version_id=%s' % src_version_id
+ provider = self.connection.provider
+ headers = {provider.copy_source_header : src}
+ if storage_class != 'STANDARD':
+ headers[provider.storage_class_header] = storage_class
+ if metadata:
+ headers[provider.metadata_directive_header] = 'REPLACE'
+ headers = boto.utils.merge_meta(headers, metadata)
+ else:
+ headers[provider.metadata_directive_header] = 'COPY'
+ response = self.connection.make_request('PUT', self.name, new_key_name,
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ key = self.new_key(new_key_name)
+ h = handler.XmlHandler(key, self)
+ xml.sax.parseString(body, h)
+ if hasattr(key, 'Error'):
+ raise provider.storage_copy_error(key.Code, key.Message, body)
+ key.handle_version_headers(response)
+ if preserve_acl:
+ self.set_xml_acl(acl, new_key_name)
+ return key
+ else:
+ raise provider.storage_response_error(response.status, response.reason, body)
+
+ def set_canned_acl(self, acl_str, key_name='', headers=None,
+ version_id=None):
+ assert acl_str in CannedACLStrings
+
+ if headers:
+ headers[self.connection.provider.acl_header] = acl_str
+ else:
+ headers={self.connection.provider.acl_header: acl_str}
+
+ query_args='acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('PUT', self.name, key_name,
+ headers=headers, query_args=query_args)
+ body = response.read()
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_xml_acl(self, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('GET', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+ return body
+
+ def set_xml_acl(self, acl_str, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('PUT', self.name, key_name,
+ data=acl_str,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status != 200:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_acl(self, acl_or_str, key_name='', headers=None, version_id=None):
+ if isinstance(acl_or_str, Policy):
+ self.set_xml_acl(acl_or_str.to_xml(), key_name,
+ headers, version_id)
+ else:
+ self.set_canned_acl(acl_or_str, key_name,
+ headers, version_id)
+
+ def get_acl(self, key_name='', headers=None, version_id=None):
+ query_args = 'acl'
+ if version_id:
+ query_args += '&versionId=%s' % version_id
+ response = self.connection.make_request('GET', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ policy = Policy(self)
+ h = handler.XmlHandler(policy, self)
+ xml.sax.parseString(body, h)
+ return policy
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def make_public(self, recursive=False, headers=None):
+ self.set_canned_acl('public-read', headers=headers)
+ if recursive:
+ for key in self:
+ self.set_canned_acl('public-read', key.name, headers=headers)
+
+ def add_email_grant(self, permission, email_address,
+ recursive=False, headers=None):
+ """
+ Convenience method that provides a quick way to add an email grant
+ to a bucket. This method retrieves the current ACL, creates a new
+ grant based on the parameters passed in, adds that grant to the ACL
+ and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type email_address: string
+ :param email_address: The email address associated with the AWS
+ account your are granting the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value to controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ if permission not in S3Permissions:
+ raise self.connection.provider.storage_permissions_error(
+ 'Unknown Permission: %s' % permission)
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_email_grant(permission, email_address)
+ self.set_acl(policy, headers=headers)
+ if recursive:
+ for key in self:
+ key.add_email_grant(permission, email_address, headers=headers)
+
+ def add_user_grant(self, permission, user_id,
+ recursive=False, headers=None):
+ """
+ Convenience method that provides a quick way to add a canonical
+ user grant to a bucket. This method retrieves the current ACL,
+ creates a new grant based on the parameters passed in, adds that
+ grant to the ACL and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type user_id: string
+ :param user_id: The canonical user id associated with the AWS
+ account your are granting the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value to controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ if permission not in S3Permissions:
+ raise self.connection.provider.storage_permissions_error(
+ 'Unknown Permission: %s' % permission)
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_user_grant(permission, user_id)
+ self.set_acl(policy, headers=headers)
+ if recursive:
+ for key in self:
+ key.add_user_grant(permission, user_id, headers=headers)
+
+ def list_grants(self, headers=None):
+ policy = self.get_acl(headers=headers)
+ return policy.acl.grants
+
+ def get_location(self):
+ """
+ Returns the LocationConstraint for the bucket.
+
+ :rtype: str
+ :return: The LocationConstraint for the bucket or the empty
+ string if no constraint was specified when bucket
+ was created.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='location')
+ body = response.read()
+ if response.status == 200:
+ rs = ResultSet(self)
+ h = handler.XmlHandler(rs, self)
+ xml.sax.parseString(body, h)
+ return rs.LocationConstraint
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def enable_logging(self, target_bucket, target_prefix='', headers=None):
+ if isinstance(target_bucket, Bucket):
+ target_bucket = target_bucket.name
+ body = self.BucketLoggingBody % (target_bucket, target_prefix)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def disable_logging(self, headers=None):
+ body = self.EmptyBucketLoggingBody
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_logging_status(self, headers=None):
+ response = self.connection.make_request('GET', self.name,
+ query_args='logging', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_as_logging_target(self, headers=None):
+ policy = self.get_acl(headers=headers)
+ g1 = Grant(permission='WRITE', type='Group', uri=self.LoggingGroup)
+ g2 = Grant(permission='READ_ACP', type='Group', uri=self.LoggingGroup)
+ policy.acl.add_grant(g1)
+ policy.acl.add_grant(g2)
+ self.set_acl(policy, headers=headers)
+
+ def get_request_payment(self, headers=None):
+ response = self.connection.make_request('GET', self.name,
+ query_args='requestPayment', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_request_payment(self, payer='BucketOwner', headers=None):
+ body = self.BucketPaymentBody % payer
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='requestPayment', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def configure_versioning(self, versioning, mfa_delete=False,
+ mfa_token=None, headers=None):
+ """
+ Configure versioning for this bucket.
+ Note: This feature is currently in beta release and is available
+ only in the Northern California region.
+
+ :type versioning: bool
+ :param versioning: A boolean indicating whether version is
+ enabled (True) or disabled (False).
+
+ :type mfa_delete: bool
+ :param mfa_delete: A boolean indicating whether the Multi-Factor
+ Authentication Delete feature is enabled (True)
+ or disabled (False). If mfa_delete is enabled
+ then all Delete operations will require the
+ token from your MFA device to be passed in
+ the request.
+
+ :type mfa_token: tuple or list of strings
+ :param mfa_token: A tuple or list consisting of the serial number
+ from the MFA device and the current value of
+ the six-digit token associated with the device.
+ This value is required when you are changing
+ the status of the MfaDelete property of
+ the bucket.
+ """
+ if versioning:
+ ver = 'Enabled'
+ else:
+ ver = 'Suspended'
+ if mfa_delete:
+ mfa = 'Enabled'
+ else:
+ mfa = 'Disabled'
+ body = self.VersioningBody % (ver, mfa)
+ if mfa_token:
+ if not headers:
+ headers = {}
+ provider = self.connection.provider
+ headers[provider.mfa_header] = ' '.join(mfa_token)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='versioning', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_versioning_status(self, headers=None):
+ """
+ Returns the current status of versioning on the bucket.
+
+ :rtype: dict
+ :returns: A dictionary containing a key named 'Versioning'
+ that can have a value of either Enabled, Disabled,
+ or Suspended. Also, if MFADelete has ever been enabled
+ on the bucket, the dictionary will contain a key
+ named 'MFADelete' which will have a value of either
+ Enabled or Suspended.
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='versioning', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ d = {}
+ ver = re.search(self.VersionRE, body)
+ if ver:
+ d['Versioning'] = ver.group(1)
+ mfa = re.search(self.MFADeleteRE, body)
+ if mfa:
+ d['MfaDelete'] = mfa.group(1)
+ return d
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def configure_website(self, suffix, error_key='', headers=None):
+ """
+ Configure this bucket to act as a website
+
+ :type suffix: str
+ :param suffix: Suffix that is appended to a request that is for a
+ "directory" on the website endpoint (e.g. if the suffix
+ is index.html and you make a request to
+ samplebucket/images/ the data that is returned will
+ be for the object with the key name images/index.html).
+ The suffix must not be empty and must not include a
+ slash character.
+
+ :type error_key: str
+ :param error_key: The object key name to use when a 4XX class
+ error occurs. This is optional.
+
+ """
+ if error_key:
+ error_frag = self.WebsiteErrorFragment % error_key
+ else:
+ error_frag = ''
+ body = self.WebsiteBody % (suffix, error_frag)
+ response = self.connection.make_request('PUT', self.name, data=body,
+ query_args='website',
+ headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_website_configuration(self, headers=None):
+ """
+ Returns the current status of website configuration on the bucket.
+
+ :rtype: dict
+ :returns: A dictionary containing a Python representation
+ of the XML response from S3. The overall structure is:
+
+ * WebsiteConfiguration
+ * IndexDocument
+ * Suffix : suffix that is appended to request that
+ is for a "directory" on the website endpoint
+ * ErrorDocument
+ * Key : name of object to serve when an error occurs
+ """
+ response = self.connection.make_request('GET', self.name,
+ query_args='website', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ e = boto.jsonresponse.Element()
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def delete_website_configuration(self, headers=None):
+ """
+ Removes all website configuration from the bucket.
+ """
+ response = self.connection.make_request('DELETE', self.name,
+ query_args='website', headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 204:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def get_website_endpoint(self):
+ """
+ Returns the fully qualified hostname to use is you want to access this
+ bucket as a website. This doesn't validate whether the bucket has
+ been correctly configured as a website or not.
+ """
+ l = [self.name]
+ l.append(S3WebsiteEndpointTranslate.translate_region(self.get_location()))
+ l.append('.'.join(self.connection.host.split('.')[-2:]))
+ return '.'.join(l)
+
+ def get_policy(self, headers=None):
+ response = self.connection.make_request('GET', self.name,
+ query_args='policy', headers=headers)
+ body = response.read()
+ if response.status == 200:
+ return body
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def set_policy(self, policy, headers=None):
+ response = self.connection.make_request('PUT', self.name,
+ data=policy,
+ query_args='policy',
+ headers=headers)
+ body = response.read()
+ if response.status >= 200 and response.status <= 204:
+ return True
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def initiate_multipart_upload(self, key_name, headers=None):
+ query_args = 'uploads'
+ response = self.connection.make_request('POST', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ resp = MultiPartUpload(self)
+ h = handler.XmlHandler(resp, self)
+ xml.sax.parseString(body, h)
+ return resp
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def complete_multipart_upload(self, key_name, upload_id,
+ xml_body, headers=None):
+ query_args = 'uploadId=%s' % upload_id
+ if headers is None:
+ headers = {}
+ headers['Content-Type'] = 'text/xml'
+ response = self.connection.make_request('POST', self.name, key_name,
+ query_args=query_args,
+ headers=headers, data=xml_body)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status == 200:
+ resp = CompleteMultiPartUpload(self)
+ h = handler.XmlHandler(resp, self)
+ xml.sax.parseString(body, h)
+ return resp
+ else:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
+ def cancel_multipart_upload(self, key_name, upload_id, headers=None):
+ query_args = 'uploadId=%s' % upload_id
+ response = self.connection.make_request('DELETE', self.name, key_name,
+ query_args=query_args,
+ headers=headers)
+ body = response.read()
+ boto.log.debug(body)
+ if response.status != 204:
+ raise self.connection.provider.storage_response_error(
+ response.status, response.reason, body)
+
    def delete(self, headers=None):
        # Delete this bucket via the parent connection; S3 requires the
        # bucket to be empty before deletion succeeds.
        return self.connection.delete_bucket(self.name, headers=headers)
+
diff --git a/boto/s3/bucketlistresultset.py b/boto/s3/bucketlistresultset.py
new file mode 100644
index 0000000..0123663
--- /dev/null
+++ b/boto/s3/bucketlistresultset.py
@@ -0,0 +1,139 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
def bucket_lister(bucket, prefix='', delimiter='', marker='', headers=None):
    """
    A generator function for listing keys in a bucket.

    Transparently pages through the bucket's key listing, resuming each
    subsequent request after the last key seen on the previous page.
    """
    truncated = True
    current = None
    while truncated:
        rs = bucket.get_all_keys(prefix=prefix, marker=marker,
                                 delimiter=delimiter, headers=headers)
        for current in rs:
            yield current
        # Resume the next page after the last key we saw, if any.
        if current:
            marker = current.name
        truncated = rs.is_truncated
+
class BucketListResultSet:
    """
    An iterable over the keys in a bucket. Delegates to the
    bucket_lister generator, which transparently handles S3's results
    paging, so even a bucket with many thousands of keys can be
    iterated in a reasonably efficient manner.
    """

    def __init__(self, bucket=None, prefix='', delimiter='', marker='', headers=None):
        self.bucket = bucket
        self.prefix = prefix
        self.delimiter = delimiter
        self.marker = marker
        self.headers = headers

    def __iter__(self):
        # Every iteration starts a fresh paging generator.
        return bucket_lister(self.bucket, prefix=self.prefix,
                             delimiter=self.delimiter, marker=self.marker,
                             headers=self.headers)
+
def versioned_bucket_lister(bucket, prefix='', delimiter='',
                            key_marker='', version_id_marker='', headers=None):
    """
    A generator function for listing versions in a bucket.

    Pages through the GET ?versions listing, resuming each request from
    the markers reported by the service.
    """
    truncated = True
    while truncated:
        rs = bucket.get_all_versions(prefix=prefix, key_marker=key_marker,
                                     version_id_marker=version_id_marker,
                                     delimiter=delimiter, headers=headers)
        for version in rs:
            yield version
        key_marker = rs.next_key_marker
        version_id_marker = rs.next_version_id_marker
        truncated = rs.is_truncated
+
class VersionedBucketListResultSet:
    """
    An iterable over the object versions in a bucket. Delegates to the
    versioned_bucket_lister generator, which transparently handles
    S3's results paging, so even a bucket with many thousands of
    versions can be iterated in a reasonably efficient manner.
    """

    def __init__(self, bucket=None, prefix='', delimiter='', key_marker='',
                 version_id_marker='', headers=None):
        self.bucket = bucket
        self.prefix = prefix
        self.delimiter = delimiter
        self.key_marker = key_marker
        self.version_id_marker = version_id_marker
        self.headers = headers

    def __iter__(self):
        # Every iteration starts a fresh paging generator.
        return versioned_bucket_lister(self.bucket,
                                       prefix=self.prefix,
                                       delimiter=self.delimiter,
                                       key_marker=self.key_marker,
                                       version_id_marker=self.version_id_marker,
                                       headers=self.headers)
+
def multipart_upload_lister(bucket, key_marker='',
                            upload_id_marker='',
                            headers=None):
    """
    A generator function for listing multipart uploads in a bucket.

    Pages through the GET ?uploads listing, resuming each request from
    the markers reported by the service.
    """
    truncated = True
    while truncated:
        rs = bucket.get_all_multipart_uploads(key_marker=key_marker,
                                              upload_id_marker=upload_id_marker,
                                              headers=headers)
        for upload in rs:
            yield upload
        key_marker = rs.next_key_marker
        upload_id_marker = rs.next_upload_id_marker
        truncated = rs.is_truncated
+
class MultiPartUploadListResultSet:
    """
    An iterable over the in-progress multipart uploads in a bucket.
    Delegates to the multipart_upload_lister generator, which
    transparently handles S3's results paging, so even a bucket with
    many thousands of uploads can be iterated in a reasonably
    efficient manner.
    """

    def __init__(self, bucket=None, key_marker='',
                 upload_id_marker='', headers=None):
        self.bucket = bucket
        self.key_marker = key_marker
        self.upload_id_marker = upload_id_marker
        self.headers = headers

    def __iter__(self):
        # Every iteration starts a fresh paging generator.
        return multipart_upload_lister(self.bucket,
                                       key_marker=self.key_marker,
                                       upload_id_marker=self.upload_id_marker,
                                       headers=self.headers)
+
+
diff --git a/boto/s3/connection.py b/boto/s3/connection.py
new file mode 100644
index 0000000..25ba4ab
--- /dev/null
+++ b/boto/s3/connection.py
@@ -0,0 +1,401 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import urllib, base64
+import time
+import boto.utils
+from boto.connection import AWSAuthConnection
+from boto import handler
+from boto.provider import Provider
+from boto.s3.bucket import Bucket
+from boto.s3.key import Key
+from boto.resultset import ResultSet
+from boto.exception import BotoClientError
+
def check_lowercase_bucketname(n):
    """
    Raise BotoClientError if the bucket name contains upper-case
    characters; return True otherwise. Appending a lowercase letter
    before calling islower() makes purely numeric names (which contain
    no cased characters at all) pass the check.

    >>> check_lowercase_bucketname("1234-5678-9123")
    True
    >>> check_lowercase_bucketname("abcdefg1234")
    True
    """
    if (n + 'a').islower():
        return True
    raise BotoClientError("Bucket names cannot contain upper-case " \
        "characters when using either the sub-domain or virtual " \
        "hosting calling format.")
+
def assert_case_insensitive(f):
    """
    Decorator that validates the bucket-name argument (third
    positional, i.e. (self, server, bucket)) contains no upper-case
    characters before calling the wrapped function.
    """
    def wrapper(*args, **kwargs):
        # Only validate when called with the expected 3 positional args;
        # check_lowercase_bucketname raises on an invalid name.
        if len(args) == 3:
            check_lowercase_bucketname(args[2])
        return f(*args, **kwargs)
    return wrapper
+
class _CallingFormat:
    """
    Base class for the strategies that map a (server, bucket, key)
    triple onto a host name and URL path. Subclasses supply
    get_bucket_server().
    """

    def build_url_base(self, connection, protocol, server, bucket, key=''):
        # Assemble protocol://host/path for the given bucket and key.
        host = self.build_host(server, bucket)
        path = connection.get_path(self.build_path_base(bucket, key))
        return '%s://%s%s' % (protocol, host, path)

    def build_host(self, server, bucket):
        # With no bucket, talk to the bare service endpoint.
        if bucket == '':
            return server
        return self.get_bucket_server(server, bucket)

    def build_auth_path(self, bucket, key=''):
        # The auth path always includes the bucket, regardless of how
        # the request is addressed on the wire.
        prefix = ''
        if bucket != '':
            prefix = '/' + bucket
        return '%s/%s' % (prefix, urllib.quote(key))

    def build_path_base(self, bucket, key=''):
        # Default: the bucket lives in the host name, so the path is
        # just the (quoted) key.
        return '/%s' % urllib.quote(key)
+
class SubdomainCallingFormat(_CallingFormat):
    """Address the bucket as a subdomain of the S3 endpoint
    (e.g. mybucket.s3.amazonaws.com)."""

    @assert_case_insensitive
    def get_bucket_server(self, server, bucket):
        return '.'.join([bucket, server])
+
class VHostCallingFormat(_CallingFormat):
    """Virtual-host style addressing: the bucket name itself is used
    as the full host name (presumably CNAME'd to S3 — confirm with
    the caller's DNS setup)."""

    @assert_case_insensitive
    def get_bucket_server(self, server, bucket):
        return bucket
+
class OrdinaryCallingFormat(_CallingFormat):
    """Path-style addressing: always talk to the bare service endpoint
    and carry the bucket name in the URL path."""

    def get_bucket_server(self, server, bucket):
        return server

    def build_path_base(self, bucket, key=''):
        # Prefix the quoted key with the bucket name when one is given.
        if bucket:
            return '/%s/%s' % (bucket, urllib.quote(key))
        return '/' + urllib.quote(key)
+
class Location:
    """Region constraint strings accepted by S3's create-bucket call."""
    DEFAULT = '' # US Classic Region
    EU = 'EU'
    USWest = 'us-west-1'
    APSoutheast = 'ap-southeast-1'
+
class S3Connection(AWSAuthConnection):
    """A connection to the Amazon S3 (or S3-compatible) storage service."""

    DefaultHost = 's3.amazonaws.com'
    # Query-string template for signed (query-auth) URLs.
    QueryString = 'Signature=%s&Expires=%d&AWSAccessKeyId=%s'

    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
                 is_secure=True, port=None, proxy=None, proxy_port=None,
                 proxy_user=None, proxy_pass=None,
                 host=DefaultHost, debug=0, https_connection_factory=None,
                 calling_format=SubdomainCallingFormat(), path='/', provider='aws',
                 bucket_class=Bucket):
        # NOTE(review): calling_format's default is a single shared instance;
        # the calling-format classes are stateless, so sharing is harmless,
        # but do not add mutable state to them.
        self.calling_format = calling_format
        self.bucket_class = bucket_class
        AWSAuthConnection.__init__(self, host,
                aws_access_key_id, aws_secret_access_key,
                is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
                debug=debug, https_connection_factory=https_connection_factory,
                path=path, provider=provider)

    def _required_auth_capability(self):
        return ['s3']

    def __iter__(self):
        # Iterating a connection iterates over all buckets in the account.
        for bucket in self.get_all_buckets():
            yield bucket

    def __contains__(self, bucket_name):
        return not (self.lookup(bucket_name) is None)

    def set_bucket_class(self, bucket_class):
        """
        Set the Bucket class associated with this connection. By default,
        this would be the boto.s3.bucket.Bucket class but if you want to
        subclass that for some reason this allows you to associate your
        new class.

        :type bucket_class: class
        :param bucket_class: A subclass of Bucket that can be more specific
        """
        self.bucket_class = bucket_class

    def build_post_policy(self, expiration_time, conditions):
        """
        Build the JSON policy document for a browser-based POST upload.
        Taken from the AWS book Python examples and modified for use with boto
        """
        assert type(expiration_time) == time.struct_time, \
            'Policy document must include a valid expiration Time object'

        # Conditions were already converted to JSON condition statements
        # by the caller; just splice them into the document.
        return '{"expiration": "%s",\n"conditions": [%s]}' % \
            (time.strftime(boto.utils.ISO8601, expiration_time),
             ",".join(conditions))

    def build_post_form_args(self, bucket_name, key, expires_in = 6000,
            acl = None, success_action_redirect = None, max_content_length = None,
            http_method = "http", fields=None, conditions=None):
        """
        Taken from the AWS book Python examples and modified for use with boto
        This only returns the arguments required for the post form, not the
        actual form. This does not return the file input field which also
        needs to be added

        :param bucket_name: Bucket to submit to
        :type bucket_name: string

        :param key: Key name, optionally add $(unknown) to the end to
            attach the submitted filename
        :type key: string

        :param expires_in: Time (in seconds) before this expires, defaults
            to 6000
        :type expires_in: integer

        :param acl: ACL rule to use, if any
        :type acl: :class:`boto.s3.acl.ACL`

        :param success_action_redirect: URL to redirect to on success
        :type success_action_redirect: string

        :param max_content_length: Maximum size for this file
        :type max_content_length: integer

        :type http_method: string
        :param http_method: HTTP Method to use, "http" or "https"

        :rtype: dict
        :return: A dictionary containing field names/values as well as
            a url to POST to

            .. code-block:: python

                {
                    "action": action_url_to_post_to,
                    "fields": [
                        {
                            "name": field_name,
                            "value":  field_value
                        },
                        {
                            "name": field_name2,
                            "value": field_value2
                        }
                    ]
                }
        """
        if fields is None:
            fields = []
        if conditions is None:
            conditions = []
        expiration = time.gmtime(int(time.time() + expires_in))

        # Generate policy document
        conditions.append('{"bucket": "%s"}' % bucket_name)
        if key.endswith("$(unknown)"):
            conditions.append('["starts-with", "$key", "%s"]' % key[:-len("$(unknown)")])
        else:
            conditions.append('{"key": "%s"}' % key)
        if acl:
            conditions.append('{"acl": "%s"}' % acl)
            fields.append({ "name": "acl", "value": acl})
        if success_action_redirect:
            conditions.append('{"success_action_redirect": "%s"}' % success_action_redirect)
            fields.append({ "name": "success_action_redirect", "value": success_action_redirect})
        if max_content_length:
            conditions.append('["content-length-range", 0, %i]' % max_content_length)
            fields.append({"name":'content-length-range', "value": "0,%i" % max_content_length})

        policy = self.build_post_policy(expiration, conditions)

        # Add the base64-encoded policy document as the 'policy' field
        policy_b64 = base64.b64encode(policy)
        fields.append({"name": "policy", "value": policy_b64})

        # Add the AWS access key as the 'AWSAccessKeyId' field
        fields.append({"name": "AWSAccessKeyId", "value": self.aws_access_key_id})

        # Add the signature over the encoded policy document as the
        # 'signature' field (the original comment here wrongly said
        # 'AWSAccessKeyId').
        signature = self._auth_handler.sign_string(policy_b64)
        fields.append({"name": "signature", "value": signature})
        fields.append({"name": "key", "value": key})

        # HTTPS protocol will be used if the secure HTTP option is enabled.
        url = '%s://%s/' % (http_method,
                            self.calling_format.build_host(self.server_name(),
                                                           bucket_name))

        return {"action": url, "fields": fields}

    def generate_url(self, expires_in, method, bucket='', key='',
                     headers=None, query_auth=True, force_http=False):
        """
        Generate a (optionally signed) URL for the given bucket/key that
        expires expires_in seconds from now.
        """
        if not headers:
            headers = {}
        expires = int(time.time() + expires_in)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        auth_path = self.get_path(auth_path)
        c_string = boto.utils.canonical_string(method, auth_path, headers,
                                               expires, self.provider)
        b64_hmac = self._auth_handler.sign_string(c_string)
        encoded_canonical = urllib.quote_plus(b64_hmac)
        if query_auth:
            query_part = '?' + self.QueryString % (encoded_canonical, expires,
                                                   self.aws_access_key_id)
            # Pass a security token through to the query string if one
            # was supplied in the headers.
            sec_hdr = self.provider.security_token_header
            if sec_hdr in headers:
                query_part += ('&%s=%s' % (sec_hdr,
                                           urllib.quote(headers[sec_hdr])))
        else:
            query_part = ''
        if force_http:
            protocol = 'http'
            port = 80
        else:
            protocol = self.protocol
            port = self.port
        return self.calling_format.build_url_base(self, protocol,
                                                  self.server_name(port),
                                                  bucket, key) + query_part

    def get_all_buckets(self, headers=None):
        """Return a ResultSet of all buckets owned by this account."""
        response = self.make_request('GET', headers=headers)
        body = response.read()
        if response.status > 300:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)
        rs = ResultSet([('Bucket', self.bucket_class)])
        h = handler.XmlHandler(rs, self)
        xml.sax.parseString(body, h)
        return rs

    def get_canonical_user_id(self, headers=None):
        """
        Convenience method that returns the "CanonicalUserID" of the user
        whose credentials are associated with the connection. The only way
        to get this value is to do a GET request on the service which
        returns all buckets associated with the account. As part of that
        response, the canonical userid is returned. This method simply
        does all of that and then returns just the user id.

        :rtype: string
        :return: A string containing the canonical user id.
        """
        rs = self.get_all_buckets(headers=headers)
        return rs.ID

    def get_bucket(self, bucket_name, validate=True, headers=None):
        """
        Return a Bucket object for bucket_name. When validate is True
        (the default), a zero-key listing is issued to verify the bucket
        exists and is accessible.
        """
        bucket = self.bucket_class(self, bucket_name)
        if validate:
            bucket.get_all_keys(headers, maxkeys=0)
        return bucket

    def lookup(self, bucket_name, validate=True, headers=None):
        """
        Like get_bucket, but returns None instead of raising when the
        bucket does not exist or is not accessible.
        """
        try:
            bucket = self.get_bucket(bucket_name, validate, headers=headers)
        except Exception:
            # Best-effort lookup: any failure maps to "not found".
            # (Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate.)
            bucket = None
        return bucket

    def create_bucket(self, bucket_name, headers=None,
                      location=Location.DEFAULT, policy=None):
        """
        Creates a new located bucket. By default it's in the USA. You can
        pass Location.EU to create an European bucket.

        :type bucket_name: string
        :param bucket_name: The name of the new bucket

        :type headers: dict
        :param headers: Additional headers to pass along with the request
            to AWS.

        :type location: :class:`boto.s3.connection.Location`
        :param location: The location of the new bucket

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the
            new key in S3.
        """
        check_lowercase_bucketname(bucket_name)

        if policy:
            if headers:
                headers[self.provider.acl_header] = policy
            else:
                headers = {self.provider.acl_header : policy}
        if location == Location.DEFAULT:
            data = ''
        else:
            # BUGFIX: the S3 PUT Bucket API expects the element
            # 'CreateBucketConfiguration', not 'CreateBucketConstraint'.
            data = '<CreateBucketConfiguration><LocationConstraint>' + \
                    location + '</LocationConstraint></CreateBucketConfiguration>'
        response = self.make_request('PUT', bucket_name, headers=headers,
                data=data)
        body = response.read()
        if response.status == 409:
            raise self.provider.storage_create_error(
                response.status, response.reason, body)
        if response.status == 200:
            return self.bucket_class(self, bucket_name)
        else:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def delete_bucket(self, bucket, headers=None):
        """Delete the named bucket; it must already be empty."""
        response = self.make_request('DELETE', bucket, headers=headers)
        body = response.read()
        if response.status != 204:
            raise self.provider.storage_response_error(
                response.status, response.reason, body)

    def make_request(self, method, bucket='', key='', headers=None, data='',
            query_args=None, sender=None, override_num_retries=None):
        """
        Build the path/host/auth-path for a bucket+key request using the
        configured calling format, then delegate to AWSAuthConnection.
        """
        if isinstance(bucket, self.bucket_class):
            bucket = bucket.name
        if isinstance(key, Key):
            key = key.name
        path = self.calling_format.build_path_base(bucket, key)
        boto.log.debug('path=%s' % path)
        auth_path = self.calling_format.build_auth_path(bucket, key)
        boto.log.debug('auth_path=%s' % auth_path)
        host = self.calling_format.build_host(self.server_name(), bucket)
        if query_args:
            path += '?' + query_args
            boto.log.debug('path=%s' % path)
            auth_path += '?' + query_args
            boto.log.debug('auth_path=%s' % auth_path)
        return AWSAuthConnection.make_request(self, method, path, headers,
                data, host, auth_path, sender,
                override_num_retries=override_num_retries)
+
diff --git a/boto/s3/deletemarker.py b/boto/s3/deletemarker.py
new file mode 100644
index 0000000..3462d42
--- /dev/null
+++ b/boto/s3/deletemarker.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.s3.user import User
+
class DeleteMarker:
    """
    Represents a delete-marker entry returned in a versioned bucket
    listing (GET ?versions). Populated by the SAX handler via
    startElement/endElement callbacks.
    """

    def __init__(self, bucket=None, name=None):
        self.bucket = bucket
        self.name = name
        self.is_latest = False
        self.last_modified = None
        self.owner = None
        # BUGFIX: version_id was assigned in endElement but never
        # initialized, so it was missing on markers without a VersionId.
        self.version_id = None

    def startElement(self, name, attrs, connection):
        if name == 'Owner':
            self.owner = User(self)
            return self.owner
        else:
            return None

    def endElement(self, name, value, connection):
        if name == 'Key':
            self.name = value.encode('utf-8')
        elif name == 'IsLatest':
            # BUGFIX: previously assigned to a misspelled attribute
            # ('is_lastest') when the value was 'true', so is_latest
            # could never become True.
            self.is_latest = (value == 'true')
        elif name == 'LastModified':
            self.last_modified = value
        elif name == 'Owner':
            pass
        elif name == 'VersionId':
            self.version_id = value
        else:
            setattr(self, name, value)
+
+
diff --git a/boto/s3/key.py b/boto/s3/key.py
new file mode 100644
index 0000000..c7e77f4
--- /dev/null
+++ b/boto/s3/key.py
@@ -0,0 +1,1059 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import mimetypes
+import os
+import rfc822
+import StringIO
+import base64
+import boto.utils
+from boto.exception import BotoClientError
+from boto.provider import Provider
+from boto.s3.user import User
+from boto import UserAgent
+try:
+ from hashlib import md5
+except ImportError:
+ from md5 import md5
+
+
class Key(object):
    """A single key (object) in an S3 bucket.

    Holds the key's identity (bucket, name), its HTTP/content metadata,
    versioning state, and the open-HTTP-response state used by the
    streaming read methods (open_read/next/read).
    """

    # Content-Type used when none is supplied or guessable from the path.
    DefaultContentType = 'application/octet-stream'

    # Chunk size, in bytes, used for streaming uploads and downloads.
    BufferSize = 8192

    def __init__(self, bucket=None, name=None):
        # bucket: the boto Bucket this key belongs to (may be None).
        # name: the key's name within that bucket.
        self.bucket = bucket
        self.name = name
        self.metadata = {}
        self.cache_control = None
        self.content_type = self.DefaultContentType
        self.content_encoding = None
        self.filename = None
        self.etag = None
        self.last_modified = None
        self.owner = None
        self.storage_class = 'STANDARD'
        # md5/base64md5: hex and base64 MD5 of the content, set by
        # compute_md5; base64md5 is sent as the Content-MD5 header.
        self.md5 = None
        self.base64md5 = None
        self.path = None
        # resp/mode: open HTTP response and 'r'/'w' state while streaming.
        self.resp = None
        self.mode = None
        self.size = None
        self.version_id = None
        self.source_version_id = None
        self.delete_marker = False
+ def __repr__(self):
+ if self.bucket:
+ return '<Key: %s,%s>' % (self.bucket.name, self.name)
+ else:
+ return '<Key: None,%s>' % self.name
+
+ def __getattr__(self, name):
+ if name == 'key':
+ return self.name
+ else:
+ raise AttributeError
+
+ def __setattr__(self, name, value):
+ if name == 'key':
+ self.__dict__['name'] = value
+ else:
+ self.__dict__[name] = value
+
    def __iter__(self):
        # The Key is its own iterator; next() streams BufferSize chunks
        # from the open GET response.
        return self
+
+ @property
+ def provider(self):
+ provider = None
+ if self.bucket:
+ if self.bucket.connection:
+ provider = self.bucket.connection.provider
+ return provider
+
+ def get_md5_from_hexdigest(self, md5_hexdigest):
+ """
+ A utility function to create the 2-tuple (md5hexdigest, base64md5)
+ from just having a precalculated md5_hexdigest.
+ """
+ import binascii
+ digest = binascii.unhexlify(md5_hexdigest)
+ base64md5 = base64.encodestring(digest)
+ if base64md5[-1] == '\n':
+ base64md5 = base64md5[0:-1]
+ return (md5_hexdigest, base64md5)
+
+ def handle_version_headers(self, resp, force=False):
+ provider = self.bucket.connection.provider
+ # If the Key object already has a version_id attribute value, it
+ # means that it represents an explicit version and the user is
+ # doing a get_contents_*(version_id=<foo>) to retrieve another
+ # version of the Key. In that case, we don't really want to
+ # overwrite the version_id in this Key object. Comprende?
+ if self.version_id is None or force:
+ self.version_id = resp.getheader(provider.version_id, None)
+ self.source_version_id = resp.getheader(provider.copy_source_version_id, None)
+ if resp.getheader(provider.delete_marker, 'false') == 'true':
+ self.delete_marker = True
+ else:
+ self.delete_marker = False
+
    def open_read(self, headers=None, query_args=None,
                  override_num_retries=None, response_headers=None):
        """
        Open this key for reading.  No-op if a response is already open.

        :type headers: dict
        :param headers: Headers to pass in the web request

        :type query_args: string
        :param query_args: Arguments to pass in the query string (ie, 'torrent')

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
                                     num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.

        :raises: the provider's storage_response_error on a non-2xx status.
        """
        if self.resp == None:
            self.mode = 'r'

            provider = self.bucket.connection.provider
            self.resp = self.bucket.connection.make_request(
                'GET', self.bucket.name, self.name, headers,
                query_args=query_args,
                override_num_retries=override_num_retries)
            if self.resp.status < 199 or self.resp.status > 299:
                body = self.resp.read()
                raise provider.storage_response_error(self.resp.status,
                                                      self.resp.reason, body)
            # NOTE(review): the response_headers *parameter* is overwritten
            # here and never sent with the request — looks unused; confirm
            # intent (get_file builds them into query_args instead).
            response_headers = self.resp.msg
            self.metadata = boto.utils.get_aws_metadata(response_headers,
                                                        provider)
            # Cache the standard HTTP content attributes on the Key.
            for name,value in response_headers.items():
                if name.lower() == 'content-length':
                    self.size = int(value)
                elif name.lower() == 'etag':
                    self.etag = value
                elif name.lower() == 'content-type':
                    self.content_type = value
                elif name.lower() == 'content-encoding':
                    self.content_encoding = value
                elif name.lower() == 'last-modified':
                    self.last_modified = value
                elif name.lower() == 'cache-control':
                    self.cache_control = value
            self.handle_version_headers(self.resp)
+
    def open_write(self, headers=None, override_num_retries=None):
        """
        Open this key for writing.
        Not yet implemented

        :type headers: dict
        :param headers: Headers to pass in the write request

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
                                     num_retries parameter for underlying PUT.

        :raises BotoClientError: always; streaming writes are not supported.
        """
        raise BotoClientError('Not Implemented')
+
+ def open(self, mode='r', headers=None, query_args=None,
+ override_num_retries=None):
+ if mode == 'r':
+ self.mode = 'r'
+ self.open_read(headers=headers, query_args=query_args,
+ override_num_retries=override_num_retries)
+ elif mode == 'w':
+ self.mode = 'w'
+ self.open_write(headers=headers,
+ override_num_retries=override_num_retries)
+ else:
+ raise BotoClientError('Invalid mode: %s' % mode)
+
    # True once close() has been called on this key instance.
    closed = False
    def close(self):
        # Drain any open GET response so the underlying connection can be
        # reused, then reset the streaming state.
        if self.resp:
            self.resp.read()
        self.resp = None
        self.mode = None
        self.closed = True
+
    def next(self):
        """
        By providing a next method, the key object supports use as an iterator.
        For example, you can now say:

        for bytes in key:
            write bytes to a file or whatever

        All of the HTTP connection stuff is handled for you.
        """
        # open_read is a no-op when the response is already open.
        self.open_read()
        data = self.resp.read(self.BufferSize)
        if not data:
            self.close()
            raise StopIteration
        return data
+
    def read(self, size=0):
        # Read up to ``size`` bytes from the key (0 means one BufferSize
        # chunk).  Opens the key for reading on first use; the response is
        # closed once it is exhausted and '' is returned.
        if size == 0:
            size = self.BufferSize
        self.open_read()
        data = self.resp.read(size)
        if not data:
            self.close()
        return data
+
+ def change_storage_class(self, new_storage_class, dst_bucket=None):
+ """
+ Change the storage class of an existing key.
+ Depending on whether a different destination bucket is supplied
+ or not, this will either move the item within the bucket, preserving
+ all metadata and ACL info bucket changing the storage class or it
+ will copy the item to the provided destination bucket, also
+ preserving metadata and ACL info.
+
+ :type new_storage_class: string
+ :param new_storage_class: The new storage class for the Key.
+ Possible values are:
+ * STANDARD
+ * REDUCED_REDUNDANCY
+
+ :type dst_bucket: string
+ :param dst_bucket: The name of a destination bucket. If not
+ provided the current bucket of the key
+ will be used.
+
+ """
+ if new_storage_class == 'STANDARD':
+ return self.copy(self.bucket.name, self.name,
+ reduced_redundancy=False, preserve_acl=True)
+ elif new_storage_class == 'REDUCED_REDUNDANCY':
+ return self.copy(self.bucket.name, self.name,
+ reduced_redundancy=True, preserve_acl=True)
+ else:
+ raise BotoClientError('Invalid storage class: %s' %
+ new_storage_class)
+
    def copy(self, dst_bucket, dst_key, metadata=None,
             reduced_redundancy=False, preserve_acl=False):
        """
        Copy this Key to another bucket.

        :type dst_bucket: string
        :param dst_bucket: The name of the destination bucket

        :type dst_key: string
        :param dst_key: The name of the destination key

        :type metadata: dict
        :param metadata: Metadata to be associated with new key.
                         If metadata is supplied, it will replace the
                         metadata of the source key being copied.
                         If no metadata is supplied, the source key's
                         metadata will be copied to the new key.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will force the storage
                                   class of the new Key to be
                                   REDUCED_REDUNDANCY regardless of the
                                   storage class of the key being copied.
                                   The Reduced Redundancy Storage (RRS)
                                   feature of S3, provides lower
                                   redundancy at lower storage cost.

        :type preserve_acl: bool
        :param preserve_acl: If True, the ACL from the source key
                             will be copied to the destination
                             key.  If False, the destination key
                             will have the default ACL.
                             Note that preserving the ACL in the
                             new key object will require two
                             additional API calls to S3, one to
                             retrieve the current ACL and one to
                             set that ACL on the new object.  If
                             you don't care about the ACL, a value
                             of False will be significantly more
                             efficient.

        :rtype: :class:`boto.s3.key.Key` or subclass
        :returns: An instance of the newly created key object
        """
        # Resolve the destination bucket name to a Bucket object and let
        # it perform the server-side copy.
        dst_bucket = self.bucket.connection.lookup(dst_bucket)
        if reduced_redundancy:
            storage_class = 'REDUCED_REDUNDANCY'
        else:
            # Keep whatever storage class the source key reports.
            storage_class = self.storage_class
        return dst_bucket.copy_key(dst_key, self.bucket.name,
                                   self.name, metadata,
                                   storage_class=storage_class,
                                   preserve_acl=preserve_acl)
+
+ def startElement(self, name, attrs, connection):
+ if name == 'Owner':
+ self.owner = User(self)
+ return self.owner
+ else:
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'Key':
+ self.name = value.encode('utf-8')
+ elif name == 'ETag':
+ self.etag = value
+ elif name == 'LastModified':
+ self.last_modified = value
+ elif name == 'Size':
+ self.size = int(value)
+ elif name == 'StorageClass':
+ self.storage_class = value
+ elif name == 'Owner':
+ pass
+ elif name == 'VersionId':
+ self.version_id = value
+ else:
+ setattr(self, name, value)
+
+ def exists(self):
+ """
+ Returns True if the key exists
+
+ :rtype: bool
+ :return: Whether the key exists on S3
+ """
+ return bool(self.bucket.lookup(self.name))
+
    def delete(self):
        """
        Delete this key from S3
        """
        # Delegates to the bucket; passes this key's version_id so an
        # explicit version is removed when versioning is in play.
        return self.bucket.delete_key(self.name, version_id=self.version_id)
+
    def get_metadata(self, name):
        # Return the user-metadata value for ``name`` (None if unset).
        return self.metadata.get(name)

    def set_metadata(self, name, value):
        # Set a single user-metadata entry; merged into the request
        # headers (x-amz-meta-*) at upload time.
        self.metadata[name] = value

    def update_metadata(self, d):
        # Merge the dict ``d`` into this key's user metadata.
        self.metadata.update(d)
+
    # convenience methods for setting/getting ACL
    def set_acl(self, acl_str, headers=None):
        # Apply an ACL (canned string or Policy) to this key via the bucket.
        if self.bucket != None:
            self.bucket.set_acl(acl_str, self.name, headers=headers)

    def get_acl(self, headers=None):
        # Fetch this key's ACL policy object; None if no bucket is set.
        if self.bucket != None:
            return self.bucket.get_acl(self.name, headers=headers)

    def get_xml_acl(self, headers=None):
        # Fetch this key's ACL as raw XML; None if no bucket is set.
        if self.bucket != None:
            return self.bucket.get_xml_acl(self.name, headers=headers)

    def set_xml_acl(self, acl_str, headers=None):
        # Apply a raw XML ACL document to this key.
        if self.bucket != None:
            return self.bucket.set_xml_acl(acl_str, self.name, headers=headers)

    def set_canned_acl(self, acl_str, headers=None):
        # Apply a canned ACL (e.g. 'private', 'public-read') to this key.
        return self.bucket.set_canned_acl(acl_str, self.name, headers)

    def make_public(self, headers=None):
        # Shortcut for set_canned_acl('public-read').
        return self.bucket.set_canned_acl('public-read', self.name, headers)
+
    def generate_url(self, expires_in, method='GET', headers=None,
                     query_auth=True, force_http=False):
        """
        Generate a URL to access this key.

        :type expires_in: int
        :param expires_in: How long the url is valid for, in seconds

        :type method: string
        :param method: The method to use for retrieving the file (default is GET)

        :type headers: dict
        :param headers: Any headers to pass along in the request

        :type query_auth: bool
        :param query_auth: presumably controls whether auth info is
                           embedded as query-string parameters; the actual
                           behavior lives in connection.generate_url —
                           confirm there.

        :type force_http: bool
        :param force_http: passed through to connection.generate_url;
                           presumably forces an http (not https) URL.

        :rtype: string
        :return: The URL to access the key
        """
        return self.bucket.connection.generate_url(expires_in, method,
                                                   self.bucket.name, self.name,
                                                   headers, query_auth, force_http)
+
    def send_file(self, fp, headers=None, cb=None, num_cb=10, query_args=None):
        """
        Upload a file to a key into a bucket on S3.

        :type fp: file
        :param fp: The file pointer to upload

        :type headers: dict
        :param headers: The headers to pass along with the PUT request

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to S3 and the second representing
            the total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
                       parameter this parameter determines the granularity
                       of the callback by defining the maximum number of
                       times the callback will be called during the file
                       transfer. Providing a negative integer will cause
                       your callback to be called with each buffer read.

        """
        provider = self.bucket.connection.provider

        def sender(http_conn, method, path, data, headers):
            # Streams fp over an already-authenticated connection; passed
            # to make_request so retries re-invoke it from the top.
            http_conn.putrequest(method, path)
            for key in headers:
                http_conn.putheader(key, headers[key])
            http_conn.endheaders()
            fp.seek(0)
            # Suppress http debug output while streaming raw body bytes.
            save_debug = self.bucket.connection.debug
            self.bucket.connection.debug = 0
            http_conn.set_debuglevel(0)
            if cb:
                # cb_count = chunks between callbacks; -1 = every chunk.
                if num_cb > 2:
                    cb_count = self.size / self.BufferSize / (num_cb-2)
                elif num_cb < 0:
                    cb_count = -1
                else:
                    cb_count = 0
                i = total_bytes = 0
                cb(total_bytes, self.size)
            l = fp.read(self.BufferSize)
            while len(l) > 0:
                http_conn.send(l)
                if cb:
                    total_bytes += len(l)
                    i += 1
                    if i == cb_count or cb_count == -1:
                        cb(total_bytes, self.size)
                        i = 0
                l = fp.read(self.BufferSize)
            if cb:
                cb(total_bytes, self.size)
            response = http_conn.getresponse()
            body = response.read()
            fp.seek(0)
            http_conn.set_debuglevel(save_debug)
            self.bucket.connection.debug = save_debug
            if response.status == 500 or response.status == 503 or \
                response.getheader('location'):
                # we'll try again
                return response
            elif response.status >= 200 and response.status <= 299:
                # Verify the upload round-tripped intact: S3's ETag for a
                # simple PUT is the quoted MD5 of the body.
                self.etag = response.getheader('etag')
                if self.etag != '"%s"' % self.md5:
                    raise provider.storage_data_error(
                        'ETag from S3 did not match computed MD5')
                return response
            else:
                raise provider.storage_response_error(
                    response.status, response.reason, body)

        if not headers:
            headers = {}
        else:
            # Copy so the caller's dict is not mutated below.
            headers = headers.copy()
        headers['User-Agent'] = UserAgent
        headers['Content-MD5'] = self.base64md5
        if self.storage_class != 'STANDARD':
            headers[provider.storage_class_header] = self.storage_class
        if headers.has_key('Content-Encoding'):
            self.content_encoding = headers['Content-Encoding']
        if headers.has_key('Content-Type'):
            self.content_type = headers['Content-Type']
        elif self.path:
            # Guess the content type from the file name when possible.
            self.content_type = mimetypes.guess_type(self.path)[0]
            if self.content_type == None:
                self.content_type = self.DefaultContentType
            headers['Content-Type'] = self.content_type
        else:
            headers['Content-Type'] = self.content_type
        headers['Content-Length'] = str(self.size)
        headers['Expect'] = '100-Continue'
        headers = boto.utils.merge_meta(headers, self.metadata, provider)
        resp = self.bucket.connection.make_request('PUT', self.bucket.name,
                                                   self.name, headers,
                                                   sender=sender,
                                                   query_args=query_args)
        # A successful PUT may have created a new version; force-refresh.
        self.handle_version_headers(resp, force=True)
+
+ def compute_md5(self, fp):
+ """
+ :type fp: file
+ :param fp: File pointer to the file to MD5 hash. The file pointer will be
+ reset to the beginning of the file before the method returns.
+
+ :rtype: tuple
+ :return: A tuple containing the hex digest version of the MD5 hash
+ as the first element and the base64 encoded version of the
+ plain digest as the second element.
+ """
+ m = md5()
+ fp.seek(0)
+ s = fp.read(self.BufferSize)
+ while s:
+ m.update(s)
+ s = fp.read(self.BufferSize)
+ hex_md5 = m.hexdigest()
+ base64md5 = base64.encodestring(m.digest())
+ if base64md5[-1] == '\n':
+ base64md5 = base64md5[0:-1]
+ self.size = fp.tell()
+ fp.seek(0)
+ return (hex_md5, base64md5)
+
    def set_contents_from_file(self, fp, headers=None, replace=True,
                               cb=None, num_cb=10, policy=None, md5=None,
                               reduced_redundancy=False, query_args=None):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the contents of the file pointed to by 'fp' as the
        contents.

        :type fp: file
        :param fp: the file whose contents to upload

        :type headers: dict
        :param headers: additional HTTP headers that will be sent with the PUT request.

        :type replace: bool
        :param replace: If this parameter is False, the method
                        will first check to see if an object exists in the
                        bucket with the same key.  If it does, it won't
                        overwrite it.  The default value is True which will
                        overwrite the object.

        :type cb: function
        :param cb: a callback function that will be called to report
            progress on the upload.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted to S3 and the second representing
            the total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb parameter
            this parameter determines the granularity of the callback by defining
            the maximum number of times the callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
                   file as the first element and the Base64-encoded version of the plain
                   checksum as the second element.  This is the same format returned by
                   the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to upload,
                    it's silly to have to do it twice so this param, if present, will be
                    used as the MD5 values of the file.  Otherwise, the checksum will be computed.
        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
                                   class of the new Key to be
                                   REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3, provides lower
                                   redundancy at lower storage cost.

        """
        provider = self.bucket.connection.provider
        if headers is None:
            headers = {}
        if policy:
            headers[provider.acl_header] = policy
        if reduced_redundancy:
            self.storage_class = 'REDUCED_REDUNDANCY'
            if provider.storage_class_header:
                headers[provider.storage_class_header] = self.storage_class
                # TODO - What if the provider doesn't support reduced reduncancy?
                # What if different providers provide different classes?
        if hasattr(fp, 'name'):
            # Remember the source path; used later for Content-Type guessing.
            self.path = fp.name
        if self.bucket != None:
            if not md5:
                # compute_md5 also sets self.size and rewinds fp.
                md5 = self.compute_md5(fp)
            else:
                # even if md5 is provided, still need to set size of content
                fp.seek(0, 2)
                self.size = fp.tell()
                fp.seek(0)
            self.md5 = md5[0]
            self.base64md5 = md5[1]
            if self.name == None:
                # No key name given: fall back to the content's hex MD5.
                self.name = self.md5
            if not replace:
                k = self.bucket.lookup(self.name)
                if k:
                    return
            self.send_file(fp, headers, cb, num_cb, query_args)
+
+ def set_contents_from_filename(self, filename, headers=None, replace=True,
+ cb=None, num_cb=10, policy=None, md5=None,
+ reduced_redundancy=False):
+ """
+ Store an object in S3 using the name of the Key object as the
+ key in S3 and the contents of the file named by 'filename'.
+ See set_contents_from_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The name of the file that you want to put onto S3
+
+ :type headers: dict
+ :param headers: Additional headers to pass along with the request to AWS.
+
+ :type replace: bool
+ :param replace: If True, replaces the contents of the file if it already exists.
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to report
+ progress on the download. The callback should accept two integer
+ parameters, the first representing the number of bytes that have
+ been successfully transmitted from S3 and the second representing
+ the total number of bytes that need to be transmitted.
+
+ :type cb: int
+ :param num_cb: (optional) If a callback is specified with the cb parameter
+ this parameter determines the granularity of the callback by defining
+ the maximum number of times the callback will be called during the file transfer.
+
+ :type policy: :class:`boto.s3.acl.CannedACLStrings`
+ :param policy: A canned ACL policy that will be applied to the new key in S3.
+
+ :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
+ file as the first element and the Base64-encoded version of the plain
+ checksum as the second element. This is the same format returned by
+ the compute_md5 method.
+ :param md5: If you need to compute the MD5 for any reason prior to upload,
+ it's silly to have to do it twice so this param, if present, will be
+ used as the MD5 values of the file. Otherwise, the checksum will be computed.
+
+ :type reduced_redundancy: bool
+ :param reduced_redundancy: If True, this will set the storage
+ class of the new Key to be
+ REDUCED_REDUNDANCY. The Reduced Redundancy
+ Storage (RRS) feature of S3, provides lower
+ redundancy at lower storage cost.
+ """
+ fp = open(filename, 'rb')
+ self.set_contents_from_file(fp, headers, replace, cb, num_cb,
+ policy, md5, reduced_redundancy)
+ fp.close()
+
    def set_contents_from_string(self, s, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None, md5=None,
                                 reduced_redundancy=False):
        """
        Store an object in S3 using the name of the Key object as the
        key in S3 and the string 's' as the contents.
        See set_contents_from_file method for details about the
        parameters.

        :type headers: dict
        :param headers: Additional headers to pass along with the request to AWS.

        :type replace: bool
        :param replace: If True, replaces the contents of the file if it already exists.

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from S3 and the second representing
            the total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb parameter
            this parameter determines the granularity of the callback by defining
            the maximum number of times the callback will be called during the file transfer.

        :type policy: :class:`boto.s3.acl.CannedACLStrings`
        :param policy: A canned ACL policy that will be applied to the new key in S3.

        :type md5: A tuple containing the hexdigest version of the MD5 checksum of the
                   file as the first element and the Base64-encoded version of the plain
                   checksum as the second element.  This is the same format returned by
                   the compute_md5 method.
        :param md5: If you need to compute the MD5 for any reason prior to upload,
                    it's silly to have to do it twice so this param, if present, will be
                    used as the MD5 values of the file.  Otherwise, the checksum will be computed.

        :type reduced_redundancy: bool
        :param reduced_redundancy: If True, this will set the storage
                                   class of the new Key to be
                                   REDUCED_REDUNDANCY. The Reduced Redundancy
                                   Storage (RRS) feature of S3, provides lower
                                   redundancy at lower storage cost.
        """
        # Wrap the string in a file-like object and reuse the file path.
        fp = StringIO.StringIO(s)
        r = self.set_contents_from_file(fp, headers, replace, cb, num_cb,
                                        policy, md5, reduced_redundancy)
        fp.close()
        return r
+
    def get_file(self, fp, headers=None, cb=None, num_cb=10,
                 torrent=False, version_id=None, override_num_retries=None,
                 response_headers=None):
        """
        Retrieves a file from an S3 Key

        :type fp: file
        :param fp: File pointer to put the data into

        :type headers: string
        :param: headers to send when retrieving the files

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from S3 and the second representing
            the total number of bytes that need to be transmitted.


        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb parameter
            this parameter determines the granularity of the callback by defining
            the maximum number of times the callback will be called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type override_num_retries: int
        :param override_num_retries: If not None will override configured
                                     num_retries parameter for underlying GET.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.
        """
        if cb:
            # cb_count = chunks between callbacks; -1 = call on every chunk.
            if num_cb > 2:
                cb_count = self.size / self.BufferSize / (num_cb-2)
            elif num_cb < 0:
                cb_count = -1
            else:
                cb_count = 0
            i = total_bytes = 0
            cb(total_bytes, self.size)
        # Suppress per-chunk http debug noise during the transfer.
        save_debug = self.bucket.connection.debug
        if self.bucket.connection.debug == 1:
            self.bucket.connection.debug = 0

        query_args = []
        if torrent:
            query_args.append('torrent')
        # If a version_id is passed in, use that.  If not, check to see
        # if the Key object has an explicit version_id and, if so, use that.
        # Otherwise, don't pass a version_id query param.
        if version_id is None:
            version_id = self.version_id
        if version_id:
            query_args.append('versionId=%s' % version_id)
        if response_headers:
            # Overrides are passed as query-string parameters on the GET.
            for key in response_headers:
                query_args.append('%s=%s' % (key, response_headers[key]))
        query_args = '&'.join(query_args)
        self.open('r', headers, query_args=query_args,
                  override_num_retries=override_num_retries)
        # Iterating the Key streams BufferSize chunks (see next()).
        for bytes in self:
            fp.write(bytes)
            if cb:
                total_bytes += len(bytes)
                i += 1
                if i == cb_count or cb_count == -1:
                    cb(total_bytes, self.size)
                    i = 0
        if cb:
            cb(total_bytes, self.size)
        self.close()
        self.bucket.connection.debug = save_debug
+
    def get_torrent_file(self, fp, headers=None, cb=None, num_cb=10):
        """
        Get a torrent file (see to get_file)

        :type fp: file
        :param fp: The file pointer of where to put the torrent

        :type headers: dict
        :param headers: Headers to be passed

        :type cb: function
        :param cb: (optional) a callback function that will be called to
                   report progress on the download.  The callback should
                   accept two integer parameters, the first representing
                   the number of bytes that have been successfully
                   transmitted from S3 and the second representing the
                   total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
                       cb parameter this parameter determines the
                       granularity of the callback by defining the
                       maximum number of times the callback will be
                       called during the file transfer.

        """
        # Thin wrapper: identical to get_file with torrent=True.
        return self.get_file(fp, headers, cb, num_cb, torrent=True)
+
    def get_contents_to_file(self, fp, headers=None,
                             cb=None, num_cb=10,
                             torrent=False,
                             version_id=None,
                             res_download_handler=None,
                             response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Write the contents of the object to the file pointed
        to by 'fp'.

        :type fp: File -like object
        :param fp:

        :type headers: dict
        :param headers: additional HTTP headers that will be sent with
                        the GET request.

        :type cb: function
        :param cb: (optional) a callback function that will be called to
                   report progress on the download.  The callback should
                   accept two integer parameters, the first representing
                   the number of bytes that have been successfully
                   transmitted from S3 and the second representing the
                   total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
                       cb parameter this parameter determines the
                       granularity of the callback by defining the
                       maximum number of times the callback will be
                       called during the file transfer.

        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent
                        file as a string.

        :type res_upload_handler: ResumableDownloadHandler
        :param res_download_handler: If provided, this handler will
                                     perform the download.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.
        """
        # Silently a no-op when no bucket is associated with this key.
        if self.bucket != None:
            if res_download_handler:
                # Resumable path: the handler drives get_file itself.
                res_download_handler.get_file(self, fp, headers, cb, num_cb,
                                              torrent=torrent,
                                              version_id=version_id)
            else:
                self.get_file(fp, headers, cb, num_cb, torrent=torrent,
                              version_id=version_id,
                              response_headers=response_headers)
+
+ def get_contents_to_filename(self, filename, headers=None,
+ cb=None, num_cb=10,
+ torrent=False,
+ version_id=None,
+ res_download_handler=None,
+ response_headers=None):
+ """
+ Retrieve an object from S3 using the name of the Key object as the
+ key in S3. Store contents of the object to a file named by 'filename'.
+ See get_contents_to_file method for details about the
+ parameters.
+
+ :type filename: string
+ :param filename: The filename of where to put the file contents
+
+ :type headers: dict
+ :param headers: Any additional headers to send in the request
+
+ :type cb: function
+ :param cb: (optional) a callback function that will be called to
+ report progress on the download. The callback should
+ accept two integer parameters, the first representing
+ the number of bytes that have been successfully
+ transmitted from S3 and the second representing the
+ total number of bytes that need to be transmitted.
+
+ :type num_cb: int
+ :param num_cb: (optional) If a callback is specified with the
+ cb parameter this parameter determines the
+ granularity of the callback by defining the
+ maximum number of times the callback will be
+ called during the file transfer.
+
+ :type torrent: bool
+ :param torrent: If True, returns the contents of a torrent file
+ as a string.
+
+ :type res_upload_handler: ResumableDownloadHandler
+ :param res_download_handler: If provided, this handler will
+ perform the download.
+
+ :type response_headers: dict
+ :param response_headers: A dictionary containing HTTP headers/values
+ that will override any headers associated with
+ the stored object in the response.
+ See http://goo.gl/EWOPb for details.
+ """
+ fp = open(filename, 'wb')
+ self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
+ version_id=version_id,
+ res_download_handler=res_download_handler,
+ response_headers=response_headers)
+ fp.close()
+ # if last_modified date was sent from s3, try to set file's timestamp
+ if self.last_modified != None:
+ try:
+ modified_tuple = rfc822.parsedate_tz(self.last_modified)
+ modified_stamp = int(rfc822.mktime_tz(modified_tuple))
+ os.utime(fp.name, (modified_stamp, modified_stamp))
+ except Exception: pass
+
    def get_contents_as_string(self, headers=None,
                               cb=None, num_cb=10,
                               torrent=False,
                               version_id=None,
                               response_headers=None):
        """
        Retrieve an object from S3 using the name of the Key object as the
        key in S3.  Return the contents of the object as a string.
        See get_contents_to_file method for details about the
        parameters.

        :type headers: dict
        :param headers: Any additional headers to send in the request

        :type cb: function
        :param cb: (optional) a callback function that will be called to
                   report progress on the download.  The callback should
                   accept two integer parameters, the first representing
                   the number of bytes that have been successfully
                   transmitted from S3 and the second representing the
                   total number of bytes that need to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the
                       cb parameter this parameter determines the
                       granularity of the callback by defining the
                       maximum number of times the callback will be
                       called during the file transfer.

        :type torrent: bool
        :param torrent: If True, returns the contents of a torrent file
                        as a string.

        :type response_headers: dict
        :param response_headers: A dictionary containing HTTP headers/values
                                 that will override any headers associated with
                                 the stored object in the response.
                                 See http://goo.gl/EWOPb for details.

        :rtype: string
        :returns: The contents of the file as a string
        """
        # Buffer the download in memory and hand back the accumulated data.
        fp = StringIO.StringIO()
        self.get_contents_to_file(fp, headers, cb, num_cb, torrent=torrent,
                                  version_id=version_id,
                                  response_headers=response_headers)
        return fp.getvalue()
+
+ def add_email_grant(self, permission, email_address, headers=None):
+ """
+ Convenience method that provides a quick way to add an email grant
+ to a key. This method retrieves the current ACL, creates a new
+ grant based on the parameters passed in, adds that grant to the ACL
+ and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type email_address: string
+ :param email_address: The email address associated with the AWS
+ account your are granting the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value to controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ policy = self.get_acl(headers=headers)
+ policy.acl.add_email_grant(permission, email_address)
+ self.set_acl(policy, headers=headers)
+
+ def add_user_grant(self, permission, user_id, headers=None):
+ """
+ Convenience method that provides a quick way to add a canonical
+ user grant to a key. This method retrieves the current ACL,
+ creates a new grant based on the parameters passed in, adds that
+ grant to the ACL and then PUT's the new ACL back to S3.
+
+ :type permission: string
+ :param permission: The permission being granted. Should be one of:
+ (READ, WRITE, READ_ACP, WRITE_ACP, FULL_CONTROL).
+
+ :type user_id: string
+ :param user_id: The canonical user id associated with the AWS
+ account your are granting the permission to.
+
+ :type recursive: boolean
+ :param recursive: A boolean value to controls whether the command
+ will apply the grant to all keys within the bucket
+ or not. The default value is False. By passing a
+ True value, the call will iterate through all keys
+ in the bucket and apply the same grant to each key.
+ CAUTION: If you have a lot of keys, this could take
+ a long time!
+ """
+ policy = self.get_acl()
+ policy.acl.add_user_grant(permission, user_id)
+ self.set_acl(policy, headers=headers)
diff --git a/boto/s3/multipart.py b/boto/s3/multipart.py
new file mode 100644
index 0000000..f68540a
--- /dev/null
+++ b/boto/s3/multipart.py
@@ -0,0 +1,260 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import user
+import key
+from boto import handler
+import xml.sax
+
class CompleteMultiPartUpload(object):
    """
    Represents a completed MultiPart Upload.  Contains the
    following useful attributes:

     * location - The URI of the completed upload
     * bucket_name - The name of the bucket in which the upload
                     is contained
     * key_name - The name of the new, completed key
     * etag - The MD5 hash of the completed, combined upload
    """

    def __init__(self, bucket=None):
        # Bug fix: the bucket argument was previously ignored and
        # self.bucket was always set to None.
        self.bucket = bucket
        self.location = None
        self.bucket_name = None
        self.key_name = None
        self.etag = None

    def __repr__(self):
        return '<CompleteMultiPartUpload: %s.%s>' % (self.bucket_name,
                                                     self.key_name)

    def startElement(self, name, attrs, connection):
        # No nested elements to handle in the XML response.
        return None

    def endElement(self, name, value, connection):
        # Map the known XML response elements onto instance attributes;
        # anything unrecognized is stored verbatim under its element name.
        if name == 'Location':
            self.location = value
        elif name == 'Bucket':
            self.bucket_name = value
        elif name == 'Key':
            self.key_name = value
        elif name == 'ETag':
            self.etag = value
        else:
            setattr(self, name, value)
+
class Part(object):
    """
    A single part of a MultiPart upload.

    Useful attributes:

     * part_number - The integer part number
     * last_modified - The last modified date of this part
     * etag - The MD5 hash of this part
     * size - The size, in bytes, of this part
    """

    def __init__(self, bucket=None):
        self.bucket = bucket
        self.part_number = None
        self.last_modified = None
        self.etag = None
        self.size = None

    def __repr__(self):
        num = self.part_number
        if isinstance(num, int):
            return '<Part %d>' % num
        return '<Part %s>' % None

    def startElement(self, name, attrs, connection):
        # Parts contain no nested elements of interest.
        return None

    def endElement(self, name, value, connection):
        # Store the handful of known elements (converting the integer
        # ones); anything unrecognized is stashed verbatim.
        if name == 'PartNumber':
            self.part_number = int(value)
            return
        if name == 'Size':
            self.size = int(value)
            return
        if name == 'LastModified':
            self.last_modified = value
        elif name == 'ETag':
            self.etag = value
        else:
            setattr(self, name, value)
+
def part_lister(mpupload, part_number_marker=''):
    """
    Generator yielding every Part of a multipart upload, transparently
    paging through the service's truncated listings.
    """
    while True:
        for part in mpupload.get_all_parts(None, part_number_marker):
            yield part
        if not mpupload.is_truncated:
            break
        part_number_marker = mpupload.next_part_number_marker
+
class MultiPartUpload(object):
    """
    Represents a MultiPart Upload operation.
    """

    def __init__(self, bucket=None):
        self.bucket = bucket
        self.bucket_name = None
        self.key_name = None
        # Bug fix: this was previously ``self.id = id``, which bound the
        # builtin ``id`` function rather than a null upload id.
        self.id = None
        self.initiator = None
        self.owner = None
        self.storage_class = None
        self.initiated = None
        self.part_number_marker = None
        self.next_part_number_marker = None
        self.max_parts = None
        self.is_truncated = False
        self._parts = None

    def __repr__(self):
        return '<MultiPartUpload %s>' % self.key_name

    def __iter__(self):
        # Iterating the upload pages through all of its parts,
        # issuing as many ListParts requests as needed.
        return part_lister(self, part_number_marker=self.part_number_marker)

    def to_xml(self):
        """
        Return the CompleteMultipartUpload XML document listing the
        part number and etag of every uploaded part.
        """
        # Iterating over self pages through all parts via part_lister,
        # so the previous explicit get_all_parts() call was a redundant
        # extra round trip and has been removed.
        s = '<CompleteMultipartUpload>\n'
        for part in self:
            s += '  <Part>\n'
            s += '    <PartNumber>%d</PartNumber>\n' % part.part_number
            s += '    <ETag>%s</ETag>\n' % part.etag
            s += '  </Part>\n'
        s += '</CompleteMultipartUpload>'
        return s

    def startElement(self, name, attrs, connection):
        # Hand nested Initiator/Owner/Part elements off to their own
        # SAX-style parser objects.
        if name == 'Initiator':
            self.initiator = user.User(self)
            return self.initiator
        elif name == 'Owner':
            self.owner = user.User(self)
            return self.owner
        elif name == 'Part':
            part = Part(self.bucket)
            self._parts.append(part)
            return part
        return None

    def endElement(self, name, value, connection):
        # Map the known XML response elements onto instance attributes;
        # anything unrecognized is stored verbatim under its element name.
        if name == 'Bucket':
            self.bucket_name = value
        elif name == 'Key':
            self.key_name = value
        elif name == 'UploadId':
            self.id = value
        elif name == 'StorageClass':
            self.storage_class = value
        elif name == 'PartNumberMarker':
            self.part_number_marker = value
        elif name == 'NextPartNumberMarker':
            self.next_part_number_marker = value
        elif name == 'MaxParts':
            self.max_parts = int(value)
        elif name == 'IsTruncated':
            if value == 'true':
                self.is_truncated = True
            else:
                self.is_truncated = False
        else:
            setattr(self, name, value)

    def get_all_parts(self, max_parts=None, part_number_marker=None):
        """
        Return the uploaded parts of this MultiPart Upload.  This is
        a lower-level method that requires you to manually page through
        results.  To simplify this process, you can just use the
        object itself as an iterator and it will automatically handle
        all of the paging with S3.
        """
        self._parts = []
        query_args = 'uploadId=%s' % self.id
        if max_parts:
            # Bug fix: the ListParts API expects 'max-parts',
            # not 'max_parts'.
            query_args += '&max-parts=%d' % max_parts
        if part_number_marker:
            query_args += '&part-number-marker=%s' % part_number_marker
        response = self.bucket.connection.make_request('GET', self.bucket.name,
                                                       self.key_name,
                                                       query_args=query_args)
        body = response.read()
        if response.status == 200:
            h = handler.XmlHandler(self, self)
            xml.sax.parseString(body, h)
            return self._parts

    def upload_part_from_file(self, fp, part_num, headers=None, replace=True,
                              cb=None, num_cb=10, policy=None, md5=None,
                              reduced_redundancy=False):
        """
        Upload another part of this MultiPart Upload.

        :type fp: file
        :param fp: The file object you want to upload.

        :type part_num: int
        :param part_num: The number of this part.

        The other parameters are exactly as defined for the
        :class:`boto.s3.key.Key` set_contents_from_file method.
        """
        if part_num < 1:
            raise ValueError('Part numbers must be greater than zero')
        query_args = 'uploadId=%s&partNumber=%d' % (self.id, part_num)
        key = self.bucket.new_key(self.key_name)
        key.set_contents_from_file(fp, headers, replace, cb, num_cb, policy,
                                   md5, reduced_redundancy, query_args)

    def complete_upload(self):
        """
        Complete the MultiPart Upload operation.  This method should
        be called when all parts of the file have been successfully
        uploaded to S3.

        :rtype: :class:`boto.s3.multipart.CompleteMultiPartUpload`
        :returns: An object representing the completed upload.
        """
        # Bug fix: the result of the completion request is now returned
        # to the caller, as the docstring has always promised.
        xml = self.to_xml()
        return self.bucket.complete_multipart_upload(self.key_name,
                                                     self.id, xml)

    def cancel_upload(self):
        """
        Cancels a MultiPart Upload operation.  The storage consumed by
        any previously uploaded parts will be freed.  However, if any
        part uploads are currently in progress, those part uploads
        might or might not succeed.  As a result, it might be necessary
        to abort a given multipart upload multiple times in order to
        completely free all storage consumed by all parts.
        """
        self.bucket.cancel_multipart_upload(self.key_name, self.id)
+
+
diff --git a/boto/s3/prefix.py b/boto/s3/prefix.py
new file mode 100644
index 0000000..fc0f26a
--- /dev/null
+++ b/boto/s3/prefix.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Prefix:
    """
    Represents a common key prefix entry returned in a bucket listing.
    """

    def __init__(self, bucket=None, name=None):
        self.bucket = bucket
        self.name = name

    def startElement(self, name, attrs, connection):
        # Prefix elements contain no children we care about.
        return None

    def endElement(self, name, value, connection):
        # The Prefix element's text becomes our name; anything else is
        # stored verbatim under its element name.
        if name != 'Prefix':
            setattr(self, name, value)
        else:
            self.name = value
+
diff --git a/boto/s3/resumable_download_handler.py b/boto/s3/resumable_download_handler.py
new file mode 100644
index 0000000..0d01477
--- /dev/null
+++ b/boto/s3/resumable_download_handler.py
@@ -0,0 +1,330 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import errno
+import httplib
+import os
+import re
+import socket
+import time
+import boto
+from boto import config, storage_uri_for_key
+from boto.connection import AWSAuthConnection
+from boto.exception import ResumableDownloadException
+from boto.exception import ResumableTransferDisposition
+
+"""
+Resumable download handler.
+
+Resumable downloads will retry failed downloads, resuming at the byte count
+completed by the last download attempt. If too many retries happen with no
+progress (per configurable num_retries param), the download will be aborted.
+
+The caller can optionally specify a tracker_file_name param in the
+ResumableDownloadHandler constructor. If you do this, that file will
+save the state needed to allow retrying later, in a separate process
+(e.g., in a later run of gsutil).
+
+Note that resumable downloads work across providers (they depend only
+on support Range GETs), but this code is in the boto.s3 package
+because it is the wrong abstraction level to go in the top-level boto
+package.
+
+TODO: At some point we should refactor the code to have a storage_service
+package where all these provider-independent files go.
+"""
+
+
class ByteTranslatingCallbackHandler(object):
    """
    Proxy that shifts progress callbacks made by boto.s3.Key.get_file()
    by the number of bytes that were already present when the download
    was resumed, so callers see progress against the whole file.
    """
    def __init__(self, proxied_cb, download_start_point):
        self.proxied_cb = proxied_cb
        self.download_start_point = download_start_point

    def call(self, total_bytes_uploaded, total_size):
        offset = self.download_start_point
        self.proxied_cb(offset + total_bytes_uploaded, offset + total_size)
+
+
def get_cur_file_size(fp, position_to_eof=False):
    """
    Return the size of the file behind ``fp``.

    Unless ``position_to_eof`` is True, the current position is saved
    and restored so the caller's read position is unaffected; otherwise
    ``fp`` is left positioned at EOF.
    """
    saved_pos = None if position_to_eof else fp.tell()
    fp.seek(0, os.SEEK_END)
    size = fp.tell()
    if saved_pos is not None:
        fp.seek(saved_pos, os.SEEK_SET)
    return size
+
+
class ResumableDownloadHandler(object):
    """
    Handler for resumable downloads.

    Tracks download state (the source object's etag) in an optional
    tracker file so an interrupted transfer can be resumed, possibly by
    a later process, via Range GETs.
    """

    # Matches the 32 hex-digit MD5 etag stored on the first line of the
    # tracker file.
    ETAG_REGEX = '([a-z0-9]{32})\n'

    # Transient failure types worth retrying; anything else propagates.
    RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
                            socket.gaierror)

    def __init__(self, tracker_file_name=None, num_retries=None):
        """
        Constructor. Instantiate once for each downloaded file.

        :type tracker_file_name: string
        :param tracker_file_name: optional file name to save tracking info
            about this download. If supplied and the current process fails
            the download, it can be retried in a new process. If called
            with an existing file containing an unexpired timestamp,
            we'll resume the transfer for this file; else we'll start a
            new resumable download.

        :type num_retries: int
        :param num_retries: the number of times we'll re-try a resumable
            download making no progress. (Count resets every time we get
            progress, so download can span many more than this number of
            retries.)
        """
        self.tracker_file_name = tracker_file_name
        self.num_retries = num_retries
        # Etag of the object being downloaded, as recorded in the tracker
        # file; None means "no resumable transfer in progress".
        self.etag_value_for_current_download = None
        if tracker_file_name:
            self._load_tracker_file_etag()
        # Save download_start_point in instance state so caller can
        # find how much was transferred by this ResumableDownloadHandler
        # (across retries).
        self.download_start_point = None

    def _load_tracker_file_etag(self):
        """
        Read the saved etag from the tracker file, if present and
        well-formed; otherwise leave state so the download restarts.
        """
        f = None
        try:
            f = open(self.tracker_file_name, 'r')
            etag_line = f.readline()
            m = re.search(self.ETAG_REGEX, etag_line)
            if m:
                self.etag_value_for_current_download = m.group(1)
            else:
                print('Couldn\'t read etag in tracker file (%s). Restarting '
                      'download from scratch.' % self.tracker_file_name)
        except IOError, e:
            # Ignore non-existent file (happens first time a download
            # is attempted on an object), but warn user for other errors.
            if e.errno != errno.ENOENT:
                # Will restart because
                # self.etag_value_for_current_download == None.
                print('Couldn\'t read URI tracker file (%s): %s. Restarting '
                      'download from scratch.' %
                      (self.tracker_file_name, e.strerror))
        finally:
            if f:
                f.close()

    def _save_tracker_info(self, key):
        """
        Record the key's etag in memory and (if configured) in the
        tracker file, so a later process can resume this download.
        """
        self.etag_value_for_current_download = key.etag.strip('"\'')
        if not self.tracker_file_name:
            return
        f = None
        try:
            f = open(self.tracker_file_name, 'w')
            f.write('%s\n' % self.etag_value_for_current_download)
        except IOError, e:
            raise ResumableDownloadException(
                'Couldn\'t write tracker file (%s): %s.\nThis can happen'
                'if you\'re using an incorrectly configured download tool\n'
                '(e.g., gsutil configured to save tracker files to an '
                'unwritable directory)' %
                (self.tracker_file_name, e.strerror),
                ResumableTransferDisposition.ABORT)
        finally:
            if f:
                f.close()

    def _remove_tracker_file(self):
        """
        Delete the tracker file, if one was configured and exists.
        """
        if (self.tracker_file_name and
            os.path.exists(self.tracker_file_name)):
            os.unlink(self.tracker_file_name)

    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
                                    torrent, version_id):
        """
        Attempts a resumable download.

        Resumes with a Range GET if the local file partially matches the
        tracked etag; otherwise truncates fp and starts from scratch.

        Raises ResumableDownloadException if any problems occur.
        """
        cur_file_size = get_cur_file_size(fp, position_to_eof=True)

        if (cur_file_size and
            self.etag_value_for_current_download and
            self.etag_value_for_current_download == key.etag.strip('"\'')):
            # Try to resume existing transfer.
            if cur_file_size > key.size:
                raise ResumableDownloadException(
                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
                    'if you re-try this download it will start from scratch' %
                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
                     key.size), ResumableTransferDisposition.ABORT)
            elif cur_file_size == key.size:
                if key.bucket.connection.debug >= 1:
                    print 'Download complete.'
                return
            if key.bucket.connection.debug >= 1:
                print 'Resuming download.'
            # Request only the bytes we don't have yet, and shift progress
            # callbacks so they report against the whole file.
            headers = headers.copy()
            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
            self.download_start_point = cur_file_size
        else:
            if key.bucket.connection.debug >= 1:
                print 'Starting new resumable download.'
            self._save_tracker_info(key)
            self.download_start_point = 0
            # Truncate the file, in case a new resumable download is being
            # started atop an existing file.
            fp.truncate(0)

        # Disable AWSAuthConnection-level retry behavior, since that would
        # cause downloads to restart from scratch.
        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                     override_num_retries=0)
        fp.flush()

    def _check_final_md5(self, key, file_name):
        """
        Checks that etag from server agrees with md5 computed after the
        download completes. This is important, since the download could
        have spanned a number of hours and multiple processes (e.g.,
        gsutil runs), and the user could change some of the file and not
        realize they have inconsistent data.
        """
        fp = open(file_name, 'r')
        if key.bucket.connection.debug >= 1:
            print 'Checking md5 against etag.'
        hex_md5 = key.compute_md5(fp)[0]
        if hex_md5 != key.etag.strip('"\''):
            file_name = fp.name
            fp.close()
            # The downloaded data is corrupt/stale; remove it so a retry
            # starts clean.
            os.unlink(file_name)
            raise ResumableDownloadException(
                'File changed during download: md5 signature doesn\'t match '
                'etag (incorrect downloaded file deleted)',
                ResumableTransferDisposition.ABORT)

    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
                 version_id=None):
        """
        Retrieves a file from a Key
        :type key: :class:`boto.s3.key.Key` or subclass
        :param key: The Key object from which upload is to be downloaded

        :type fp: file
        :param fp: File pointer into which data should be downloaded

        :type headers: string
        :param: headers to send when retrieving the files

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from the storage service and
            the second representing the total number of bytes that need
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be
            called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type version_id: string
        :param version_id: The version ID (optional)

        Raises ResumableDownloadException if a problem occurs during
        the transfer.
        """

        debug = key.bucket.connection.debug
        if not headers:
            headers = {}

        # Use num-retries from constructor if one was provided; else check
        # for a value specified in the boto config file; else default to 5.
        if self.num_retries is None:
            self.num_retries = config.getint('Boto', 'num_retries', 5)
        progress_less_iterations = 0

        while True:  # Retry as long as we're making progress.
            had_file_bytes_before_attempt = get_cur_file_size(fp)
            try:
                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
                                                 torrent, version_id)
                # Download succeeded, so remove the tracker file (if have one).
                self._remove_tracker_file()
                self._check_final_md5(key, fp.name)
                if debug >= 1:
                    print 'Resumable download complete.'
                return
            except self.RETRYABLE_EXCEPTIONS, e:
                if debug >= 1:
                    print('Caught exception (%s)' % e.__repr__())
            except ResumableDownloadException, e:
                if e.disposition == ResumableTransferDisposition.ABORT:
                    if debug >= 1:
                        print('Caught non-retryable ResumableDownloadException '
                              '(%s)' % e.message)
                    raise
                else:
                    if debug >= 1:
                        print('Caught ResumableDownloadException (%s) - will '
                              'retry' % e.message)

            # At this point we had a re-tryable failure; see if made progress.
            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
                progress_less_iterations = 0
            else:
                progress_less_iterations += 1

            if progress_less_iterations > self.num_retries:
                # Don't retry any longer in the current process.
                raise ResumableDownloadException(
                    'Too many resumable download attempts failed without '
                    'progress. You might try this download again later',
                    ResumableTransferDisposition.ABORT)

            # Close the key, in case a previous download died partway
            # through and left data in the underlying key HTTP buffer.
            key.close()

            # Exponential backoff between progress-less attempts.
            sleep_time_secs = 2**progress_less_iterations
            if debug >= 1:
                print('Got retryable failure (%d progress-less in a row).\n'
                      'Sleeping %d seconds before re-trying' %
                      (progress_less_iterations, sleep_time_secs))
            time.sleep(sleep_time_secs)
diff --git a/boto/s3/user.py b/boto/s3/user.py
new file mode 100644
index 0000000..f45f038
--- /dev/null
+++ b/boto/s3/user.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class User:
    """
    Represents an AWS canonical user, as found in the Owner and Grantee
    elements of S3 ACL documents.
    """
    def __init__(self, parent=None, id='', display_name=''):
        # When constructed by a SAX parent element, register ourselves
        # as that element's owner.
        if parent:
            parent.owner = self
        self.type = None
        self.id = id
        self.display_name = display_name

    def startElement(self, name, attrs, connection):
        # User elements contain no nested children of interest.
        return None

    def endElement(self, name, value, connection):
        if name == 'ID':
            self.id = value
        elif name == 'DisplayName':
            self.display_name = value
        else:
            setattr(self, name, value)

    def to_xml(self, element_name='Owner'):
        """
        Serialize this user as an XML fragment, using ``element_name``
        as the enclosing tag.
        """
        if self.type:
            open_tag = '<%s xsi:type="%s">' % (element_name, self.type)
        else:
            open_tag = '<%s>' % element_name
        pieces = [open_tag,
                  '<ID>%s</ID>' % self.id,
                  '<DisplayName>%s</DisplayName>' % self.display_name,
                  '</%s>' % element_name]
        return ''.join(pieces)
diff --git a/boto/sdb/__init__.py b/boto/sdb/__init__.py
new file mode 100644
index 0000000..f5642c1
--- /dev/null
+++ b/boto/sdb/__init__.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from regioninfo import SDBRegionInfo
+
def regions():
    """
    Get all available regions for the SDB service.

    :rtype: list
    :return: A list of :class:`boto.sdb.regioninfo.RegionInfo` instances
    """
    endpoints = [('us-east-1', 'sdb.amazonaws.com'),
                 ('eu-west-1', 'sdb.eu-west-1.amazonaws.com'),
                 ('us-west-1', 'sdb.us-west-1.amazonaws.com'),
                 ('ap-southeast-1', 'sdb.ap-southeast-1.amazonaws.com')]
    return [SDBRegionInfo(name=name, endpoint=endpoint)
            for name, endpoint in endpoints]
+
def connect_to_region(region_name):
    """
    Given a valid region name, return a
    :class:`boto.sdb.connection.SDBConnection`.

    :param str region_name: The name of the region to connect to.

    :rtype: :class:`boto.sdb.connection.SDBConnection` or ``None``
    :return: A connection to the given region, or None if an invalid region
        name is given
    """
    matches = [region for region in regions() if region.name == region_name]
    if matches:
        return matches[0].connect()
    return None
diff --git a/boto/sdb/connection.py b/boto/sdb/connection.py
new file mode 100644
index 0000000..b5a45b8
--- /dev/null
+++ b/boto/sdb/connection.py
@@ -0,0 +1,607 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import xml.sax
+import threading
+from boto import handler
+from boto.connection import AWSQueryConnection
+from boto.sdb.domain import Domain, DomainMetaData
+from boto.sdb.item import Item
+from boto.sdb.regioninfo import SDBRegionInfo
+from boto.exception import SDBResponseError
+
+class ItemThread(threading.Thread):
+ """
+ A threaded :class:`Item <boto.sdb.item.Item>` retriever utility class.
+ Retrieved :class:`Item <boto.sdb.item.Item>` objects are stored in the
+ ``items`` instance variable after
+ :py:meth:`run() <run>` is called.
+
+ .. tip::
+ The item retrieval will not start until the
+ :func:`run() <boto.sdb.connection.ItemThread.run>` method is called.
+ """
+ def __init__(self, name, domain_name, item_names):
+ """
+ :param str name: A thread name. Used for identification.
+ :param str domain_name: The name of a SimpleDB
+ :class:`Domain <boto.sdb.domain.Domain>`
+ :type item_names: string or list of strings
+ :param item_names: The name(s) of the items to retrieve from the specified
+ :class:`Domain <boto.sdb.domain.Domain>`.
+ :ivar list items: A list of items retrieved. Starts as empty list.
+ """
+ threading.Thread.__init__(self, name=name)
+ #print 'starting %s with %d items' % (name, len(item_names))
+ self.domain_name = domain_name
+ self.conn = SDBConnection()
+ self.item_names = item_names
+ self.items = []
+
+ def run(self):
+ """
+ Start the threaded retrieval of items. Populates the
+ ``items`` list with :class:`Item <boto.sdb.item.Item>` objects.
+ """
+ for item_name in self.item_names:
+ item = self.conn.get_attributes(self.domain_name, item_name)
+ self.items.append(item)
+
+#boto.set_stream_logger('sdb')
+
+class SDBConnection(AWSQueryConnection):
+    """
+    This class serves as a gateway to your SimpleDB region (defaults to
+    us-east-1). Methods within allow access to SimpleDB
+    :class:`Domain <boto.sdb.domain.Domain>` objects and their associated
+    :class:`Item <boto.sdb.item.Item>` objects.
+
+    .. tip::
+        While you may instantiate this class directly, it may be easier to
+        go through :py:func:`boto.connect_sdb`.
+    """
+    DefaultRegionName = 'us-east-1'
+    DefaultRegionEndpoint = 'sdb.amazonaws.com'
+    APIVersion = '2009-04-15'
+    ResponseError = SDBResponseError
+
+    def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+                 is_secure=True, port=None, proxy=None, proxy_port=None,
+                 proxy_user=None, proxy_pass=None, debug=0,
+                 https_connection_factory=None, region=None, path='/',
+                 converter=None):
+        """
+        For any keywords that aren't documented, refer to the parent class,
+        :py:class:`boto.connection.AWSAuthConnection`. You can avoid having
+        to worry about these keyword arguments by instantiating these objects
+        via :py:func:`boto.connect_sdb`.
+
+        :type region: :class:`boto.sdb.regioninfo.SDBRegionInfo`
+        :keyword region: Explicitly specify a region. Defaults to ``us-east-1``
+            if not specified.
+
+        :keyword converter: Optional codec whose ``encode`` method is applied
+            to attribute values before they are sent (see the _build_*
+            helpers below).
+        """
+        if not region:
+            region = SDBRegionInfo(self, self.DefaultRegionName,
+                                   self.DefaultRegionEndpoint)
+        self.region = region
+        AWSQueryConnection.__init__(self, aws_access_key_id,
+                                    aws_secret_access_key,
+                                    is_secure, port, proxy,
+                                    proxy_port, proxy_user, proxy_pass,
+                                    self.region.endpoint, debug,
+                                    https_connection_factory, path)
+        # Running total of BoxUsage (compute seconds) accumulated by the
+        # responses seen on this connection; see get_usage()/print_usage().
+        self.box_usage = 0.0
+        self.converter = converter
+        self.item_cls = Item
+
+    def _required_auth_capability(self):
+        # Tells the auth framework which signing capability this
+        # service's requests require.
+        return ['sdb']
+
+    def set_item_cls(self, cls):
+        """
+        While the default item class is :py:class:`boto.sdb.item.Item`, this
+        default may be overridden. Use this method to change a connection's
+        item class.
+
+        :param object cls: The new class to set as this connection's item
+            class. See the default item class for inspiration as to what your
+            replacement should/could look like.
+        """
+        self.item_cls = cls
+
+    def _build_name_value_list(self, params, attributes, replace=False,
+                               label='Attribute'):
+        """
+        Flatten a dict of attribute name/value pairs into numbered
+        '<label>.<n>.Name' / '<label>.<n>.Value' request parameters,
+        mutating ``params`` in place. A list value produces one numbered
+        pair per element. Indices are 1-based. When ``replace`` is true,
+        a '<label>.<n>.Replace' = 'true' flag accompanies each pair.
+        """
+        # Sort for deterministic parameter ordering (Python 2: keys()
+        # returns a plain list, so in-place sort works).
+        keys = attributes.keys()
+        keys.sort()
+        i = 1
+        for key in keys:
+            value = attributes[key]
+            if isinstance(value, list):
+                for v in value:
+                    params['%s.%d.Name' % (label, i)] = key
+                    if self.converter:
+                        v = self.converter.encode(v)
+                    params['%s.%d.Value' % (label, i)] = v
+                    if replace:
+                        params['%s.%d.Replace' % (label, i)] = 'true'
+                    i += 1
+            else:
+                params['%s.%d.Name' % (label, i)] = key
+                if self.converter:
+                    value = self.converter.encode(value)
+                params['%s.%d.Value' % (label, i)] = value
+                if replace:
+                    params['%s.%d.Replace' % (label, i)] = 'true'
+                i += 1
+
+    def _build_expected_value(self, params, expected_value):
+        """
+        Encode a conditional-write expectation into 'Expected.1.*'
+        request parameters. ``expected_value`` is a [name, value] pair;
+        a boolean second element selects an existence (True) or
+        non-existence (False) check instead of a value match.
+        """
+        params['Expected.1.Name'] = expected_value[0]
+        if expected_value[1] is True:
+            params['Expected.1.Exists'] = 'true'
+        elif expected_value[1] is False:
+            params['Expected.1.Exists'] = 'false'
+        else:
+            params['Expected.1.Value'] = expected_value[1]
+
+    def _build_batch_list(self, params, items, replace=False):
+        """
+        Flatten a dict of {item_name: attribute_dict} into numbered
+        'Item.<i>.*' request parameters for the batch calls, mutating
+        ``params`` in place. An item value of ``None`` emits only the
+        ItemName (used by batch delete to remove the entire item).
+
+        NOTE(review): indices here start at 0, unlike the 1-based
+        numbering in _build_name_value_list — confirm the service
+        accepts both conventions before unifying.
+        """
+        item_names = items.keys()
+        i = 0
+        for item_name in item_names:
+            params['Item.%d.ItemName' % i] = item_name
+            j = 0
+            item = items[item_name]
+            if item is not None:
+                attr_names = item.keys()
+                for attr_name in attr_names:
+                    value = item[attr_name]
+                    if isinstance(value, list):
+                        for v in value:
+                            if self.converter:
+                                v = self.converter.encode(v)
+                            params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
+                            params['Item.%d.Attribute.%d.Value' % (i, j)] = v
+                            if replace:
+                                params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
+                            j += 1
+                    else:
+                        params['Item.%d.Attribute.%d.Name' % (i, j)] = attr_name
+                        if self.converter:
+                            value = self.converter.encode(value)
+                        params['Item.%d.Attribute.%d.Value' % (i, j)] = value
+                        if replace:
+                            params['Item.%d.Attribute.%d.Replace' % (i, j)] = 'true'
+                        j += 1
+            i += 1
+
+    def _build_name_list(self, params, attribute_names):
+        """
+        Flatten a list of attribute names into 1-based
+        'Attribute.<n>.Name' request parameters. Note: sorts the
+        caller's list in place.
+        """
+        i = 1
+        attribute_names.sort()
+        for name in attribute_names:
+            params['Attribute.%d.Name' % i] = name
+            i += 1
+
+    def get_usage(self):
+        """
+        Returns the BoxUsage (in USD) accumulated on this specific SDBConnection
+        instance.
+
+        .. tip:: This can be out of date, and should only be treated as a
+            rough estimate. Also note that this estimate only applies to the
+            requests made on this specific connection instance. It is by
+            no means an account-wide estimate.
+
+        :rtype: float
+        :return: The accumulated BoxUsage of all requests made on the connection.
+        """
+        return self.box_usage
+
+    def print_usage(self):
+        """
+        Print the BoxUsage and approximate costs of all requests made on
+        this specific SDBConnection instance.
+
+        .. tip:: This can be out of date, and should only be treated as a
+            rough estimate. Also note that this estimate only applies to the
+            requests made on this specific connection instance. It is by
+            no means an account-wide estimate.
+        """
+        print 'Total Usage: %f compute seconds' % self.box_usage
+        # NOTE(review): $0.14/compute-hour is hard-coded historical
+        # pricing — verify against current published rates.
+        cost = self.box_usage * 0.14
+        print 'Approximate Cost: $%f' % cost
+
+    def get_domain(self, domain_name, validate=True):
+        """
+        Retrieves a :py:class:`boto.sdb.domain.Domain` object whose name
+        matches ``domain_name``.
+
+        :param str domain_name: The name of the domain to retrieve
+        :keyword bool validate: When ``True``, check to see if the domain
+            actually exists. If ``False``, blindly return a
+            :py:class:`Domain <boto.sdb.domain.Domain>` object with the
+            specified name set.
+
+        :raises:
+            :py:class:`boto.exception.SDBResponseError` if ``validate`` is
+            ``True`` and no match could be found.
+
+        :rtype: :py:class:`boto.sdb.domain.Domain`
+        :return: The requested domain
+        """
+        domain = Domain(self, domain_name)
+        if validate:
+            # A 1-row select against the domain fails (raises) when the
+            # domain does not exist, which is the validation mechanism.
+            self.select(domain, """select * from `%s` limit 1""" % domain_name)
+        return domain
+
+    def lookup(self, domain_name, validate=True):
+        """
+        Lookup an existing SimpleDB domain. This differs from
+        :py:meth:`get_domain` in that ``None`` is returned if ``validate`` is
+        ``True`` and no match was found (instead of raising an exception).
+
+        :param str domain_name: The name of the domain to retrieve
+
+        :param bool validate: If ``True``, a ``None`` value will be returned
+            if the specified domain can't be found. If ``False``, a
+            :py:class:`Domain <boto.sdb.domain.Domain>` object will be dumbly
+            returned, regardless of whether it actually exists.
+
+        :rtype: :class:`boto.sdb.domain.Domain` object or ``None``
+        :return: The Domain object or ``None`` if the domain does not exist.
+        """
+        try:
+            domain = self.get_domain(domain_name, validate)
+        # NOTE(review): bare except deliberately maps the validation
+        # failure to None, but it also hides unrelated errors (network,
+        # auth); consider narrowing to SDBResponseError.
+        except:
+            domain = None
+        return domain
+
+    def get_all_domains(self, max_domains=None, next_token=None):
+        """
+        Returns a :py:class:`boto.resultset.ResultSet` containing
+        all :py:class:`boto.sdb.domain.Domain` objects associated with
+        this connection's Access Key ID.
+
+        :keyword int max_domains: Limit the returned
+            :py:class:`ResultSet <boto.resultset.ResultSet>` to the specified
+            number of members.
+        :keyword str next_token: A token string that was returned in an
+            earlier call to this method as the ``next_token`` attribute
+            on the returned :py:class:`ResultSet <boto.resultset.ResultSet>`
+            object. This attribute is set if there are more than Domains than
+            the value specified in the ``max_domains`` keyword. Pass the
+            ``next_token`` value from you earlier query in this keyword to
+            get the next 'page' of domains.
+        """
+        params = {}
+        if max_domains:
+            params['MaxNumberOfDomains'] = max_domains
+        if next_token:
+            params['NextToken'] = next_token
+        return self.get_list('ListDomains', params, [('DomainName', Domain)])
+
+    def create_domain(self, domain_name):
+        """
+        Create a SimpleDB domain.
+
+        :type domain_name: string
+        :param domain_name: The name of the new domain
+
+        :rtype: :class:`boto.sdb.domain.Domain` object
+        :return: The newly created domain
+        """
+        params = {'DomainName':domain_name}
+        d = self.get_object('CreateDomain', params, Domain)
+        d.name = domain_name
+        return d
+
+    def get_domain_and_name(self, domain_or_name):
+        """
+        Given a ``str`` or :class:`boto.sdb.domain.Domain`, return a
+        ``tuple`` with the following members (in order):
+
+            * In instance of :class:`boto.sdb.domain.Domain` for the requested
+              domain
+            * The domain's name as a ``str``
+
+        :type domain_or_name: ``str`` or :class:`boto.sdb.domain.Domain`
+        :param domain_or_name: The domain or domain name to get the domain
+            and name for.
+
+        :raises: :class:`boto.exception.SDBResponseError` when an invalid
+            domain name is specified.
+
+        :rtype: tuple
+        :return: A ``tuple`` with contents outlined as per above.
+        """
+        if (isinstance(domain_or_name, Domain)):
+            return (domain_or_name, domain_or_name.name)
+        else:
+            # Name given: resolve (and validate) it via get_domain.
+            return (self.get_domain(domain_or_name), domain_or_name)
+
+    def delete_domain(self, domain_or_name):
+        """
+        Delete a SimpleDB domain.
+
+        .. caution:: This will delete the domain and all items within the domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :rtype: bool
+        :return: True if successful
+
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName':domain_name}
+        return self.get_status('DeleteDomain', params)
+
+    def domain_metadata(self, domain_or_name):
+        """
+        Get the Metadata for a SimpleDB domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :rtype: :class:`boto.sdb.domain.DomainMetaData` object
+        :return: The newly created domain metadata object
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName':domain_name}
+        d = self.get_object('DomainMetadata', params, DomainMetaData)
+        d.domain = domain
+        return d
+
+    def put_attributes(self, domain_or_name, item_name, attributes,
+                       replace=True, expected_value=None):
+        """
+        Store attributes for a given item in a domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type item_name: string
+        :param item_name: The name of the item whose attributes are being
+            stored.
+
+        :type attribute_names: dict or dict-like object
+        :param attribute_names: The name/value pairs to store as attributes
+
+        :type expected_value: list
+        :param expected_value: If supplied, this is a list or tuple consisting
+            of a single attribute name and expected value. The list can be
+            of the form:
+
+                * ['name', 'value']
+
+            In which case the call will first verify that the attribute "name"
+            of this item has a value of "value". If it does, the delete
+            will proceed, otherwise a ConditionalCheckFailed error will be
+            returned. The list can also be of the form:
+
+                * ['name', True|False]
+
+            which will simply check for the existence (True) or
+            non-existence (False) of the attribute.
+
+        :type replace: bool
+        :param replace: Whether the attribute values passed in will replace
+            existing values or will be added as addition values.
+            Defaults to True.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName' : domain_name,
+                  'ItemName' : item_name}
+        self._build_name_value_list(params, attributes, replace)
+        if expected_value:
+            self._build_expected_value(params, expected_value)
+        return self.get_status('PutAttributes', params)
+
+    def batch_put_attributes(self, domain_or_name, items, replace=True):
+        """
+        Store attributes for multiple items in a domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type items: dict or dict-like object
+        :param items: A dictionary-like object. The keys of the dictionary are
+            the item names and the values are themselves dictionaries
+            of attribute names/values, exactly the same as the
+            attribute_names parameter of the scalar put_attributes
+            call.
+
+        :type replace: bool
+        :param replace: Whether the attribute values passed in will replace
+            existing values or will be added as addition values.
+            Defaults to True.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName' : domain_name}
+        self._build_batch_list(params, items, replace)
+        # POST because the flattened batch parameters can exceed GET
+        # query-string limits.
+        return self.get_status('BatchPutAttributes', params, verb='POST')
+
+    def get_attributes(self, domain_or_name, item_name, attribute_names=None,
+                       consistent_read=False, item=None):
+        """
+        Retrieve attributes for a given item in a domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type item_name: string
+        :param item_name: The name of the item whose attributes are
+            being retrieved.
+
+        :type attribute_names: string or list of strings
+        :param attribute_names: An attribute name or list of attribute names.
+            This parameter is optional. If not supplied, all attributes will
+            be retrieved for the item.
+
+        :type consistent_read: bool
+        :param consistent_read: When set to true, ensures that the most recent
+            data is returned.
+
+        :type item: :class:`boto.sdb.item.Item`
+        :keyword item: Instead of instantiating a new Item object, you may
+            specify one to update.
+
+        :rtype: :class:`boto.sdb.item.Item`
+        :return: An Item with the requested attribute name/values set on it
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName' : domain_name,
+                  'ItemName' : item_name}
+        if consistent_read:
+            params['ConsistentRead'] = 'true'
+        if attribute_names:
+            if not isinstance(attribute_names, list):
+                attribute_names = [attribute_names]
+            self.build_list_params(params, attribute_names, 'AttributeName')
+        # Parsed by hand (rather than via get_object) so the response can
+        # be merged into a caller-supplied item when one is given.
+        response = self.make_request('GetAttributes', params)
+        body = response.read()
+        if response.status == 200:
+            if item == None:
+                item = self.item_cls(domain, item_name)
+            h = handler.XmlHandler(item, self)
+            xml.sax.parseString(body, h)
+            return item
+        else:
+            raise SDBResponseError(response.status, response.reason, body)
+
+    def delete_attributes(self, domain_or_name, item_name, attr_names=None,
+                          expected_value=None):
+        """
+        Delete attributes from a given item in a domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type item_name: string
+        :param item_name: The name of the item whose attributes are being
+            deleted.
+
+        :type attributes: dict, list or :class:`boto.sdb.item.Item`
+        :param attributes: Either a list containing attribute names which
+            will cause all values associated with that attribute
+            name to be deleted or a dict or Item containing the
+            attribute names and keys and list of values to
+            delete as the value. If no value is supplied,
+            all attribute name/values for the item will be
+            deleted.
+
+        :type expected_value: list
+        :param expected_value: If supplied, this is a list or tuple consisting
+            of a single attribute name and expected value. The list can be
+            of the form:
+
+                * ['name', 'value']
+
+            In which case the call will first verify that the attribute "name"
+            of this item has a value of "value". If it does, the delete
+            will proceed, otherwise a ConditionalCheckFailed error will be
+            returned. The list can also be of the form:
+
+                * ['name', True|False]
+
+            which will simply check for the existence (True) or
+            non-existence (False) of the attribute.
+
+        :rtype: bool
+        :return: True if successful
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName':domain_name,
+                  'ItemName' : item_name}
+        if attr_names:
+            # A list deletes whole attributes; a dict/Item deletes only
+            # the specific name/value pairs given.
+            if isinstance(attr_names, list):
+                self._build_name_list(params, attr_names)
+            elif isinstance(attr_names, dict) or isinstance(attr_names, self.item_cls):
+                self._build_name_value_list(params, attr_names)
+        if expected_value:
+            self._build_expected_value(params, expected_value)
+        return self.get_status('DeleteAttributes', params)
+
+    def batch_delete_attributes(self, domain_or_name, items):
+        """
+        Delete multiple items in a domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object.
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type items: dict or dict-like object
+        :param items: A dictionary-like object. The keys of the dictionary are
+            the item names and the values are either:
+
+                * dictionaries of attribute names/values, exactly the
+                  same as the attribute_names parameter of the scalar
+                  put_attributes call. The attribute name/value pairs
+                  will only be deleted if they match the name/value
+                  pairs passed in.
+                * None which means that all attributes associated
+                  with the item should be deleted.
+
+        :return: True if successful
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'DomainName' : domain_name}
+        self._build_batch_list(params, items, False)
+        return self.get_status('BatchDeleteAttributes', params, verb='POST')
+
+    def select(self, domain_or_name, query='', next_token=None,
+               consistent_read=False):
+        """
+        Returns a set of Attributes for item names within domain_name that
+        match the query. The query must be expressed in using the SELECT
+        style syntax rather than the original SimpleDB query language.
+        Even though the select request does not require a domain object,
+        a domain object must be passed into this method so the Item objects
+        returned can point to the appropriate domain.
+
+        :type domain_or_name: string or :class:`boto.sdb.domain.Domain` object
+        :param domain_or_name: Either the name of a domain or a Domain object
+
+        :type query: string
+        :param query: The SimpleDB query to be performed.
+
+        :type next_token: str
+        :param next_token: An opaque token from an earlier, truncated
+            Select response, used to fetch the next page of results.
+
+        :type consistent_read: bool
+        :param consistent_read: When set to true, ensures that the most recent
+            data is returned.
+
+        :rtype: ResultSet
+        :return: An iterator containing the results.
+        """
+        domain, domain_name = self.get_domain_and_name(domain_or_name)
+        params = {'SelectExpression' : query}
+        if consistent_read:
+            params['ConsistentRead'] = 'true'
+        if next_token:
+            params['NextToken'] = next_token
+        try:
+            return self.get_list('Select', params, [('Item', self.item_cls)],
+                                 parent=domain)
+        except SDBResponseError, e:
+            # Prepend the failing query to the error body to aid debugging.
+            e.body = "Query: %s\n%s" % (query, e.body)
+            raise e
diff --git a/boto/sdb/db/__init__.py b/boto/sdb/db/__init__.py
new file mode 100644
index 0000000..86044ed
--- /dev/null
+++ b/boto/sdb/db/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
diff --git a/boto/sdb/db/blob.py b/boto/sdb/db/blob.py
new file mode 100644
index 0000000..45a3624
--- /dev/null
+++ b/boto/sdb/db/blob.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class Blob(object):
+ """Blob object"""
+ def __init__(self, value=None, file=None, id=None):
+ self._file = file
+ self.id = id
+ self.value = value
+
+ @property
+ def file(self):
+ from StringIO import StringIO
+ if self._file:
+ f = self._file
+ else:
+ f = StringIO(self.value)
+ return f
+
+ def __str__(self):
+ if hasattr(self.file, "get_contents_as_string"):
+ value = self.file.get_contents_as_string()
+ else:
+ value = self.file.getvalue()
+ try:
+ return str(value)
+ except:
+ return unicode(value)
+
+ def read(self):
+ return self.file.read()
+
+ def readline(self):
+ return self.file.readline()
+
+ def next(self):
+ return self.file.next()
+
+ def __iter__(self):
+ return iter(self.file)
+
+ @property
+ def size(self):
+ if self._file:
+ return self._file.size
+ elif self.value:
+ return len(self.value)
+ else:
+ return 0
diff --git a/boto/sdb/db/key.py b/boto/sdb/db/key.py
new file mode 100644
index 0000000..42a9d8d
--- /dev/null
+++ b/boto/sdb/db/key.py
@@ -0,0 +1,59 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+class Key(object):
+
+ @classmethod
+ def from_path(cls, *args, **kwds):
+ raise NotImplementedError, "Paths are not currently supported"
+
+ def __init__(self, encoded=None, obj=None):
+ self.name = None
+ if obj:
+ self.id = obj.id
+ self.kind = obj.kind()
+ else:
+ self.id = None
+ self.kind = None
+
+ def app(self):
+ raise NotImplementedError, "Applications are not currently supported"
+
+ def kind(self):
+ return self.kind
+
+ def id(self):
+ return self.id
+
+ def name(self):
+ raise NotImplementedError, "Key Names are not currently supported"
+
+ def id_or_name(self):
+ return self.id
+
+ def has_id_or_name(self):
+ return self.id != None
+
+ def parent(self):
+ raise NotImplementedError, "Key parents are not currently supported"
+
+ def __str__(self):
+ return self.id_or_name()
diff --git a/boto/sdb/db/manager/__init__.py b/boto/sdb/db/manager/__init__.py
new file mode 100644
index 0000000..0777796
--- /dev/null
+++ b/boto/sdb/db/manager/__init__.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+
+def get_manager(cls):
+ """
+ Returns the appropriate Manager class for a given Model class. It does this by
+ looking in the boto config for a section like this::
+
+ [DB]
+ db_type = SimpleDB
+ db_user = <aws access key id>
+ db_passwd = <aws secret access key>
+ db_name = my_domain
+ [DB_TestBasic]
+ db_type = SimpleDB
+ db_user = <another aws access key id>
+ db_passwd = <another aws secret access key>
+ db_name = basic_domain
+ db_port = 1111
+
+ The values in the DB section are "generic values" that will be used if nothing more
+ specific is found. You can also create a section for a specific Model class that
+ gives the db info for that class. In the example above, TestBasic is a Model subclass.
+ """
+ db_user = boto.config.get('DB', 'db_user', None)
+ db_passwd = boto.config.get('DB', 'db_passwd', None)
+ db_type = boto.config.get('DB', 'db_type', 'SimpleDB')
+ db_name = boto.config.get('DB', 'db_name', None)
+ db_table = boto.config.get('DB', 'db_table', None)
+ db_host = boto.config.get('DB', 'db_host', "sdb.amazonaws.com")
+ db_port = boto.config.getint('DB', 'db_port', 443)
+ enable_ssl = boto.config.getbool('DB', 'enable_ssl', True)
+ sql_dir = boto.config.get('DB', 'sql_dir', None)
+ debug = boto.config.getint('DB', 'debug', 0)
+ # first see if there is a fully qualified section name in the Boto config file
+ module_name = cls.__module__.replace('.', '_')
+ db_section = 'DB_' + module_name + '_' + cls.__name__
+ if not boto.config.has_section(db_section):
+ db_section = 'DB_' + cls.__name__
+ if boto.config.has_section(db_section):
+ db_user = boto.config.get(db_section, 'db_user', db_user)
+ db_passwd = boto.config.get(db_section, 'db_passwd', db_passwd)
+ db_type = boto.config.get(db_section, 'db_type', db_type)
+ db_name = boto.config.get(db_section, 'db_name', db_name)
+ db_table = boto.config.get(db_section, 'db_table', db_table)
+ db_host = boto.config.get(db_section, 'db_host', db_host)
+ db_port = boto.config.getint(db_section, 'db_port', db_port)
+ enable_ssl = boto.config.getint(db_section, 'enable_ssl', enable_ssl)
+ debug = boto.config.getint(db_section, 'debug', debug)
+ elif hasattr(cls.__bases__[0], "_manager"):
+ return cls.__bases__[0]._manager
+ if db_type == 'SimpleDB':
+ from sdbmanager import SDBManager
+ return SDBManager(cls, db_name, db_user, db_passwd,
+ db_host, db_port, db_table, sql_dir, enable_ssl)
+ elif db_type == 'PostgreSQL':
+ from pgmanager import PGManager
+ if db_table:
+ return PGManager(cls, db_name, db_user, db_passwd,
+ db_host, db_port, db_table, sql_dir, enable_ssl)
+ else:
+ return None
+ elif db_type == 'XML':
+ from xmlmanager import XMLManager
+ return XMLManager(cls, db_name, db_user, db_passwd,
+ db_host, db_port, db_table, sql_dir, enable_ssl)
+ else:
+ raise ValueError, 'Unknown db_type: %s' % db_type
+
diff --git a/boto/sdb/db/manager/pgmanager.py b/boto/sdb/db/manager/pgmanager.py
new file mode 100644
index 0000000..73a93f0
--- /dev/null
+++ b/boto/sdb/db/manager/pgmanager.py
@@ -0,0 +1,389 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from boto.sdb.db.key import Key
+from boto.sdb.db.model import Model
+import psycopg2
+import psycopg2.extensions
+import uuid
+import os
+import string
+from boto.exception import SDBPersistenceError
+
+psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
+
class PGConverter:
    """Convert property values between Python objects and PostgreSQL form.

    Reference-type values (Key and Model instances) are stored as their
    string id; list properties are rendered as PostgreSQL array literals;
    everything else passes through unchanged unless a codec is registered
    in ``type_map``.
    """

    def __init__(self, manager):
        # manager is the PGManager that owns this converter; it is used to
        # resolve stored ids back into model objects.
        self.manager = manager
        self.type_map = {Key: (self.encode_reference, self.decode_reference),
                         Model: (self.encode_reference, self.decode_reference)}

    def encode(self, type, value):
        """Encode value using the codec registered for type, if any."""
        if type in self.type_map:
            encode = self.type_map[type][0]
            return encode(value)
        return value

    def decode(self, type, value):
        """Decode value using the codec registered for type, if any."""
        if type in self.type_map:
            decode = self.type_map[type][1]
            return decode(value)
        return value

    def encode_prop(self, prop, value):
        """Encode a property value for storage.

        Typed lists become a PostgreSQL array literal ('{a,b,c}');
        scalars defer to type-based encoding.
        """
        if isinstance(value, list):
            if hasattr(prop, 'item_type'):
                # Loop-invariant: resolve the item type once, not per element.
                item_type = getattr(prop, 'item_type')
                if Model in item_type.mro():
                    item_type = Model
                new_value = ['%s' % self.encode(item_type, v) for v in value]
                return '{' + ','.join(new_value) + '}'
            else:
                return value
        return self.encode(prop.data_type, value)

    def decode_prop(self, prop, value):
        """Decode a stored value back into the property's Python type."""
        if prop.data_type == list:
            if value is not None:
                if not isinstance(value, list):
                    value = [value]
                if hasattr(prop, 'item_type'):
                    item_type = getattr(prop, "item_type")
                    if Model in item_type.mro():
                        # Items of a different model class are delegated to
                        # that class's own manager.
                        if item_type != self.manager.cls:
                            return item_type._manager.decode_value(prop, value)
                        else:
                            item_type = Model
                    return [self.decode(item_type, v) for v in value]
            return value
        elif hasattr(prop, 'reference_class'):
            ref_class = getattr(prop, 'reference_class')
            if ref_class != self.manager.cls:
                return ref_class._manager.decode_value(prop, value)
            else:
                return self.decode(prop.data_type, value)
        elif hasattr(prop, 'calculated_type'):
            calc_type = getattr(prop, 'calculated_type')
            return self.decode(calc_type, value)
        else:
            return self.decode(prop.data_type, value)

    def encode_reference(self, value):
        """Store references as the referenced object's id ('' for None)."""
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        if value is None:
            return ''
        else:
            return value.id

    def decode_reference(self, value):
        """Resolve a stored id back into an object via the manager."""
        if not value:
            return None
        try:
            return self.manager.get_object_from_id(value)
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; bad ids still surface as ValueError.
            raise ValueError('Unable to convert %s to Object' % value)
+
class PGManager(object):
    """Persistence manager that stores Model objects in a PostgreSQL table.

    One manager is bound to one model class (``cls``) and one table
    (``db_table``).  Objects are stored one row each, keyed by an "id"
    column holding a UUID string; property values are converted to and
    from SQL form by a PGConverter.

    NOTE(review): most queries here are built by string interpolation
    rather than parameter passing, so values containing quotes can break
    the SQL and this is unsafe for untrusted input.  The bare ``print``
    statements throughout look like leftover debug output.
    """

    def __init__(self, cls, db_name, db_user, db_passwd,
                 db_host, db_port, db_table, sql_dir, enable_ssl):
        # sql_dir points at a directory of per-class .sql DDL templates
        # (see _get_sql).  enable_ssl is accepted for interface parity with
        # the other managers but is not used by the psycopg2 connection.
        self.cls = cls
        self.db_name = db_name
        self.db_user = db_user
        self.db_passwd = db_passwd
        self.db_host = db_host
        self.db_port = db_port
        self.db_table = db_table
        self.sql_dir = sql_dir
        self.in_transaction = False
        self.converter = PGConverter(self)
        self._connect()

    def _build_connect_string(self):
        # Assemble a libpq-style DSN from the constructor arguments.
        cs = 'dbname=%s user=%s password=%s host=%s port=%d'
        return cs % (self.db_name, self.db_user, self.db_passwd,
                     self.db_host, self.db_port)

    def _connect(self):
        # One shared connection and cursor are reused for most operations;
        # query() opens its own cursor so results can be streamed.
        self.connection = psycopg2.connect(self._build_connect_string())
        self.connection.set_client_encoding('UTF8')
        self.cursor = self.connection.cursor()

    def _object_lister(self, cursor):
        # Generator: turn each row of an open cursor into a model object,
        # closing the cursor once iteration is exhausted.
        try:
            for row in cursor:
                yield self._object_from_row(row, cursor.description)
        except StopIteration:
            cursor.close()
            raise StopIteration

    def _dict_from_row(self, row, description):
        # Map column names (from the cursor description) to row values.
        d = {}
        for i in range(0, len(row)):
            d[description[i][0]] = row[i]
        return d

    def _object_from_row(self, row, description=None):
        # Rehydrate a model instance from a result row.  Calculated
        # properties are set directly (bypassing the property setter);
        # empty values fall back to the property default.
        if not description:
            description = self.cursor.description
        d = self._dict_from_row(row, description)
        obj = self.cls(d['id'])
        obj._manager = self
        obj._auto_update = False
        for prop in obj.properties(hidden=False):
            if prop.data_type != Key:
                v = self.decode_value(prop, d[prop.name])
                v = prop.make_value_from_datastore(v)
                if hasattr(prop, 'calculated_type'):
                    prop._set_direct(obj, v)
                elif not prop.empty(v):
                    setattr(obj, prop.name, v)
                else:
                    setattr(obj, prop.name, prop.default_value())
        return obj

    def _build_insert_qs(self, obj, calculated):
        """Build an INSERT statement and its value list for obj.

        Properties in ``calculated`` are computed by the database and are
        fetched back via a RETURNING clause instead of being supplied.
        The id column is only filled in client-side when id is not itself
        a calculated property.
        """
        fields = []
        values = []
        templs = []
        id_calculated = [p for p in calculated if p.name == 'id']
        for prop in obj.properties(hidden=False):
            if prop not in calculated:
                value = prop.get_value_for_datastore(obj)
                # Skip properties still at their default unless required.
                if value != prop.default_value() or prop.required:
                    value = self.encode_value(prop, value)
                    values.append(value)
                    fields.append('"%s"' % prop.name)
                    templs.append('%s')
        qs = 'INSERT INTO "%s" (' % self.db_table
        if len(id_calculated) == 0:
            qs += '"id",'
        qs += ','.join(fields)
        qs += ") VALUES ("
        if len(id_calculated) == 0:
            qs += "'%s'," % obj.id
        qs += ','.join(templs)
        qs += ')'
        if calculated:
            qs += ' RETURNING '
            calc_values = ['"%s"' % p.name for p in calculated]
            qs += ','.join(calc_values)
        qs += ';'
        return qs, values

    def _build_update_qs(self, obj, calculated):
        """Build an UPDATE statement and value list for obj, keyed on id.

        As with _build_insert_qs, calculated properties are read back via
        RETURNING rather than written.
        """
        fields = []
        values = []
        for prop in obj.properties(hidden=False):
            if prop not in calculated:
                value = prop.get_value_for_datastore(obj)
                if value != prop.default_value() or prop.required:
                    value = self.encode_value(prop, value)
                    values.append(value)
                    field = '"%s"=' % prop.name
                    field += '%s'
                    fields.append(field)
        qs = 'UPDATE "%s" SET ' % self.db_table
        qs += ','.join(fields)
        qs += """ WHERE "id" = '%s'""" % obj.id
        if calculated:
            qs += ' RETURNING '
            calc_values = ['"%s"' % p.name for p in calculated]
            qs += ','.join(calc_values)
        qs += ';'
        return qs, values

    def _get_sql(self, mapping=None):
        # Load the DDL template <sql_dir>/<ClassName>.sql (if present) and
        # substitute $-placeholders from ``mapping``.  Returns None when no
        # sql_dir was configured or the file is missing.
        print '_get_sql'
        sql = None
        if self.sql_dir:
            path = os.path.join(self.sql_dir, self.cls.__name__ + '.sql')
            print path
            if os.path.isfile(path):
                fp = open(path)
                sql = fp.read()
                fp.close()
                t = string.Template(sql)
                sql = t.safe_substitute(mapping)
        return sql

    def start_transaction(self):
        # Defer commits until end_transaction() is called.
        print 'start_transaction'
        self.in_transaction = True

    def end_transaction(self):
        print 'end_transaction'
        self.in_transaction = False
        self.commit()

    def commit(self):
        # No-op while a transaction is open; otherwise commit, rolling back
        # (and re-raising) on a programming error.
        if not self.in_transaction:
            print '!!commit on %s' % self.db_table
            try:
                self.connection.commit()

            except psycopg2.ProgrammingError, err:
                self.connection.rollback()
                raise err

    def rollback(self):
        print '!!rollback on %s' % self.db_table
        self.connection.rollback()

    def delete_table(self):
        self.cursor.execute('DROP TABLE "%s";' % self.db_table)
        self.commit()

    def create_table(self, mapping=None):
        # Runs the DDL returned by _get_sql; note _get_sql may return None
        # (no template found), in which case execute() will raise.
        self.cursor.execute(self._get_sql(mapping))
        self.commit()

    def encode_value(self, prop, value):
        # Delegate property encoding/decoding to the converter.
        return self.converter.encode_prop(prop, value)

    def decode_value(self, prop, value):
        return self.converter.decode_prop(prop, value)

    def execute_sql(self, query):
        # Execute an arbitrary statement and commit.
        self.cursor.execute(query, None)
        self.commit()

    def query_sql(self, query, vars=None):
        self.cursor.execute(query, vars)
        return self.cursor.fetchall()

    def lookup(self, cls, name, value):
        """Return the single object whose property ``name`` equals ``value``.

        Raises SDBPersistenceError if ``name`` is not a property of cls,
        KeyError if no row matches, LookupError if more than one does.
        """
        values = []
        qs = 'SELECT * FROM "%s" WHERE ' % self.db_table
        found = False
        for property in cls.properties(hidden=False):
            if property.name == name:
                found = True
                value = self.encode_value(property, value)
                values.append(value)
                qs += "%s=" % name
                qs += "%s"
        if not found:
            raise SDBPersistenceError('%s is not a valid field' % name)
        qs += ';'
        print qs
        self.cursor.execute(qs, values)
        if self.cursor.rowcount == 1:
            row = self.cursor.fetchone()
            return self._object_from_row(row, self.cursor.description)
        elif self.cursor.rowcount == 0:
            raise KeyError, 'Object not found'
        else:
            raise LookupError, 'Multiple Objects Found'

    def query(self, cls, filters, limit=None, order_by=None):
        # NOTE(review): ``limit`` and ``order_by`` are accepted but never
        # used, and multiple filter clauses are joined with ',' which is
        # not valid SQL -- presumably this should be ' AND '; confirm
        # before relying on multi-filter queries.
        parts = []
        qs = 'SELECT * FROM "%s"' % self.db_table
        if filters:
            qs += ' WHERE '
        properties = cls.properties(hidden=False)
        for filter, value in filters:
            name, op = filter.strip().split()
            found = False
            for property in properties:
                if property.name == name:
                    found = True
                    value = self.encode_value(property, value)
                    parts.append(""""%s"%s'%s'""" % (name, op, value))
            if not found:
                raise SDBPersistenceError('%s is not a valid field' % name)
        qs += ','.join(parts)
        qs += ';'
        print qs
        cursor = self.connection.cursor()
        cursor.execute(qs)
        return self._object_lister(cursor)

    def get_property(self, prop, obj, name):
        # Fetch a single column for obj and decode it.  Note the loop
        # rebinds ``prop``; the passed-in prop is effectively a fallback.
        qs = """SELECT "%s" FROM "%s" WHERE id='%s';""" % (name, self.db_table, obj.id)
        print qs
        self.cursor.execute(qs, None)
        if self.cursor.rowcount == 1:
            rs = self.cursor.fetchone()
            for prop in obj.properties(hidden=False):
                if prop.name == name:
                    v = self.decode_value(prop, rs[0])
                    return v
        raise AttributeError, '%s not found' % name

    def set_property(self, prop, obj, name, value):
        # NOTE(review): the leading ``pass`` is dead code, and ``value`` is
        # encoded twice (once here and again inside the %-format below),
        # which will double-encode list/reference values -- verify intent.
        pass
        value = self.encode_value(prop, value)
        qs = 'UPDATE "%s" SET ' % self.db_table
        qs += "%s='%s'" % (name, self.encode_value(prop, value))
        qs += " WHERE id='%s'" % obj.id
        qs += ';'
        print qs
        self.cursor.execute(qs)
        self.commit()

    def get_object(self, cls, id):
        # Load one object by primary key; raises if it does not exist.
        qs = """SELECT * FROM "%s" WHERE id='%s';""" % (self.db_table, id)
        self.cursor.execute(qs, None)
        if self.cursor.rowcount == 1:
            row = self.cursor.fetchone()
            return self._object_from_row(row, self.cursor.description)
        else:
            raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id))

    def get_object_from_id(self, id):
        return self.get_object(self.cls, id)

    def _find_calculated_props(self, obj):
        # Properties whose values are produced by the database.
        return [p for p in obj.properties() if hasattr(p, 'calculated_type')]

    def save_object(self, obj):
        """INSERT the object if it has no id yet, otherwise UPDATE it.

        Database-calculated property values returned by the RETURNING
        clause are written straight onto the object.
        """
        obj._auto_update = False
        calculated = self._find_calculated_props(obj)
        if not obj.id:
            obj.id = str(uuid.uuid4())
            qs, values = self._build_insert_qs(obj, calculated)
        else:
            qs, values = self._build_update_qs(obj, calculated)
        print qs
        self.cursor.execute(qs, values)
        if calculated:
            calc_values = self.cursor.fetchone()
            print calculated
            print calc_values
            for i in range(0, len(calculated)):
                prop = calculated[i]
                prop._set_direct(obj, calc_values[i])
        self.commit()

    def delete_object(self, obj):
        qs = """DELETE FROM "%s" WHERE id='%s';""" % (self.db_table, obj.id)
        print qs
        self.cursor.execute(qs)
        self.commit()
+
+
diff --git a/boto/sdb/db/manager/sdbmanager.py b/boto/sdb/db/manager/sdbmanager.py
new file mode 100644
index 0000000..6aac568
--- /dev/null
+++ b/boto/sdb/db/manager/sdbmanager.py
@@ -0,0 +1,638 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+import re
+from boto.utils import find_class
+import uuid
+from boto.sdb.db.key import Key
+from boto.sdb.db.model import Model
+from boto.sdb.db.blob import Blob
+from boto.sdb.db.property import ListProperty, MapProperty
+from datetime import datetime, date
+from boto.exception import SDBPersistenceError
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+
class SDBConverter:
    """
    Responsible for converting base Python types to a format compatible
    with the underlying database.  For SimpleDB, that means everything is
    converted to a string when stored and back from a string when
    retrieved.

    To convert a value, pass it to the encode or decode method.  The
    encode method takes a Python native value and converts it to DB
    format; decode does the reverse.  Numeric types are stored offset
    and zero-padded so SimpleDB's lexicographic comparisons order them
    correctly.
    """

    def __init__(self, manager):
        self.manager = manager
        # Maps a Python type to its (encoder, decoder) pair.
        self.type_map = {bool: (self.encode_bool, self.decode_bool),
                         int: (self.encode_int, self.decode_int),
                         long: (self.encode_long, self.decode_long),
                         float: (self.encode_float, self.decode_float),
                         Model: (self.encode_reference, self.decode_reference),
                         Key: (self.encode_reference, self.decode_reference),
                         datetime: (self.encode_datetime, self.decode_datetime),
                         date: (self.encode_date, self.decode_date),
                         Blob: (self.encode_blob, self.decode_blob),
                         }

    def encode(self, item_type, value):
        """Encode value using the codec registered for item_type, if any."""
        try:
            # Any Model subclass shares the reference codec.
            if Model in item_type.mro():
                item_type = Model
        except Exception:
            # item_type may not be a new-style class at all; narrowed from
            # a bare except so control-flow exceptions still propagate.
            pass
        if item_type in self.type_map:
            encode = self.type_map[item_type][0]
            return encode(value)
        return value

    def decode(self, item_type, value):
        """Decode value using the codec registered for item_type, if any."""
        if item_type in self.type_map:
            decode = self.type_map[item_type][1]
            return decode(value)
        return value

    def encode_list(self, prop, value):
        """Encode a list property as "NNN:value" strings.

        The zero-padded index prefix preserves ordering, since SimpleDB
        stores multi-valued attributes as an unordered set.
        """
        if value in (None, []):
            return []
        if not isinstance(value, list):
            # This is a little trick to avoid encoding when it's just a
            # single value, since that most likely means it's from a query
            item_type = getattr(prop, "item_type")
            return self.encode(item_type, value)
        # Just enumerate(value) won't work here because we need to add in
        # some zero padding.  We support lists up to 1,000 attributes,
        # since SDB technically only supports 1024 attributes anyway.
        values = {}
        for k, v in enumerate(value):
            values["%03d" % k] = v
        return self.encode_map(prop, values)

    def encode_map(self, prop, value):
        """Encode a dict property as a list of "key:value" strings."""
        if value is None:
            return None
        if not isinstance(value, dict):
            raise ValueError('Expected a dict value, got %s' % type(value))
        # Loop-invariant: resolve the item type once, not once per key.
        item_type = getattr(prop, "item_type")
        if Model in item_type.mro():
            item_type = Model
        new_value = []
        for key in value:
            encoded_value = self.encode(item_type, value[key])
            if encoded_value is not None:
                new_value.append('%s:%s' % (key, encoded_value))
        return new_value

    def encode_prop(self, prop, value):
        """Dispatch encoding on the property kind (list/map/scalar)."""
        if isinstance(prop, ListProperty):
            return self.encode_list(prop, value)
        elif isinstance(prop, MapProperty):
            return self.encode_map(prop, value)
        else:
            return self.encode(prop.data_type, value)

    def decode_list(self, prop, value):
        """Decode a list property, restoring the original element order.

        Elements carry the zero-padded index prefix written by
        encode_list; sorting on those keys reconstructs the order.  (The
        previous reliance on dict iteration order returned the elements
        in a non-deterministic order.)
        """
        if not isinstance(value, list):
            value = [value]
        if hasattr(prop, 'item_type'):
            item_type = getattr(prop, "item_type")
            dec_val = {}
            for val in value:
                if val is not None:
                    k, v = self.decode_map_element(item_type, val)
                    try:
                        k = int(k)
                    except ValueError:
                        k = v
                    dec_val[k] = v
            value = [dec_val[k] for k in sorted(dec_val)]
        return value

    def decode_map(self, prop, value):
        """Decode a map property back into a dict."""
        if not isinstance(value, list):
            value = [value]
        ret_value = {}
        item_type = getattr(prop, "item_type")
        for val in value:
            k, v = self.decode_map_element(item_type, val)
            ret_value[k] = v
        return ret_value

    def decode_map_element(self, item_type, value):
        """Decode a single "key:value" element of a map."""
        key = value
        if ":" in value:
            key, value = value.split(':', 1)
        if Model in item_type.mro():
            # References are rehydrated as lazy instances by id.
            value = item_type(id=value)
        else:
            value = self.decode(item_type, value)
        return (key, value)

    def decode_prop(self, prop, value):
        """Dispatch decoding on the property kind (list/map/scalar)."""
        if isinstance(prop, ListProperty):
            return self.decode_list(prop, value)
        elif isinstance(prop, MapProperty):
            return self.decode_map(prop, value)
        else:
            return self.decode(prop.data_type, value)

    def encode_int(self, value):
        # Offset by 2**31 so negative ints sort correctly as strings.
        value = int(value)
        value += 2147483648
        return '%010d' % value

    def decode_int(self, value):
        try:
            value = int(value)
        except (TypeError, ValueError):
            # Narrowed from a bare except; undecodable data becomes 0.
            boto.log.error("Error, %s is not an integer" % value)
            value = 0
        value = int(value)
        value -= 2147483648
        return int(value)

    def encode_long(self, value):
        # Offset by 2**63, mirroring encode_int.
        value = long(value)
        value += 9223372036854775808
        return '%020d' % value

    def decode_long(self, value):
        value = long(value)
        value -= 9223372036854775808
        return value

    def encode_bool(self, value):
        if value == True or str(value).lower() in ("true", "yes"):
            return 'true'
        else:
            return 'false'

    def decode_bool(self, value):
        if value.lower() == 'true':
            return True
        else:
            return False

    def encode_float(self, value):
        """
        Encode a float as a lexicographically sortable string.

        See http://tools.ietf.org/html/draft-wood-ldapext-float-00.
        """
        s = '%e' % value
        l = s.split('e')
        mantissa = l[0].ljust(18, '0')
        exponent = l[1]
        if value == 0.0:
            case = '3'
            exponent = '000'
        elif mantissa[0] != '-' and exponent[0] == '+':
            case = '5'
            exponent = exponent[1:].rjust(3, '0')
        elif mantissa[0] != '-' and exponent[0] == '-':
            case = '4'
            exponent = 999 + int(exponent)
            exponent = '%03d' % exponent
        elif mantissa[0] == '-' and exponent[0] == '-':
            case = '2'
            mantissa = '%f' % (10 + float(mantissa))
            mantissa = mantissa.ljust(18, '0')
            exponent = exponent[1:].rjust(3, '0')
        else:
            case = '1'
            mantissa = '%f' % (10 + float(mantissa))
            mantissa = mantissa.ljust(18, '0')
            exponent = 999 - int(exponent)
            exponent = '%03d' % exponent
        return '%s %s %s' % (case, exponent, mantissa)

    def decode_float(self, value):
        """Inverse of encode_float."""
        case = value[0]
        exponent = value[2:5]
        mantissa = value[6:]
        if case == '3':
            return 0.0
        elif case == '5':
            pass
        elif case == '4':
            exponent = '%03d' % (int(exponent) - 999)
        elif case == '2':
            mantissa = '%f' % (float(mantissa) - 10)
            exponent = '-' + exponent
        else:
            mantissa = '%f' % (float(mantissa) - 10)
            exponent = '%03d' % abs((int(exponent) - 999))
        return float(mantissa + 'e' + exponent)

    def encode_datetime(self, value):
        # Already-encoded strings pass through untouched.
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        return value.strftime(ISO8601)

    def decode_datetime(self, value):
        """Parse an ISO8601 timestamp; returns None on bad input."""
        try:
            return datetime.strptime(value, ISO8601)
        except Exception:
            return None

    def encode_date(self, value):
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        return value.isoformat()

    def decode_date(self, value):
        """Parse a YYYY-MM-DD date; returns None on bad input."""
        try:
            value = value.split("-")
            return date(int(value[0]), int(value[1]), int(value[2]))
        except Exception:
            return None

    def encode_reference(self, value):
        # Empty-ish sentinels collapse to None (attribute is deleted).
        if value in (None, 'None', '', ' '):
            return None
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        else:
            return value.id

    def decode_reference(self, value):
        # Stored ids are resolved lazily by the property machinery; just
        # normalize the "None" sentinel here.
        if not value or value == "None":
            return None
        return value

    def encode_blob(self, value):
        """Store a Blob's content in S3 and return its s3:// id."""
        if not value:
            return None
        if isinstance(value, str):
            return value

        if not value.id:
            bucket = self.manager.get_blob_bucket()
            key = bucket.new_key(str(uuid.uuid4()))
            value.id = "s3://%s/%s" % (key.bucket.name, key.name)
        else:
            match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id)
            if match:
                s3 = self.manager.get_s3_connection()
                bucket = s3.get_bucket(match.group(1), validate=False)
                key = bucket.get_key(match.group(2))
            else:
                raise SDBPersistenceError("Invalid Blob ID: %s" % value.id)

        if value.value is not None:
            key.set_contents_from_string(value.value)
        return value.id

    def decode_blob(self, value):
        """Rehydrate a Blob from its s3:// id; None if unresolvable."""
        if not value:
            return None
        match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value)
        if match:
            s3 = self.manager.get_s3_connection()
            bucket = s3.get_bucket(match.group(1), validate=False)
            key = bucket.get_key(match.group(2))
        else:
            return None
        if key:
            return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name))
        else:
            return None
+
class SDBManager(object):
    """Persistence manager that stores Model objects in a SimpleDB domain.

    One manager serves one model class; items are keyed by the object id
    and carry __type__/__module__/__lineage__ bookkeeping attributes so
    objects can be reconstituted polymorphically.  Connections to
    SimpleDB (and to S3, for Blob properties) are created lazily.
    """

    def __init__(self, cls, db_name, db_user, db_passwd,
                 db_host, db_port, db_table, ddl_dir, enable_ssl, consistent=None):
        # db_user/db_passwd double as the AWS access key id / secret key.
        # db_host, db_port, db_table and ddl_dir are accepted for interface
        # parity with the other managers but are not used for SimpleDB.
        self.cls = cls
        self.db_name = db_name
        self.db_user = db_user
        self.db_passwd = db_passwd
        self.db_host = db_host
        self.db_port = db_port
        self.db_table = db_table
        self.ddl_dir = ddl_dir
        self.enable_ssl = enable_ssl
        self.s3 = None
        self.bucket = None
        self.converter = SDBConverter(self)
        self._sdb = None
        self._domain = None
        # A model class can opt in to consistent reads via __consistent__.
        if consistent == None and hasattr(cls, "__consistent__"):
            consistent = cls.__consistent__
        self.consistent = consistent

    @property
    def sdb(self):
        # Lazily-created SimpleDB connection.
        if self._sdb is None:
            self._connect()
        return self._sdb

    @property
    def domain(self):
        # Lazily-created SimpleDB domain handle.
        if self._domain is None:
            self._connect()
        return self._domain

    def _connect(self):
        self._sdb = boto.connect_sdb(aws_access_key_id=self.db_user,
                                     aws_secret_access_key=self.db_passwd,
                                     is_secure=self.enable_ssl)
        # This assumes that the domain has already been created
        # It's much more efficient to do it this way rather than
        # having this make a roundtrip each time to validate.
        # The downside is that if the domain doesn't exist, it breaks
        self._domain = self._sdb.lookup(self.db_name, validate=False)
        if not self._domain:
            self._domain = self._sdb.create_domain(self.db_name)

    def _object_lister(self, cls, query_lister):
        # Turn raw SDB items into model objects, silently skipping items
        # that can no longer be resolved to a class.
        for item in query_lister:
            obj = self.get_object(cls, item.name, item)
            if obj:
                yield obj

    def encode_value(self, prop, value):
        # None is stored by deleting the attribute; values with no
        # property object (raw query input) are stringified as-is.
        if value == None:
            return None
        if not prop:
            return str(value)
        return self.converter.encode_prop(prop, value)

    def decode_value(self, prop, value):
        return self.converter.decode_prop(prop, value)

    def get_s3_connection(self):
        # Lazily-created S3 connection, reusing the manager credentials.
        if not self.s3:
            self.s3 = boto.connect_s3(self.db_user, self.db_passwd)
        return self.s3

    def get_blob_bucket(self, bucket_name=None):
        # NOTE(review): the bucket_name parameter is ignored -- the name
        # is always derived from the access key and domain name.
        s3 = self.get_s3_connection()
        bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name)
        bucket_name = bucket_name.lower()
        try:
            self.bucket = s3.get_bucket(bucket_name)
        except:
            self.bucket = s3.create_bucket(bucket_name)
        return self.bucket

    def load_object(self, obj):
        # Populate a lazily-loaded object's properties from its SDB item.
        if not obj._loaded:
            a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)
            if a.has_key('__type__'):
                for prop in obj.properties(hidden=False):
                    if a.has_key(prop.name):
                        value = self.decode_value(prop, a[prop.name])
                        value = prop.make_value_from_datastore(value)
                        try:
                            setattr(obj, prop.name, value)
                        except Exception, e:
                            boto.log.exception(e)
            obj._loaded = True

    def get_object(self, cls, id, a=None):
        """Instantiate the object stored under ``id``.

        ``a`` may carry already-fetched attributes to avoid a roundtrip.
        If the stored __type__ differs from ``cls``, the actual class is
        resolved dynamically; returns None when it cannot be found.
        """
        obj = None
        if not a:
            a = self.domain.get_attributes(id,consistent_read=self.consistent)
        if a.has_key('__type__'):
            if not cls or a['__type__'] != cls.__name__:
                cls = find_class(a['__module__'], a['__type__'])
            if cls:
                params = {}
                for prop in cls.properties(hidden=False):
                    if a.has_key(prop.name):
                        value = self.decode_value(prop, a[prop.name])
                        value = prop.make_value_from_datastore(value)
                        params[prop.name] = value
                obj = cls(id, **params)
                obj._loaded = True
            else:
                s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__'])
                boto.log.info('sdbmanager: %s' % s)
        return obj

    def get_object_from_id(self, id):
        return self.get_object(None, id)

    def query(self, query):
        # Translate a Query object into a SimpleDB select expression and
        # stream the results back as model objects.
        query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select))
        if query.limit:
            query_str += " limit %s" % query.limit
        rs = self.domain.select(query_str, max_items=query.limit, next_token = query.next_token)
        query.rs = rs
        return self._object_lister(query.model_class, rs)

    def count(self, cls, filters, quick=True, sort_by=None, select=None):
        """
        Get the number of results that would
        be returned in this query
        """
        # select count(*) may return one Count row per result page; with
        # quick=True we return after the first page instead of summing
        # every page.
        query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select))
        count = 0
        for row in self.domain.select(query):
            count += int(row['Count'])
            if quick:
                return count
        return count


    def _build_filter(self, property, name, op, val):
        # Render one comparison clause.  __id__ maps to itemName(); list
        # properties match with (not) like against the "NNN:value" form
        # written by encode_list; embedded quotes are doubled for SDB.
        if name == "__id__":
            name = 'itemName()'
        if name != "itemName()":
            name = '`%s`' % name
        if val == None:
            if op in ('is','='):
                return "%(name)s is null" % {"name": name}
            elif op in ('is not', '!='):
                return "%s is not null" % name
            else:
                val = ""
        if property.__class__ == ListProperty:
            if op in ("is", "="):
                op = "like"
            elif op in ("!=", "not"):
                op = "not like"
            if not(op in ["like", "not like"] and val.startswith("%")):
                val = "%%:%s" % val
        return "%s %s '%s'" % (name, op, val.replace("'", "''"))

    def _build_filter_part(self, cls, filters, order_by=None, select=None):
        """
        Build the filter part
        """
        import types
        query_parts = []
        order_by_filtered = False
        if order_by:
            # A leading '-' on the sort attribute means descending order.
            if order_by[0] == "-":
                order_by_method = "DESC";
                order_by = order_by[1:]
            else:
                order_by_method = "ASC";
        if isinstance(filters, str) or isinstance(filters, unicode):
            # A raw string filter is used verbatim, restricted to this type.
            query = "WHERE `__type__` = '%s' AND %s" % (cls.__name__, filters)
            if order_by != None:
                query += " ORDER BY `%s` %s" % (order_by, order_by_method)
            return query

        for filter in filters:
            filter_parts = []
            filter_props = filter[0]
            if type(filter_props) != list:
                filter_props = [filter_props]
            for filter_prop in filter_props:
                (name, op) = filter_prop.strip().split(" ", 1)
                value = filter[1]
                property = cls.find_property(name)
                if name == order_by:
                    order_by_filtered = True
                # types.TypeType(value) is just type(value); list values
                # expand into OR-ed alternatives.
                if types.TypeType(value) == types.ListType:
                    filter_parts_sub = []
                    for val in value:
                        val = self.encode_value(property, val)
                        if isinstance(val, list):
                            for v in val:
                                filter_parts_sub.append(self._build_filter(property, name, op, v))
                        else:
                            filter_parts_sub.append(self._build_filter(property, name, op, val))
                    filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub)))
                else:
                    val = self.encode_value(property, value)
                    if isinstance(val, list):
                        for v in val:
                            filter_parts.append(self._build_filter(property, name, op, v))
                    else:
                        filter_parts.append(self._build_filter(property, name, op, val))
            query_parts.append("(%s)" % (" or ".join(filter_parts)))


        # Restrict results to this class and all of its subclasses.
        type_query = "(`__type__` = '%s'" % cls.__name__
        for subclass in self._get_all_decendents(cls).keys():
            type_query += " or `__type__` = '%s'" % subclass
        type_query +=")"
        query_parts.append(type_query)

        order_by_query = ""
        if order_by:
            if not order_by_filtered:
                # SimpleDB only sorts on an attribute that appears in the
                # predicate, so force the sort attribute into the WHERE.
                query_parts.append("`%s` LIKE '%%'" % order_by)
            order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method)

        if select:
            query_parts.append("(%s)" % select)

        if len(query_parts) > 0:
            return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query)
        else:
            return ""


    def _get_all_decendents(self, cls):
        """Get all descendants (subclasses, recursively) of a given class."""
        decendents = {}
        for sc in cls.__sub_classes__:
            decendents[sc.__name__] = sc
            decendents.update(self._get_all_decendents(sc))
        return decendents

    def query_gql(self, query_string, *args, **kwds):
        raise NotImplementedError, "GQL queries not supported in SimpleDB"

    def save_object(self, obj):
        """Write all of obj's properties to SimpleDB, assigning a fresh
        UUID id if needed.

        Attributes whose value encodes to None/[] are deleted from the
        item rather than stored.  Uniqueness constraints are checked with
        a find() query -- note this check is not atomic with the write.
        """
        if not obj.id:
            obj.id = str(uuid.uuid4())

        attrs = {'__type__' : obj.__class__.__name__,
                 '__module__' : obj.__class__.__module__,
                 '__lineage__' : obj.get_lineage()}
        del_attrs = []
        for property in obj.properties(hidden=False):
            value = property.get_value_for_datastore(obj)
            if value is not None:
                value = self.encode_value(property, value)
            if value == []:
                value = None
            if value == None:
                del_attrs.append(property.name)
                continue
            attrs[property.name] = value
            if property.unique:
                try:
                    args = {property.name: value}
                    obj2 = obj.find(**args).next()
                    if obj2.id != obj.id:
                        raise SDBPersistenceError("Error: %s must be unique!" % property.name)
                except(StopIteration):
                    pass
        self.domain.put_attributes(obj.id, attrs, replace=True)
        if len(del_attrs) > 0:
            self.domain.delete_attributes(obj.id, del_attrs)
        return obj

    def delete_object(self, obj):
        self.domain.delete_attributes(obj.id)

    def set_property(self, prop, obj, name, value):
        # NOTE(review): the incoming ``value`` argument is immediately
        # discarded in favor of the object's current datastore value --
        # confirm this is intentional.
        value = prop.get_value_for_datastore(obj)
        value = self.encode_value(prop, value)
        if prop.unique:
            try:
                args = {prop.name: value}
                obj2 = obj.find(**args).next()
                if obj2.id != obj.id:
                    raise SDBPersistenceError("Error: %s must be unique!" % prop.name)
            except(StopIteration):
                pass
        self.domain.put_attributes(obj.id, {name : value}, replace=True)

    def get_property(self, prop, obj, name):
        # Re-fetch the item and decode the requested attribute, caching
        # it back onto the object.  Raises AttributeError when absent.
        a = self.domain.get_attributes(obj.id,consistent_read=self.consistent)

        # try to get the attribute value from SDB
        if name in a:
            value = self.decode_value(prop, a[name])
            value = prop.make_value_from_datastore(value)
            setattr(obj, prop.name, value)
            return value
        raise AttributeError, '%s not found' % name

    def set_key_value(self, obj, name, value):
        # Raw attribute write, bypassing property encoding.
        self.domain.put_attributes(obj.id, {name : value}, replace=True)

    def delete_key_value(self, obj, name):
        self.domain.delete_attributes(obj.id, name)

    def get_key_value(self, obj, name):
        # Raw attribute read, bypassing property decoding.
        a = self.domain.get_attributes(obj.id, name,consistent_read=self.consistent)
        if a.has_key(name):
            return a[name]
        else:
            return None

    def get_raw_item(self, obj):
        return self.domain.get_item(obj.id)
+
diff --git a/boto/sdb/db/manager/xmlmanager.py b/boto/sdb/db/manager/xmlmanager.py
new file mode 100644
index 0000000..9765df1
--- /dev/null
+++ b/boto/sdb/db/manager/xmlmanager.py
@@ -0,0 +1,517 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+import boto
+from boto.utils import find_class, Password
+from boto.sdb.db.key import Key
+from boto.sdb.db.model import Model
+from datetime import datetime
+from xml.dom.minidom import getDOMImplementation, parse, parseString, Node
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+class XMLConverter:
+    """
+    Responsible for converting base Python types to format compatible with underlying
+    database. For SimpleDB, that means everything needs to be converted to a string
+    when stored in SimpleDB and from a string when retrieved.
+
+    To convert a value, pass it to the encode or decode method. The encode method
+    will take a Python native value and convert to DB format. The decode method will
+    take a DB format value and convert it to Python native format. To find the appropriate
+    method to call, the generic encode/decode methods will look for the type-specific
+    method by searching for a method called "encode_<type name>" or "decode_<type name>".
+    """
+    def __init__(self, manager):
+        self.manager = manager
+        # Maps a Python type to its (encoder, decoder) pair.  Types not in
+        # this map pass through encode() unchanged and decode as plain text.
+        self.type_map = { bool : (self.encode_bool, self.decode_bool),
+                          int : (self.encode_int, self.decode_int),
+                          long : (self.encode_long, self.decode_long),
+                          Model : (self.encode_reference, self.decode_reference),
+                          Key : (self.encode_reference, self.decode_reference),
+                          Password : (self.encode_password, self.decode_password),
+                          datetime : (self.encode_datetime, self.decode_datetime)}
+
+    def get_text_value(self, parent_node):
+        # Concatenate the data of all TEXT child nodes of an XML element.
+        value = ''
+        for node in parent_node.childNodes:
+            if node.nodeType == node.TEXT_NODE:
+                value += node.data
+        return value
+
+    def encode(self, item_type, value):
+        # Encode a native value for storage; unmapped types pass through.
+        if item_type in self.type_map:
+            encode = self.type_map[item_type][0]
+            return encode(value)
+        return value
+
+    def decode(self, item_type, value):
+        # Decode an XML node back to a native value; unmapped types are
+        # treated as plain text.
+        if item_type in self.type_map:
+            decode = self.type_map[item_type][1]
+            return decode(value)
+        else:
+            value = self.get_text_value(value)
+            return value
+
+    def encode_prop(self, prop, value):
+        # Encode a property value; list values encode each element using
+        # the property's declared item_type (Model subclasses collapse to
+        # Model so they encode as references).
+        if isinstance(value, list):
+            if hasattr(prop, 'item_type'):
+                new_value = []
+                for v in value:
+                    item_type = getattr(prop, "item_type")
+                    if Model in item_type.mro():
+                        item_type = Model
+                    new_value.append(self.encode(item_type, v))
+                return new_value
+            else:
+                return value
+        else:
+            return self.encode(prop.data_type, value)
+
+    def decode_prop(self, prop, value):
+        # Decode a property node; list properties decode each <item> child.
+        if prop.data_type == list:
+            if hasattr(prop, 'item_type'):
+                item_type = getattr(prop, "item_type")
+                if Model in item_type.mro():
+                    item_type = Model
+                values = []
+                for item_node in value.getElementsByTagName('item'):
+                    value = self.decode(item_type, item_node)
+                    values.append(value)
+                return values
+            else:
+                return self.get_text_value(value)
+        else:
+            return self.decode(prop.data_type, value)
+
+    def encode_int(self, value):
+        value = int(value)
+        return '%d' % value
+
+    def decode_int(self, value):
+        value = self.get_text_value(value)
+        if value:
+            value = int(value)
+        else:
+            # Empty text decodes to None rather than raising.
+            value = None
+        return value
+
+    def encode_long(self, value):
+        value = long(value)
+        return '%d' % value
+
+    def decode_long(self, value):
+        value = self.get_text_value(value)
+        return long(value)
+
+    def encode_bool(self, value):
+        if value == True:
+            return 'true'
+        else:
+            return 'false'
+
+    def decode_bool(self, value):
+        # Anything other than 'true' (case-insensitive) decodes as False.
+        value = self.get_text_value(value)
+        if value.lower() == 'true':
+            return True
+        else:
+            return False
+
+    def encode_datetime(self, value):
+        # Serialized in the module-level ISO8601 ("...Z") format.
+        return value.strftime(ISO8601)
+
+    def decode_datetime(self, value):
+        value = self.get_text_value(value)
+        try:
+            return datetime.strptime(value, ISO8601)
+        except:
+            # NOTE(review): bare except maps any parse failure to None —
+            # consider narrowing to ValueError.
+            return None
+
+    def encode_reference(self, value):
+        # References serialize as an <object id=... class=...> element;
+        # strings pass through unchanged and None becomes ''.
+        if isinstance(value, str) or isinstance(value, unicode):
+            return value
+        if value == None:
+            return ''
+        else:
+            val_node = self.manager.doc.createElement("object")
+            val_node.setAttribute('id', value.id)
+            val_node.setAttribute('class', '%s.%s' % (value.__class__.__module__, value.__class__.__name__))
+            return val_node
+
+    def decode_reference(self, value):
+        # Resolve an <object> child element back into a model instance
+        # by looking up its class and fetching by id.
+        if not value:
+            return None
+        try:
+            value = value.childNodes[0]
+            class_name = value.getAttribute("class")
+            id = value.getAttribute("id")
+            cls = find_class(class_name)
+            return cls.get_by_ids(id)
+        except:
+            # NOTE(review): any lookup failure is silently swallowed and
+            # returned as None.
+            return None
+
+    def encode_password(self, value):
+        # Store the (already hashed) string form, or None when empty.
+        if value and len(value) > 0:
+            return str(value)
+        else:
+            return None
+
+    def decode_password(self, value):
+        value = self.get_text_value(value)
+        return Password(value)
+
+
+class XMLManager(object):
+    """DB manager that persists Model objects as XML documents, either
+    in an in-memory DOM or against a remote HTTP server (one URL per
+    object: /<db_name>/<id>).
+
+    NOTE(review): set_key_value/delete_key_value/get_key_value/
+    get_raw_item below reference self.domain, which this class never
+    defines — they appear copied from the SDB manager and would raise
+    AttributeError if called.  Confirm before relying on them.
+    """
+
+    def __init__(self, cls, db_name, db_user, db_passwd,
+                 db_host, db_port, db_table, ddl_dir, enable_ssl):
+        self.cls = cls
+        # Default the database name to the lowercased class name.
+        if not db_name:
+            db_name = cls.__name__.lower()
+        self.db_name = db_name
+        self.db_user = db_user
+        self.db_passwd = db_passwd
+        self.db_host = db_host
+        self.db_port = db_port
+        self.db_table = db_table
+        self.ddl_dir = ddl_dir
+        self.s3 = None
+        self.converter = XMLConverter(self)
+        self.impl = getDOMImplementation()
+        self.doc = self.impl.createDocument(None, 'objects', None)
+
+        self.connection = None
+        self.enable_ssl = enable_ssl
+        self.auth_header = None
+        if self.db_user:
+            # Precompute an HTTP Basic auth header from user/password.
+            import base64
+            base64string = base64.encodestring('%s:%s' % (self.db_user, self.db_passwd))[:-1]
+            authheader = "Basic %s" % base64string
+            self.auth_header = authheader
+
+    def _connect(self):
+        # Open an HTTP(S) connection when a remote host is configured;
+        # otherwise the manager stays purely in-memory.
+        if self.db_host:
+            if self.enable_ssl:
+                from httplib import HTTPSConnection as Connection
+            else:
+                from httplib import HTTPConnection as Connection
+
+            self.connection = Connection(self.db_host, self.db_port)
+
+    def _make_request(self, method, url, post_data=None, body=None):
+        """
+        Make a request on this connection
+        """
+        # NOTE(review): post_data is accepted but never used.
+        if not self.connection:
+            self._connect()
+        # Force a fresh connection for every request.
+        try:
+            self.connection.close()
+        except:
+            pass
+        self.connection.connect()
+        headers = {}
+        if self.auth_header:
+            headers["Authorization"] = self.auth_header
+        self.connection.request(method, url, body, headers)
+        resp = self.connection.getresponse()
+        return resp
+
+    def new_doc(self):
+        # Fresh empty document with an <objects> root element.
+        return self.impl.createDocument(None, 'objects', None)
+
+    def _object_lister(self, cls, doc):
+        # Generator yielding one instantiated object per <object> element.
+        for obj_node in doc.getElementsByTagName('object'):
+            if not cls:
+                class_name = obj_node.getAttribute('class')
+                cls = find_class(class_name)
+            id = obj_node.getAttribute('id')
+            obj = cls(id)
+            for prop_node in obj_node.getElementsByTagName('property'):
+                prop_name = prop_node.getAttribute('name')
+                prop = obj.find_property(prop_name)
+                if prop:
+                    if hasattr(prop, 'item_type'):
+                        value = self.get_list(prop_node, prop.item_type)
+                    else:
+                        value = self.decode_value(prop, prop_node)
+                        value = prop.make_value_from_datastore(value)
+                    setattr(obj, prop.name, value)
+            yield obj
+
+    def reset(self):
+        self._connect()
+
+    def get_doc(self):
+        return self.doc
+
+    def encode_value(self, prop, value):
+        # Delegate to the converter (see XMLConverter.encode_prop).
+        return self.converter.encode_prop(prop, value)
+
+    def decode_value(self, prop, value):
+        return self.converter.decode_prop(prop, value)
+
+    def get_s3_connection(self):
+        # NOTE(review): self.aws_access_key_id / aws_secret_access_key
+        # are never assigned anywhere in this class — this would raise
+        # AttributeError; confirm the intended configuration mechanism.
+        if not self.s3:
+            self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
+        return self.s3
+
+    def get_list(self, prop_node, item_type):
+        # Decode the <items>/<item> children of a list property node;
+        # a node without an <items> child yields an empty list.
+        values = []
+        try:
+            items_node = prop_node.getElementsByTagName('items')[0]
+        except:
+            return []
+        for item_node in items_node.getElementsByTagName('item'):
+            value = self.converter.decode(item_type, item_node)
+            values.append(value)
+        return values
+
+    def get_object_from_doc(self, cls, id, doc):
+        # Build one object from the first <object> element of *doc*;
+        # cls and id fall back to the element's attributes when omitted.
+        obj_node = doc.getElementsByTagName('object')[0]
+        if not cls:
+            class_name = obj_node.getAttribute('class')
+            cls = find_class(class_name)
+        if not id:
+            id = obj_node.getAttribute('id')
+        obj = cls(id)
+        for prop_node in obj_node.getElementsByTagName('property'):
+            prop_name = prop_node.getAttribute('name')
+            prop = obj.find_property(prop_name)
+            value = self.decode_value(prop, prop_node)
+            value = prop.make_value_from_datastore(value)
+            if value != None:
+                # Property validation failures are deliberately ignored
+                # so a partially-bad document still loads.
+                try:
+                    setattr(obj, prop.name, value)
+                except:
+                    pass
+        return obj
+
+    def get_props_from_doc(self, cls, id, doc):
+        """
+        Pull out the properties from this document
+        Returns the class, the properties in a hash, and the id if provided as a tuple
+        :return: (cls, props, id)
+        """
+        obj_node = doc.getElementsByTagName('object')[0]
+        if not cls:
+            class_name = obj_node.getAttribute('class')
+            cls = find_class(class_name)
+        if not id:
+            id = obj_node.getAttribute('id')
+        props = {}
+        for prop_node in obj_node.getElementsByTagName('property'):
+            prop_name = prop_node.getAttribute('name')
+            prop = cls.find_property(prop_name)
+            value = self.decode_value(prop, prop_node)
+            value = prop.make_value_from_datastore(value)
+            if value != None:
+                props[prop.name] = value
+        return (cls, props, id)
+
+
+    def get_object(self, cls, id):
+        # GET /<db_name>/<id> and unmarshal the response document.
+        if not self.connection:
+            self._connect()
+
+        if not self.connection:
+            raise NotImplementedError("Can't query without a database connection")
+        url = "/%s/%s" % (self.db_name, id)
+        resp = self._make_request('GET', url)
+        if resp.status == 200:
+            doc = parse(resp)
+        else:
+            raise Exception("Error: %s" % resp.status)
+        return self.get_object_from_doc(cls, id, doc)
+
+    def query(self, cls, filters, limit=None, order_by=None):
+        # GET /<db_name>?query=...; returns a lazy object generator.
+        if not self.connection:
+            self._connect()
+
+        if not self.connection:
+            raise NotImplementedError("Can't query without a database connection")
+
+        from urllib import urlencode
+
+        query = str(self._build_query(cls, filters, limit, order_by))
+        if query:
+            url = "/%s?%s" % (self.db_name, urlencode({"query": query}))
+        else:
+            url = "/%s" % self.db_name
+        resp = self._make_request('GET', url)
+        if resp.status == 200:
+            doc = parse(resp)
+        else:
+            raise Exception("Error: %s" % resp.status)
+        return self._object_lister(cls, doc)
+
+    def _build_query(self, cls, filters, limit, order_by):
+        # Translate (filter, value) pairs into the SimpleDB-style
+        # "['name' op 'value']" syntax joined with 'intersection'.
+        # NOTE(review): the limit argument is accepted but unused.
+        import types
+        if len(filters) > 4:
+            raise Exception('Too many filters, max is 4')
+        parts = []
+        properties = cls.properties(hidden=False)
+        for filter, value in filters:
+            name, op = filter.strip().split()
+            found = False
+            for property in properties:
+                if property.name == name:
+                    found = True
+                    if types.TypeType(value) == types.ListType:
+                        # A list value becomes an OR of per-element tests.
+                        filter_parts = []
+                        for val in value:
+                            val = self.encode_value(property, val)
+                            filter_parts.append("'%s' %s '%s'" % (name, op, val))
+                        parts.append("[%s]" % " OR ".join(filter_parts))
+                    else:
+                        value = self.encode_value(property, value)
+                        parts.append("['%s' %s '%s']" % (name, op, value))
+            if not found:
+                raise Exception('%s is not a valid field' % name)
+        if order_by:
+            # A leading "-" requests descending order.
+            if order_by.startswith("-"):
+                key = order_by[1:]
+                type = "desc"
+            else:
+                key = order_by
+                type = "asc"
+            parts.append("['%s' starts-with ''] sort '%s' %s" % (key, key, type))
+        return ' intersection '.join(parts)
+
+    def query_gql(self, query_string, *args, **kwds):
+        raise NotImplementedError, "GQL queries not supported in XML"
+
+    def save_list(self, doc, items, prop_node):
+        # Serialize a list property as <items><item>...</item></items>;
+        # Node items are embedded directly, others as text.
+        items_node = doc.createElement('items')
+        prop_node.appendChild(items_node)
+        for item in items:
+            item_node = doc.createElement('item')
+            items_node.appendChild(item_node)
+            if isinstance(item, Node):
+                item_node.appendChild(item)
+            else:
+                text_node = doc.createTextNode(item)
+                item_node.appendChild(text_node)
+
+    def save_object(self, obj):
+        """
+        Marshal the object and do a PUT
+        """
+        doc = self.marshal_object(obj)
+        if obj.id:
+            url = "/%s/%s" % (self.db_name, obj.id)
+        else:
+            url = "/%s" % (self.db_name)
+        resp = self._make_request("PUT", url, body=doc.toxml())
+        new_obj = self.get_object_from_doc(obj.__class__, None, parse(resp))
+        obj.id = new_obj.id
+        # Copy any server-assigned property values back onto *obj*.
+        for prop in obj.properties():
+            try:
+                propname = prop.name
+            except AttributeError:
+                propname = None
+            if propname:
+                value = getattr(new_obj, prop.name)
+                if value:
+                    setattr(obj, prop.name, value)
+        return obj
+
+
+    def marshal_object(self, obj, doc=None):
+        # Serialize *obj* into an <object> element appended to *doc*
+        # (a new document is created when none is supplied).
+        if not doc:
+            doc = self.new_doc()
+        if not doc:
+            doc = self.doc
+        obj_node = doc.createElement('object')
+
+        if obj.id:
+            obj_node.setAttribute('id', obj.id)
+
+        obj_node.setAttribute('class', '%s.%s' % (obj.__class__.__module__,
+                                                  obj.__class__.__name__))
+        root = doc.documentElement
+        root.appendChild(obj_node)
+        for property in obj.properties(hidden=False):
+            prop_node = doc.createElement('property')
+            prop_node.setAttribute('name', property.name)
+            prop_node.setAttribute('type', property.type_name)
+            value = property.get_value_for_datastore(obj)
+            if value is not None:
+                value = self.encode_value(property, value)
+                if isinstance(value, list):
+                    self.save_list(doc, value, prop_node)
+                elif isinstance(value, Node):
+                    prop_node.appendChild(value)
+                else:
+                    # Non-ASCII characters are dropped, not escaped.
+                    text_node = doc.createTextNode(unicode(value).encode("ascii", "ignore"))
+                    prop_node.appendChild(text_node)
+            obj_node.appendChild(prop_node)
+
+        return doc
+
+    def unmarshal_object(self, fp, cls=None, id=None):
+        # Accept either a string of XML or a file-like object.
+        if isinstance(fp, str) or isinstance(fp, unicode):
+            doc = parseString(fp)
+        else:
+            doc = parse(fp)
+        return self.get_object_from_doc(cls, id, doc)
+
+    def unmarshal_props(self, fp, cls=None, id=None):
+        """
+        Same as unmarshalling an object, except it returns
+        from "get_props_from_doc"
+        """
+        if isinstance(fp, str) or isinstance(fp, unicode):
+            doc = parseString(fp)
+        else:
+            doc = parse(fp)
+        return self.get_props_from_doc(cls, id, doc)
+
+    def delete_object(self, obj):
+        # DELETE /<db_name>/<id>; returns the raw HTTP response.
+        url = "/%s/%s" % (self.db_name, obj.id)
+        return self._make_request("DELETE", url)
+
+    def set_key_value(self, obj, name, value):
+        # NOTE(review): self.domain is never defined on XMLManager —
+        # this looks copied from the SDB manager; confirm before use.
+        self.domain.put_attributes(obj.id, {name : value}, replace=True)
+
+    def delete_key_value(self, obj, name):
+        # NOTE(review): see set_key_value — self.domain is undefined here.
+        self.domain.delete_attributes(obj.id, name)
+
+    def get_key_value(self, obj, name):
+        # NOTE(review): see set_key_value — self.domain is undefined here.
+        a = self.domain.get_attributes(obj.id, name)
+        if a.has_key(name):
+            return a[name]
+        else:
+            return None
+
+    def get_raw_item(self, obj):
+        # NOTE(review): see set_key_value — self.domain is undefined here.
+        return self.domain.get_item(obj.id)
+
+    def set_property(self, prop, obj, name, value):
+        # Per-property writes are no-ops for the XML backend.
+        pass
+
+    def get_property(self, prop, obj, name):
+        # Per-property reads are no-ops for the XML backend.
+        pass
+
+    def load_object(self, obj):
+        # Re-fetch the object by id unless it is already loaded.
+        if not obj._loaded:
+            obj = obj.get_by_id(obj.id)
+            obj._loaded = True
+        return obj
+
diff --git a/boto/sdb/db/model.py b/boto/sdb/db/model.py
new file mode 100644
index 0000000..18bec4b
--- /dev/null
+++ b/boto/sdb/db/model.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.sdb.db.manager import get_manager
+from boto.sdb.db.property import Property
+from boto.sdb.db.key import Key
+from boto.sdb.db.query import Query
+import boto
+
+class ModelMeta(type):
+    "Metaclass for all Models"
+
+    def __init__(cls, name, bases, dict):
+        super(ModelMeta, cls).__init__(name, bases, dict)
+        # Make sure this is a subclass of Model - mainly copied from django ModelBase (thanks!)
+        cls.__sub_classes__ = []
+        try:
+            if filter(lambda b: issubclass(b, Model), bases):
+                # Register this class with each base so subclasses can
+                # later be located via Model.find_subclass().
+                for base in bases:
+                    base.__sub_classes__.append(cls)
+                cls._manager = get_manager(cls)
+                # look for all of the Properties and set their names
+                for key in dict.keys():
+                    if isinstance(dict[key], Property):
+                        property = dict[key]
+                        property.__property_config__(cls, key)
+                # Cache public property names for Expando.__setattr__.
+                prop_names = []
+                props = cls.properties()
+                for prop in props:
+                    if not prop.__class__.__name__.startswith('_'):
+                        prop_names.append(prop.name)
+                setattr(cls, '_prop_names', prop_names)
+        except NameError:
+            # 'Model' isn't defined yet, meaning we're looking at our own
+            # Model class, defined below.
+            pass
+
+class Model(object):
+    """Base class for persisted objects.  The ModelMeta metaclass binds
+    Property descriptors and a backend manager (cls._manager)."""
+    __metaclass__ = ModelMeta
+    __consistent__ = False # Consistent is set off by default
+    id = None
+
+    @classmethod
+    def get_lineage(cls):
+        # Dotted MRO path, root-first (e.g. "object.Model.MyClass").
+        l = [c.__name__ for c in cls.mro()]
+        l.reverse()
+        return '.'.join(l)
+
+    @classmethod
+    def kind(cls):
+        return cls.__name__
+
+    @classmethod
+    def _get_by_id(cls, id, manager=None):
+        if not manager:
+            manager = cls._manager
+        return manager.get_object(cls, id)
+
+    @classmethod
+    def get_by_id(cls, ids=None, parent=None):
+        # Accepts either a single id or a list of ids.
+        if isinstance(ids, list):
+            objs = [cls._get_by_id(id) for id in ids]
+            return objs
+        else:
+            return cls._get_by_id(ids)
+
+    get_by_ids = get_by_id
+
+    @classmethod
+    def get_by_key_name(cls, key_names, parent=None):
+        raise NotImplementedError, "Key Names are not currently supported"
+
+    @classmethod
+    def find(cls, limit=None, next_token=None, **params):
+        # Build a Query with one equality filter per keyword argument.
+        q = Query(cls, limit=limit, next_token=next_token)
+        for key, value in params.items():
+            q.filter('%s =' % key, value)
+        return q
+
+    @classmethod
+    def all(cls, limit=None, next_token=None):
+        return cls.find(limit=limit, next_token=next_token)
+
+    @classmethod
+    def get_or_insert(key_name, **kw):
+        # NOTE(review): missing the implicit 'cls' parameter for a
+        # classmethod — key_name receives the class object.  Harmless
+        # only because this always raises.
+        raise NotImplementedError, "get_or_insert not currently supported"
+
+    @classmethod
+    def properties(cls, hidden=True):
+        # Walk the first-base inheritance chain collecting Property
+        # descriptors; hidden=False skips '_'-prefixed property classes.
+        properties = []
+        while cls:
+            for key in cls.__dict__.keys():
+                prop = cls.__dict__[key]
+                if isinstance(prop, Property):
+                    if hidden or not prop.__class__.__name__.startswith('_'):
+                        properties.append(prop)
+            if len(cls.__bases__) > 0:
+                cls = cls.__bases__[0]
+            else:
+                cls = None
+        return properties
+
+    @classmethod
+    def find_property(cls, prop_name):
+        # Return the public Property named prop_name, or None.
+        property = None
+        while cls:
+            for key in cls.__dict__.keys():
+                prop = cls.__dict__[key]
+                if isinstance(prop, Property):
+                    if not prop.__class__.__name__.startswith('_') and prop_name == prop.name:
+                        property = prop
+            if len(cls.__bases__) > 0:
+                cls = cls.__bases__[0]
+            else:
+                cls = None
+        return property
+
+    @classmethod
+    def get_xmlmanager(cls):
+        # Lazily create one shared XMLManager for XML (de)serialization.
+        if not hasattr(cls, '_xmlmanager'):
+            from boto.sdb.db.manager.xmlmanager import XMLManager
+            cls._xmlmanager = XMLManager(cls, None, None, None,
+                                         None, None, None, None, False)
+        return cls._xmlmanager
+
+    @classmethod
+    def from_xml(cls, fp):
+        xmlmanager = cls.get_xmlmanager()
+        return xmlmanager.unmarshal_object(fp)
+
+    def __init__(self, id=None, **kw):
+        self._loaded = False
+        # first try to initialize all properties to their default values
+        for prop in self.properties(hidden=False):
+            try:
+                setattr(self, prop.name, prop.default_value())
+            except ValueError:
+                pass
+        if kw.has_key('manager'):
+            self._manager = kw['manager']
+        self.id = id
+        for key in kw:
+            if key != 'manager':
+                # We don't want any errors populating up when loading an object,
+                # so if it fails we just revert to it's default value
+                try:
+                    setattr(self, key, kw[key])
+                except Exception, e:
+                    boto.log.exception(e)
+
+    def __repr__(self):
+        return '%s<%s>' % (self.__class__.__name__, self.id)
+
+    def __str__(self):
+        return str(self.id)
+
+    def __eq__(self, other):
+        # NOTE(review): __eq__ without __ne__/__hash__; in Python 2 the
+        # instances stay hashable by identity.
+        return other and isinstance(other, Model) and self.id == other.id
+
+    def _get_raw_item(self):
+        return self._manager.get_raw_item(self)
+
+    def load(self):
+        # Fetch from the datastore at most once per instance.
+        if self.id and not self._loaded:
+            self._manager.load_object(self)
+
+    def reload(self):
+        # Force a re-fetch even if already loaded.
+        if self.id:
+            self._loaded = False
+            self._manager.load_object(self)
+
+    def put(self):
+        self._manager.save_object(self)
+
+    save = put
+
+    def delete(self):
+        self._manager.delete_object(self)
+
+    def key(self):
+        return Key(obj=self)
+
+    def set_manager(self, manager):
+        self._manager = manager
+
+    def to_dict(self):
+        # Shape: {ClassName: {'properties': {...}, 'id': ...}}
+        props = {}
+        for prop in self.properties(hidden=False):
+            props[prop.name] = getattr(self, prop.name)
+        obj = {'properties' : props,
+               'id' : self.id}
+        return {self.__class__.__name__ : obj}
+
+    def to_xml(self, doc=None):
+        xmlmanager = self.get_xmlmanager()
+        doc = xmlmanager.marshal_object(self, doc)
+        return doc
+
+    @classmethod
+    def find_subclass(cls, name):
+        """Find a subclass with a given name"""
+        if name == cls.__name__:
+            return cls
+        # Depth-first search of the registered subclass tree.
+        for sc in cls.__sub_classes__:
+            r = sc.find_subclass(name)
+            if r != None:
+                return r
+
+class Expando(Model):
+    """Model subclass that also persists arbitrary (undeclared)
+    attributes through the manager's key/value interface."""
+
+    def __setattr__(self, name, value):
+        # Declared properties, private names, and 'id' behave normally;
+        # anything else is additionally written through to the datastore.
+        if name in self._prop_names:
+            object.__setattr__(self, name, value)
+        elif name.startswith('_'):
+            object.__setattr__(self, name, value)
+        elif name == 'id':
+            object.__setattr__(self, name, value)
+        else:
+            self._manager.set_key_value(self, name, value)
+            object.__setattr__(self, name, value)
+
+    def __getattr__(self, name):
+        # Fall back to the datastore for unknown public attributes and
+        # cache any truthy value on the instance.
+        if not name.startswith('_'):
+            value = self._manager.get_key_value(self, name)
+            if value:
+                object.__setattr__(self, name, value)
+                return value
+        raise AttributeError
+
+
diff --git a/boto/sdb/db/property.py b/boto/sdb/db/property.py
new file mode 100644
index 0000000..ab4f7a8
--- /dev/null
+++ b/boto/sdb/db/property.py
@@ -0,0 +1,608 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import datetime
+from key import Key
+from boto.utils import Password
+from boto.sdb.db.query import Query
+import re
+import boto
+import boto.s3.key
+from boto.sdb.db.blob import Blob
+
+class Property(object):
+    """Base descriptor for Model attributes.  Subclasses override
+    data_type / type_name and validate() as needed; values live in a
+    private slot attribute ('_<name>') on the instance."""
+
+    data_type = str
+    type_name = ''
+    name = ''
+    verbose_name = ''
+
+    def __init__(self, verbose_name=None, name=None, default=None, required=False,
+                 validator=None, choices=None, unique=False):
+        self.verbose_name = verbose_name
+        self.name = name
+        self.default = default
+        self.required = required
+        self.validator = validator
+        self.choices = choices
+        # slot_name is the backing instance attribute; it is re-derived
+        # in __property_config__ once the real name is known.
+        if self.name:
+            self.slot_name = '_' + self.name
+        else:
+            self.slot_name = '_'
+        self.unique = unique
+
+    def __get__(self, obj, objtype):
+        if obj:
+            # Lazy-load the instance before reading the backing slot.
+            obj.load()
+            return getattr(obj, self.slot_name)
+        else:
+            return None
+
+    def __set__(self, obj, value):
+        self.validate(value)
+
+        # Fire off any on_set functions
+        try:
+            if obj._loaded and hasattr(obj, "on_set_%s" % self.name):
+                fnc = getattr(obj, "on_set_%s" % self.name)
+                value = fnc(value)
+        except Exception:
+            boto.log.exception("Exception running on_set_%s" % self.name)
+
+        setattr(obj, self.slot_name, value)
+
+    def __property_config__(self, model_class, property_name):
+        # Called by ModelMeta to bind this property to its class/name.
+        self.model_class = model_class
+        self.name = property_name
+        self.slot_name = '_' + self.name
+
+    def default_validator(self, value):
+        # The default value is always accepted, even if its type differs.
+        if value == self.default_value():
+            return
+        if not isinstance(value, self.data_type):
+            raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+
+    def default_value(self):
+        return self.default
+
+    def validate(self, value):
+        if self.required and value==None:
+            raise ValueError, '%s is a required property' % self.name
+        if self.choices and value and not value in self.choices:
+            raise ValueError, '%s not a valid choice for %s.%s' % (value, self.model_class.__name__, self.name)
+        if self.validator:
+            self.validator(value)
+        else:
+            self.default_validator(value)
+        return value
+
+    def empty(self, value):
+        return not value
+
+    def get_value_for_datastore(self, model_instance):
+        return getattr(model_instance, self.name)
+
+    def make_value_from_datastore(self, value):
+        return value
+
+    def get_choices(self):
+        # choices may be a callable producing the sequence lazily.
+        if callable(self.choices):
+            return self.choices()
+        return self.choices
+
+def validate_string(value):
+ if value == None:
+ return
+ elif isinstance(value, str) or isinstance(value, unicode):
+ if len(value) > 1024:
+ raise ValueError, 'Length of value greater than maxlength'
+ else:
+ raise TypeError, 'Expecting String, got %s' % type(value)
+
+class StringProperty(Property):
+    """Property holding a str/unicode value; validate_string caps the
+    length at 1024 characters by default."""
+
+    type_name = 'String'
+
+    def __init__(self, verbose_name=None, name=None, default='', required=False,
+                 validator=validate_string, choices=None, unique=False):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+class TextProperty(Property):
+    """Property for longer text with an optional max_length cap.
+
+    NOTE(review): validate() rejects None even though required defaults
+    to False — confirm whether None should be permitted here.
+    """
+
+    type_name = 'Text'
+
+    def __init__(self, verbose_name=None, name=None, default='', required=False,
+                 validator=None, choices=None, unique=False, max_length=None):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+        self.max_length = max_length
+
+    def validate(self, value):
+        if not isinstance(value, str) and not isinstance(value, unicode):
+            raise TypeError, 'Expecting Text, got %s' % type(value)
+        if self.max_length and len(value) > self.max_length:
+            raise ValueError, 'Length of value greater than maxlength %s' % self.max_length
+
+class PasswordProperty(StringProperty):
+    """
+    Hashed property whose original value can not be
+    retrieved, but still can be compared.
+    """
+    data_type = Password
+    type_name = 'Password'
+
+    def __init__(self, verbose_name=None, name=None, default='', required=False,
+                 validator=None, choices=None, unique=False):
+        StringProperty.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+    def make_value_from_datastore(self, value):
+        p = Password(value)
+        return p
+
+    def get_value_for_datastore(self, model_instance):
+        # Store the string form, or None when empty.
+        value = StringProperty.get_value_for_datastore(self, model_instance)
+        if value and len(value):
+            return str(value)
+        else:
+            return None
+
+    def __set__(self, obj, value):
+        # Wrap raw values in a Password before storing.
+        if not isinstance(value, Password):
+            p = Password()
+            p.set(value)
+            value = p
+        Property.__set__(self, obj, value)
+
+    def __get__(self, obj, objtype):
+        # Always hand back a Password wrapper, never the raw slot value.
+        return Password(StringProperty.__get__(self, obj, objtype))
+
+    def validate(self, value):
+        value = Property.validate(self, value)
+        if isinstance(value, Password):
+            if len(value) > 1024:
+                raise ValueError, 'Length of value greater than maxlength'
+        else:
+            raise TypeError, 'Expecting Password, got %s' % type(value)
+
+class BlobProperty(Property):
+    """Property holding binary data as a Blob; overwriting re-uses the
+    previous blob's id so the stored object is replaced in place."""
+    data_type = Blob
+    type_name = "blob"
+
+    def __set__(self, obj, value):
+        if value != self.default_value():
+            if not isinstance(value, Blob):
+                # Carry the old blob id forward (if any) to avoid
+                # creating a duplicate stored blob.
+                oldb = self.__get__(obj, type(obj))
+                id = None
+                if oldb:
+                    id = oldb.id
+                b = Blob(value=value, id=id)
+                value = b
+        Property.__set__(self, obj, value)
+
+class S3KeyProperty(Property):
+    """Property referencing an S3 key, stored as "s3://bucket/name".
+
+    NOTE(review): validate_regex is not a raw string — the escapes
+    happen to survive here, but r"..." would be safer.
+    """
+
+    data_type = boto.s3.key.Key
+    type_name = 'S3Key'
+    validate_regex = "^s3:\/\/([^\/]*)\/(.*)$"
+
+    def __init__(self, verbose_name=None, name=None, default=None,
+                 required=False, validator=None, choices=None, unique=False):
+        Property.__init__(self, verbose_name, name, default, required,
+                          validator, choices, unique)
+
+    def validate(self, value):
+        if value == self.default_value() or value == str(self.default_value()):
+            return self.default_value()
+        if isinstance(value, self.data_type):
+            return
+        match = re.match(self.validate_regex, value)
+        if match:
+            return
+        raise TypeError, 'Validation Error, expecting %s, got %s' % (self.data_type, type(value))
+
+    def __get__(self, obj, objtype):
+        # Resolve the stored "s3://bucket/name" string into a live Key,
+        # creating an empty key when it does not exist yet.
+        value = Property.__get__(self, obj, objtype)
+        if value:
+            if isinstance(value, self.data_type):
+                return value
+            match = re.match(self.validate_regex, value)
+            if match:
+                s3 = obj._manager.get_s3_connection()
+                bucket = s3.get_bucket(match.group(1), validate=False)
+                k = bucket.get_key(match.group(2))
+                if not k:
+                    k = bucket.new_key(match.group(2))
+                    k.set_contents_from_string("")
+                return k
+        else:
+            return value
+
+    def get_value_for_datastore(self, model_instance):
+        # Serialize the Key back to its "s3://bucket/name" form.
+        value = Property.get_value_for_datastore(self, model_instance)
+        if value:
+            return "s3://%s/%s" % (value.bucket.name, value.name)
+        else:
+            return None
+
+class IntegerProperty(Property):
+    """Integer property; values are coerced with int() and range-checked
+    against min/max (defaults are the signed 32-bit bounds)."""
+
+    data_type = int
+    type_name = 'Integer'
+
+    def __init__(self, verbose_name=None, name=None, default=0, required=False,
+                 validator=None, choices=None, unique=False, max=2147483647, min=-2147483648):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+        self.max = max
+        self.min = min
+
+    def validate(self, value):
+        value = int(value)
+        value = Property.validate(self, value)
+        if value > self.max:
+            raise ValueError, 'Maximum value is %d' % self.max
+        if value < self.min:
+            raise ValueError, 'Minimum value is %d' % self.min
+        return value
+
+    def empty(self, value):
+        # 0 is a legitimate value; only None counts as empty.
+        return value is None
+
+    def __set__(self, obj, value):
+        # Coerce empty-string/None to 0 before the normal set path.
+        if value == "" or value == None:
+            value = 0
+        return Property.__set__(self, obj, value)
+
+
+
+class LongProperty(Property):
+    """Integer property using Python longs, range-checked against the
+    signed 64-bit bounds."""
+
+    data_type = long
+    type_name = 'Long'
+
+    def __init__(self, verbose_name=None, name=None, default=0, required=False,
+                 validator=None, choices=None, unique=False):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+    def validate(self, value):
+        value = long(value)
+        value = Property.validate(self, value)
+        # Signed 64-bit bounds.
+        min = -9223372036854775808
+        max = 9223372036854775807
+        if value > max:
+            raise ValueError, 'Maximum value is %d' % max
+        if value < min:
+            raise ValueError, 'Minimum value is %d' % min
+        return value
+
+    def empty(self, value):
+        return value is None
+
+class BooleanProperty(Property):
+    """Boolean property; False is a valid value, so only None is empty."""
+
+    data_type = bool
+    type_name = 'Boolean'
+
+    def __init__(self, verbose_name=None, name=None, default=False, required=False,
+                 validator=None, choices=None, unique=False):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+    def empty(self, value):
+        return value is None
+
+class FloatProperty(Property):
+    """Float property; values are coerced with float() before the base
+    validation runs."""
+
+    data_type = float
+    type_name = 'Float'
+
+    def __init__(self, verbose_name=None, name=None, default=0.0, required=False,
+                 validator=None, choices=None, unique=False):
+        Property.__init__(self, verbose_name, name, default, required, validator, choices, unique)
+
+    def validate(self, value):
+        value = float(value)
+        value = Property.validate(self, value)
+        return value
+
+    def empty(self, value):
+        # 0.0 is a legitimate value; only None counts as empty.
+        return value is None
+
class DateTimeProperty(Property):
    """A property holding a ``datetime.datetime``.

    :param auto_now: if True, refresh the value to ``self.now()`` every
        time the owning object is written to the datastore.
    :param auto_now_add: if True, new objects default to ``self.now()``.
    """

    data_type = datetime.datetime
    type_name = 'DateTime'

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
                 default=None, required=False, validator=None, choices=None, unique=False):
        Property.__init__(self, verbose_name, name, default, required,
                          validator, choices, unique)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def default_value(self):
        if self.auto_now or self.auto_now_add:
            return self.now()
        return Property.default_value(self)

    def validate(self, value):
        """Allow None; otherwise require a datetime instance.

        Returns the (unchanged) value so callers can use the result,
        consistent with the other Property.validate implementations.
        """
        if value is None:
            return value
        if not isinstance(value, self.data_type):
            raise TypeError('Validation Error, expecting %s, got %s' %
                            (self.data_type, type(value)))
        return value

    def get_value_for_datastore(self, model_instance):
        if self.auto_now:
            # Refresh the stored attribute on every save.
            setattr(model_instance, self.name, self.now())
        return Property.get_value_for_datastore(self, model_instance)

    def now(self):
        """Current time in UTC; override to change the clock source."""
        return datetime.datetime.utcnow()
+
class DateProperty(Property):
    """A property holding a ``datetime.date``.

    :param auto_now: if True, refresh to ``self.now()`` every time the
        owning object is written to the datastore.
    :param auto_now_add: if True, new objects default to ``self.now()``.
    """

    data_type = datetime.date
    type_name = 'Date'

    def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False, name=None,
                 default=None, required=False, validator=None, choices=None, unique=False):
        Property.__init__(self, verbose_name, name, default, required,
                          validator, choices, unique)
        self.auto_now = auto_now
        self.auto_now_add = auto_now_add

    def default_value(self):
        if self.auto_now or self.auto_now_add:
            return self.now()
        return Property.default_value(self)

    def validate(self, value):
        """Allow None; otherwise require a date instance.

        Returns the (unchanged) value so callers can use the result,
        consistent with the other Property.validate implementations.
        """
        if value is None:
            return value
        if not isinstance(value, self.data_type):
            raise TypeError('Validation Error, expecting %s, got %s' %
                            (self.data_type, type(value)))
        return value

    def get_value_for_datastore(self, model_instance):
        if self.auto_now:
            setattr(model_instance, self.name, self.now())
        val = Property.get_value_for_datastore(self, model_instance)
        if isinstance(val, datetime.datetime):
            # Stored values may come back as full datetimes; narrow to a date.
            val = val.date()
        return val

    def now(self):
        """Today's date; override to change the clock source."""
        return datetime.date.today()
+
class ReferenceProperty(Property):
    """A property referencing another Model instance.

    The reference is stored as the target's id string; the referenced
    object is instantiated lazily on first attribute access to avoid
    unnecessary SimpleDB round trips.
    """

    data_type = Key
    type_name = 'Reference'

    def __init__(self, reference_class=None, collection_name=None,
                 verbose_name=None, name=None, default=None, required=False,
                 validator=None, choices=None, unique=False):
        Property.__init__(self, verbose_name, name, default, required,
                          validator, choices, unique)
        self.reference_class = reference_class
        self.collection_name = collection_name

    def __get__(self, obj, objtype):
        if obj:
            value = getattr(obj, self.slot_name)
            if value == self.default_value():
                return value
            # If the value is still the UUID string for the referenced
            # object, instantiate it now that the attribute has actually
            # been accessed.  This lazy instantiation saves unnecessary
            # round trips to SimpleDB.
            if isinstance(value, str) or isinstance(value, unicode):
                value = self.reference_class(value)
                setattr(obj, self.name, value)
            return value

    def __set__(self, obj, value):
        """Don't allow this object to be associated to itself
        This causes bad things to happen"""
        if value is not None and (obj.id == value or
                                  (hasattr(value, "id") and obj.id == value.id)):
            raise ValueError("Can not associate an object with itself!")
        return super(ReferenceProperty, self).__set__(obj, value)

    def __property_config__(self, model_class, property_name):
        Property.__property_config__(self, model_class, property_name)
        if self.collection_name is None:
            self.collection_name = '%s_%s_set' % (model_class.__name__.lower(), self.name)
        if hasattr(self.reference_class, self.collection_name):
            raise ValueError('duplicate property: %s' % self.collection_name)
        # Inject the reverse lookup onto the referenced class.
        setattr(self.reference_class, self.collection_name,
                _ReverseReferenceProperty(model_class, property_name,
                                          self.collection_name))

    def check_uuid(self, value):
        # This does a bit of hand waving to "type check" the string:
        # a UUID string has exactly five dash-separated parts.
        t = value.split('-')
        if len(t) != 5:
            raise ValueError

    def check_instance(self, value):
        try:
            obj_lineage = value.get_lineage()
            cls_lineage = self.reference_class.get_lineage()
        except AttributeError:
            # Anything without get_lineage() is not a Model at all.
            raise ValueError('%s is not a Model' % value)
        if not obj_lineage.startswith(cls_lineage):
            # Previously this TypeError was swallowed by a bare except
            # and re-raised as "not a Model", hiding the real cause.
            raise TypeError('%s not instance of %s' % (obj_lineage, cls_lineage))

    def validate(self, value):
        if self.required and value is None:
            raise ValueError('%s is a required property' % self.name)
        if value == self.default_value():
            return
        # Id strings are accepted as-is; anything else must be a
        # compatible Model instance.
        if not isinstance(value, str) and not isinstance(value, unicode):
            self.check_instance(value)
+
class _ReverseReferenceProperty(Property):
    """Read-only query descriptor injected onto the referenced class by
    ReferenceProperty.__property_config__.

    Accessing it on an instance returns a Query for every object whose
    reference property points at that instance; accessing it on the
    class returns the descriptor itself.
    """

    data_type = Query
    type_name = 'query'

    def __init__(self, model, prop, name):
        self.__model = model
        self.__property = prop
        self.collection_name = prop
        self.name = name
        self.item_type = model

    def __get__(self, model_instance, model_class):
        """Fetches collection of model instances of this collection property."""
        if model_instance is None:
            return self
        query = Query(self.__model)
        if isinstance(self.__property, list):
            # Multiple referring properties: filter on all of them.
            props = ["%s =" % prop for prop in self.__property]
            return query.filter(props, model_instance)
        return query.filter(self.__property + ' =', model_instance)

    def __set__(self, model_instance, value):
        """Not possible to set a new collection."""
        raise ValueError('Virtual property is read-only')
+
+
class CalculatedProperty(Property):
    """A property whose value is produced by the stored slot rather than
    assigned directly.

    :param calculated_type: the python type the calculation produces.
    :param use_method: if True, the stored slot holds a callable that is
        invoked on access to produce the value.
    """

    def __init__(self, verbose_name=None, name=None, default=None,
                 required=False, validator=None, choices=None,
                 calculated_type=int, unique=False, use_method=False):
        Property.__init__(self, verbose_name, name, default, required,
                          validator, choices, unique)
        self.calculated_type = calculated_type
        self.use_method = use_method

    def __get__(self, obj, objtype):
        value = self.default_value()
        if obj:
            try:
                value = getattr(obj, self.slot_name)
                if self.use_method:
                    value = value()
            except AttributeError:
                # Nothing stored yet; fall back to the default.
                pass
        return value

    def __set__(self, obj, value):
        """Calculated values cannot be assigned directly; silently ignore."""
        pass

    def _set_direct(self, obj, value):
        # Internal hook to store a value, bypassing the read-only
        # __set__ above (no-op when a method computes the value).
        if not self.use_method:
            setattr(obj, self.slot_name, value)

    def get_value_for_datastore(self, model_instance):
        # Only simple scalar results are persisted; anything else is
        # recomputed on access and never stored.
        if self.calculated_type in (str, int, bool):
            value = self.__get__(model_instance, model_instance.__class__)
            return value
        else:
            return None
+
class ListProperty(Property):
    """A property holding a list of homogeneous items.

    :param item_type: the python type every element must have.  The
        int/long and str/unicode pairs are treated as interchangeable.
    """

    data_type = list
    type_name = 'List'

    def __init__(self, item_type, verbose_name=None, name=None, default=None, **kwds):
        if default is None:
            default = []
        self.item_type = item_type
        Property.__init__(self, verbose_name, name, default=default,
                          required=True, **kwds)

    def _expected_type(self):
        """Return the isinstance() target for elements, widening the
        numeric and string types to their interchangeable pairs.
        (Shared by validate and __set__.)"""
        if self.item_type in (int, long):
            return (int, long)
        elif self.item_type in (str, unicode):
            return (str, unicode)
        return self.item_type

    def validate(self, value):
        if value is not None:
            # A bare item is silently wrapped into a one-element list.
            if not isinstance(value, list):
                value = [value]
            item_type = self._expected_type()
            for item in value:
                if not isinstance(item, item_type):
                    if item_type == (int, long):
                        raise ValueError('Items in the %s list must all be integers.' % self.name)
                    else:
                        raise ValueError('Items in the %s list must all be %s instances' %
                                         (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        # Only None is empty; [] is a real (empty-list) value.
        return value is None

    def default_value(self):
        # Copy so callers can't mutate the shared default list.
        return list(super(ListProperty, self).default_value())

    def __set__(self, obj, value):
        """Override the set method to allow them to set the property to an instance of the item_type instead of requiring a list to be passed in"""
        if isinstance(value, self._expected_type()):
            value = [value]
        elif value is None:
            # Override to allow them to set this to "None" to remove everything
            value = []
        return super(ListProperty, self).__set__(obj, value)
+
+
class MapProperty(Property):
    """A property holding a dict whose values share a single type.

    :param item_type: the python type every value must have (keys are
        not type-checked).  The int/long and str/unicode pairs are
        treated as interchangeable.
    """

    data_type = dict
    type_name = 'Map'

    def __init__(self, item_type=str, verbose_name=None, name=None, default=None, **kwds):
        if default is None:
            default = {}
        self.item_type = item_type
        Property.__init__(self, verbose_name, name, default=default,
                          required=True, **kwds)

    def _expected_type(self):
        """isinstance() target for values, widening int/long and
        str/unicode to their interchangeable pairs."""
        if self.item_type in (int, long):
            return (int, long)
        elif self.item_type in (str, unicode):
            return (str, unicode)
        return self.item_type

    def validate(self, value):
        if value is not None:
            if not isinstance(value, dict):
                raise ValueError('Value must be of type dict')
            item_type = self._expected_type()
            for key in value:
                if not isinstance(value[key], item_type):
                    if item_type == (int, long):
                        raise ValueError('Values in the %s Map must all be integers.' % self.name)
                    else:
                        raise ValueError('Values in the %s Map must all be %s instances' %
                                         (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        # Only None is empty; {} is a real (empty-map) value.
        return value is None

    def default_value(self):
        # Return a copy so callers can't mutate the shared default
        # (previously this ignored a user-supplied default entirely,
        # unlike ListProperty.default_value).
        return dict(super(MapProperty, self).default_value())
diff --git a/boto/sdb/db/query.py b/boto/sdb/db/query.py
new file mode 100644
index 0000000..31b71aa
--- /dev/null
+++ b/boto/sdb/db/query.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
class Query(object):
    """A chainable, lazily-executed query over a Model class.

    filter(), fetch() and order() all return self so calls can be
    chained; iteration delegates to the model's manager.
    """

    __local_iter__ = None

    def __init__(self, model_class, limit=None, next_token=None, manager=None):
        self.model_class = model_class
        self.limit = limit
        self.offset = 0
        if manager:
            self.manager = manager
        else:
            self.manager = self.model_class._manager
        self.filters = []
        self.select = None
        self.sort_by = None
        self.rs = None
        self.next_token = next_token

    def __iter__(self):
        return iter(self.manager.query(self))

    def next(self):
        # Support use as a plain (non-restartable) iterator as well.
        if self.__local_iter__ is None:
            self.__local_iter__ = self.__iter__()
        return self.__local_iter__.next()

    def filter(self, property_operator, value):
        """Add a filter clause, e.g. filter('name =', 'foo')."""
        self.filters.append((property_operator, value))
        return self

    def fetch(self, limit, offset=0):
        """Not currently fully supported, but we can use this
        to allow them to set a limit in a chainable method"""
        self.limit = limit
        self.offset = offset
        return self

    def count(self, quick=True):
        return self.manager.count(self.model_class, self.filters, quick, self.sort_by, self.select)

    def get_query(self):
        return self.manager._build_filter_part(self.model_class, self.filters, self.sort_by, self.select)

    def order(self, key):
        """Set the sort key used when the manager builds the query."""
        self.sort_by = key
        return self

    def to_xml(self, doc=None):
        if not doc:
            xmlmanager = self.model_class.get_xmlmanager()
            doc = xmlmanager.new_doc()
        for obj in self:
            obj.to_xml(doc)
        return doc

    def get_next_token(self):
        # Prefer the live result set's token once a query has run.
        if self.rs:
            return self.rs.next_token
        if self._next_token:
            return self._next_token
        return None

    def set_next_token(self, token):
        self._next_token = token

    next_token = property(get_next_token, set_next_token)
diff --git a/boto/sdb/db/sequence.py b/boto/sdb/db/sequence.py
new file mode 100644
index 0000000..be79c56
--- /dev/null
+++ b/boto/sdb/db/sequence.py
@@ -0,0 +1,224 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import SDBResponseError
+
class SequenceGenerator(object):
    """Generic Sequence Generator object, this takes a single
    string as the "sequence" and uses that to figure out
    what the next value in a string is. For example
    if you give "ABC" and pass in "A" it will give you "B",
    and if you give it "C" it will give you "AA".

    If you set "rollover" to True in the above example, passing
    in "C" would give you "A" again.

    The Sequence string can be a string or any iterable
    that has the "index" function and is indexable.
    """
    __name__ = "SequenceGenerator"

    def __init__(self, sequence_string, rollover=False):
        """Create a new SequenceGenerator using the sequence_string
        as how to generate the next item.

        :param sequence_string: The string or list that explains
            how to generate the next item in the sequence
        :type sequence_string: str,iterable

        :param rollover: Rollover instead of incrementing when
            we hit the end of the sequence
        :type rollover: bool
        """
        self.sequence_string = sequence_string
        # Width of a single "digit"; every element is assumed to have
        # the same length as the first one.
        self.sequence_length = len(sequence_string[0])
        self.rollover = rollover
        self.last_item = sequence_string[-1]
        self.__name__ = "%s('%s')" % (self.__class__.__name__, sequence_string)

    def __call__(self, val, last=None):
        """Get the next value in the sequence"""
        # If they pass us in a string that's not at least
        # the length of our sequence, then return the
        # first element in our sequence
        if val is None or len(val) < self.sequence_length:
            return self.sequence_string[0]
        last_value = val[-self.sequence_length:]
        if (not self.rollover) and (last_value == self.last_item):
            # Carry: recursively increment the higher-order digits and
            # wrap this one (e.g. "C" -> "AA" for sequence "ABC").
            val = "%s%s" % (self(val[:-self.sequence_length]), self._inc(last_value))
        else:
            val = "%s%s" % (val[:-self.sequence_length], self._inc(last_value))
        return val

    def _inc(self, val):
        """Increment a single value"""
        assert(len(val) == self.sequence_length)
        return self.sequence_string[(self.sequence_string.index(val) + 1) % len(self.sequence_string)]
+
+
+
+#
+# Simple Sequence Functions
+#
def increment_by_one(cv=None, lv=None):
    """Increment by one; None (no current value) starts the sequence at 0."""
    if cv is None:
        return 0
    return cv + 1
+
def double(cv=None, lv=None):
    """Double the current value; None starts the sequence at 1."""
    if cv is None:
        return 1
    return cv * 2
+
def fib(cv=1, lv=0):
    """The fibonacci sequence, this incrementer uses the
    last value"""
    # Treat missing values as the seed pair (1, 0).
    if cv is None:
        cv = 1
    if lv is None:
        lv = 0
    return cv + lv
+
# Ready-made generator that counts through the uppercase alphabet:
# A..Z, then AA, AB, ...
increment_string = SequenceGenerator("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+
+
+
+class Sequence(object):
+ """A simple Sequence using the new SDB "Consistent" features
+ Based largly off of the "Counter" example from mitch garnaat:
+ http://bitbucket.org/mitch/stupidbototricks/src/tip/counter.py"""
+
+
+ def __init__(self, id=None, domain_name=None, fnc=increment_by_one, init_val=None):
+ """Create a new Sequence, using an optional function to
+ increment to the next number, by default we just increment by one.
+ Every parameter here is optional, if you don't specify any options
+ then you'll get a new SequenceGenerator with a random ID stored in the
+ default domain that increments by one and uses the default botoweb
+ environment
+
+ :param id: Optional ID (name) for this counter
+ :type id: str
+
+ :param domain_name: Optional domain name to use, by default we get this out of the
+ environment configuration
+ :type domain_name:str
+
+ :param fnc: Optional function to use for the incrementation, by default we just increment by one
+ There are several functions defined in this module.
+ Your function must accept "None" to get the initial value
+ :type fnc: function, str
+
+ :param init_val: Initial value, by default this is the first element in your sequence,
+ but you can pass in any value, even a string if you pass in a function that uses
+ strings instead of ints to increment
+ """
+ self._db = None
+ self._value = None
+ self.last_value = None
+ self.domain_name = domain_name
+ self.id = id
+ if self.id == None:
+ import uuid
+ self.id = str(uuid.uuid4())
+ if init_val == None:
+ init_val = fnc(init_val)
+ self.val = init_val
+
+ self.item_type = type(fnc(None))
+ self.timestamp = None
+ # Allow us to pass in a full name to a function
+ if type(fnc) == str:
+ from boto.utils import find_class
+ fnc = find_class(fnc)
+ self.fnc = fnc
+
+ def set(self, val):
+ """Set the value"""
+ import time
+ now = time.time()
+ expected_value = []
+ new_val = {}
+ new_val['timestamp'] = now
+ if self._value != None:
+ new_val['last_value'] = self._value
+ expected_value = ['current_value', str(self._value)]
+ new_val['current_value'] = val
+ try:
+ self.db.put_attributes(self.id, new_val, expected_value=expected_value)
+ self.timestamp = new_val['timestamp']
+ except SDBResponseError, e:
+ if e.status == 409:
+ raise ValueError, "Sequence out of sync"
+ else:
+ raise
+
+
+ def get(self):
+ """Get the value"""
+ val = self.db.get_attributes(self.id, consistent_read=True)
+ if val and val.has_key('timestamp'):
+ self.timestamp = val['timestamp']
+ if val and val.has_key('current_value'):
+ self._value = self.item_type(val['current_value'])
+ if val.has_key("last_value") and val['last_value'] != None:
+ self.last_value = self.item_type(val['last_value'])
+ return self._value
+
+ val = property(get, set)
+
+ def __repr__(self):
+ return "%s('%s', '%s', '%s.%s', '%s')" % (
+ self.__class__.__name__,
+ self.id,
+ self.domain_name,
+ self.fnc.__module__, self.fnc.__name__,
+ self.val)
+
+
+ def _connect(self):
+ """Connect to our domain"""
+ if not self._db:
+ if not self.domain_name:
+ import boto
+ sdb = boto.connect_sdb()
+ self.domain_name = boto.config.get("DB", "sequence_db", boto.config.get("DB", "db_name", "default"))
+ try:
+ self._db = sdb.get_domain(self.domain_name)
+ except SDBResponseError, e:
+ if e.status == 400:
+ self._db = sdb.create_domain(self.domain_name)
+ else:
+ raise
+ return self._db
+
+ db = property(_connect)
+
+ def next(self):
+ self.val = self.fnc(self.val, self.last_value)
+ return self.val
+
+ def delete(self):
+ """Remove this sequence"""
+ self.db.delete_attributes(self.id)
+
+ def __del__(self):
+ self.delete()
diff --git a/boto/sdb/db/test_db.py b/boto/sdb/db/test_db.py
new file mode 100644
index 0000000..0c345ab
--- /dev/null
+++ b/boto/sdb/db/test_db.py
@@ -0,0 +1,225 @@
+from boto.sdb.db.model import Model
+from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty
+from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty
+from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty
+from datetime import datetime
+import time
+from boto.exception import SDBPersistenceError
+
+_objects = {}
+
+#
+# This will eventually be moved to the boto.tests module and become a real unit test
+# but for now it will live here. It shows examples of each of the Property types in
+# use and tests the basic operations.
+#
class TestBasic(Model):
    """Model exercising the four scalar property types."""

    name = StringProperty()
    size = IntegerProperty()
    foo = BooleanProperty()
    date = DateTimeProperty()
+
class TestFloat(Model):
    """Model exercising FloatProperty."""

    name = StringProperty()
    value = FloatProperty()
+
class TestRequired(Model):
    """Model exercising a required property with a default."""

    req = StringProperty(required=True, default='foo')
+
class TestReference(Model):
    """Model exercising ReferenceProperty and its reverse collection."""

    ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs')
+
class TestSubClass(TestBasic):
    """Subclass of TestBasic exercising model inheritance."""

    answer = IntegerProperty()
+
class TestPassword(Model):
    """Model exercising PasswordProperty."""
    password = PasswordProperty()
+
class TestList(Model):
    """Model exercising a ListProperty of ints."""

    name = StringProperty()
    nums = ListProperty(int)
+
class TestMap(Model):
    """Model exercising MapProperty."""

    name = StringProperty()
    map = MapProperty()
+
class TestListReference(Model):
    """Model exercising a ListProperty of Model references."""

    name = StringProperty()
    basics = ListProperty(TestBasic)
+
class TestAutoNow(Model):
    """Model exercising auto_now / auto_now_add datetime properties."""

    create_date = DateTimeProperty(auto_now_add=True)
    modified_date = DateTimeProperty(auto_now=True)
+
class TestUnique(Model):
    """Model exercising the unique constraint on a property."""
    name = StringProperty(unique=True)
+
def test_basic():
    """Round-trip a TestBasic object through SimpleDB and verify every
    scalar attribute survives (single-arg print() works identically on
    Python 2 and 3)."""
    global _objects
    t = TestBasic()
    t.name = 'simple'
    t.size = -42
    t.foo = True
    t.date = datetime.now()
    print('saving object')
    t.put()
    _objects['test_basic_t'] = t
    time.sleep(5)
    print('now try retrieving it')
    tt = TestBasic.get_by_id(t.id)
    _objects['test_basic_tt'] = tt
    assert tt.id == t.id
    l = TestBasic.get_by_id([t.id])
    assert len(l) == 1
    assert l[0].id == t.id
    assert t.size == tt.size
    assert t.foo == tt.foo
    assert t.name == tt.name
    #assert t.date == tt.date
    return t
+
def test_float():
    """Round-trip a FloatProperty value through SimpleDB."""
    global _objects
    t = TestFloat()
    t.name = 'float object'
    t.value = 98.6
    print('saving object')
    t.save()
    _objects['test_float_t'] = t
    time.sleep(5)
    print('now try retrieving it')
    tt = TestFloat.get_by_id(t.id)
    _objects['test_float_tt'] = tt
    assert tt.id == t.id
    assert tt.name == t.name
    assert tt.value == t.value
    return t
+
def test_required():
    """Persist an object that relies on a required property's default."""
    global _objects
    obj = TestRequired()
    _objects['test_required_t'] = obj
    obj.put()
    return obj
+
def test_reference(t=None):
    """Verify a ReferenceProperty round-trips and the reverse
    collection on the target resolves."""
    global _objects
    if not t:
        t = test_basic()
    tt = TestReference()
    tt.ref = t
    tt.put()
    time.sleep(10)
    tt = TestReference.get_by_id(tt.id)
    _objects['test_reference_tt'] = tt
    assert tt.ref.id == t.id
    for o in t.refs:
        print(o)
+
def test_subclass():
    """Persist a subclassed model using inherited properties."""
    global _objects
    obj = TestSubClass()
    _objects['test_subclass_t'] = obj
    obj.name = 'a subclass'
    obj.size = -489
    obj.save()
+
def test_password():
    """Verify a stored password compares equal to the original string
    but is not stored as that plain string."""
    global _objects
    obj = TestPassword()
    _objects['test_password_t'] = obj
    obj.password = "foo"
    obj.save()
    time.sleep(5)
    # Re-fetch to make sure it stored ok.
    fetched = TestPassword.get_by_id(obj.id)
    _objects['test_password_tt'] = fetched
    # Password equality still holds...
    assert fetched.password == "foo"
    # ...but the raw stored form is not the plain string.
    assert str(fetched.password) != "foo"
+
def test_list():
    """Verify a list of ints survives a round trip as ints."""
    global _objects
    obj = TestList()
    _objects['test_list_t'] = obj
    obj.name = 'a list of ints'
    obj.nums = [1, 2, 3, 4, 5]
    obj.put()
    fetched = TestList.get_by_id(obj.id)
    _objects['test_list_tt'] = fetched
    assert fetched.name == obj.name
    for n in fetched.nums:
        assert isinstance(n, int)
+
def test_list_reference():
    """Verify a list of Model references resolves after a round trip."""
    global _objects
    target = TestBasic()
    target.put()
    _objects['test_list_ref_t'] = target
    holder = TestListReference()
    holder.name = "foo"
    holder.basics = [target]
    holder.put()
    time.sleep(5)
    _objects['test_list_ref_tt'] = holder
    fetched = TestListReference.get_by_id(holder.id)
    assert fetched.basics[0].id == target.id
+
def test_unique():
    """Verify a duplicate value on a unique property is rejected."""
    global _objects
    first = TestUnique()
    name = 'foo' + str(int(time.time()))
    first.name = name
    first.put()
    _objects['test_unique_t'] = first
    time.sleep(10)
    second = TestUnique()
    _objects['test_unique_tt'] = second
    second.name = name
    try:
        second.put()
        assert False
    except SDBPersistenceError:
        pass
+
def test_datetime():
    """Verify auto_now_add timestamps survive a round trip."""
    global _objects
    obj = TestAutoNow()
    obj.put()
    _objects['test_datetime_t'] = obj
    time.sleep(5)
    fetched = TestAutoNow.get_by_id(obj.id)
    assert fetched.create_date.timetuple() == obj.create_date.timetuple()
+
def test():
    """Run every check in sequence, announcing each one."""
    print('test_basic')
    t1 = test_basic()
    print('test_required')
    test_required()
    print('test_reference')
    test_reference(t1)
    print('test_subclass')
    test_subclass()
    print('test_password')
    test_password()
    print('test_list')
    test_list()
    print('test_list_reference')
    test_list_reference()
    print("test_datetime")
    test_datetime()
    print('test_unique')
    test_unique()
+
# Allow running this module directly as an integration smoke test
# (requires live SimpleDB credentials).
if __name__ == "__main__":
    test()
diff --git a/boto/sdb/domain.py b/boto/sdb/domain.py
new file mode 100644
index 0000000..e809124
--- /dev/null
+++ b/boto/sdb/domain.py
@@ -0,0 +1,377 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an SDB Domain
+"""
+from boto.sdb.queryresultset import SelectResultSet
+
+class Domain:
+
+ def __init__(self, connection=None, name=None):
+ self.connection = connection
+ self.name = name
+ self._metadata = None
+
+ def __repr__(self):
+ return 'Domain:%s' % self.name
+
+ def __iter__(self):
+ return iter(self.select("SELECT * FROM `%s`" % self.name))
+
+ def startElement(self, name, attrs, connection):
+ return None
+
+ def endElement(self, name, value, connection):
+ if name == 'DomainName':
+ self.name = value
+ else:
+ setattr(self, name, value)
+
+ def get_metadata(self):
+ if not self._metadata:
+ self._metadata = self.connection.domain_metadata(self)
+ return self._metadata
+
+ def put_attributes(self, item_name, attributes,
+ replace=True, expected_value=None):
+ """
+ Store attributes for a given item.
+
+ :type item_name: string
+ :param item_name: The name of the item whose attributes are being stored.
+
+ :type attribute_names: dict or dict-like object
+ :param attribute_names: The name/value pairs to store as attributes
+
+ :type expected_value: list
+ :param expected_value: If supplied, this is a list or tuple consisting
+ of a single attribute name and expected value. The list can be
+ of the form:
+
+ * ['name', 'value']
+
+ In which case the call will first verify that the attribute
+ "name" of this item has a value of "value". If it does, the delete
+ will proceed, otherwise a ConditionalCheckFailed error will be
+ returned. The list can also be of the form:
+
+ * ['name', True|False]
+
+ which will simply check for the existence (True) or non-existence
+ (False) of the attribute.
+
+ :type replace: bool
+ :param replace: Whether the attribute values passed in will replace
+ existing values or will be added as addition values.
+ Defaults to True.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.put_attributes(self, item_name, attributes,
+ replace, expected_value)
+
+ def batch_put_attributes(self, items, replace=True):
+ """
+ Store attributes for multiple items.
+
+ :type items: dict or dict-like object
+ :param items: A dictionary-like object. The keys of the dictionary are
+ the item names and the values are themselves dictionaries
+ of attribute names/values, exactly the same as the
+ attribute_names parameter of the scalar put_attributes
+ call.
+
+ :type replace: bool
+ :param replace: Whether the attribute values passed in will replace
+ existing values or will be added as addition values.
+ Defaults to True.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.batch_put_attributes(self, items, replace)
+
+ def get_attributes(self, item_name, attribute_name=None,
+ consistent_read=False, item=None):
+ """
+ Retrieve attributes for a given item.
+
+ :type item_name: string
+ :param item_name: The name of the item whose attributes are being retrieved.
+
+ :type attribute_names: string or list of strings
+ :param attribute_names: An attribute name or list of attribute names. This
+ parameter is optional. If not supplied, all attributes
+ will be retrieved for the item.
+
+ :rtype: :class:`boto.sdb.item.Item`
+ :return: An Item mapping type containing the requested attribute name/values
+ """
+ return self.connection.get_attributes(self, item_name, attribute_name,
+ consistent_read, item)
+
+ def delete_attributes(self, item_name, attributes=None,
+ expected_values=None):
+ """
+ Delete attributes from a given item.
+
+ :type item_name: string
+ :param item_name: The name of the item whose attributes are being deleted.
+
+ :type attributes: dict, list or :class:`boto.sdb.item.Item`
+ :param attributes: Either a list containing attribute names which will cause
+ all values associated with that attribute name to be deleted or
+ a dict or Item containing the attribute names and keys and list
+ of values to delete as the value. If no value is supplied,
+ all attribute name/values for the item will be deleted.
+
+ :type expected_value: list
+ :param expected_value: If supplied, this is a list or tuple consisting
+ of a single attribute name and expected value. The list can be of
+ the form:
+
+ * ['name', 'value']
+
+ In which case the call will first verify that the attribute "name"
+ of this item has a value of "value". If it does, the delete
+ will proceed, otherwise a ConditionalCheckFailed error will be
+ returned. The list can also be of the form:
+
+ * ['name', True|False]
+
+ which will simply check for the existence (True) or
+ non-existence (False) of the attribute.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.delete_attributes(self, item_name, attributes,
+ expected_values)
+
+ def batch_delete_attributes(self, items):
+ """
+ Delete multiple items in this domain.
+
+ :type items: dict or dict-like object
+ :param items: A dictionary-like object. The keys of the dictionary are
+ the item names and the values are either:
+
+ * dictionaries of attribute names/values, exactly the
+ same as the attribute_names parameter of the scalar
+ put_attributes call. The attribute name/value pairs
+ will only be deleted if they match the name/value
+ pairs passed in.
+ * None which means that all attributes associated
+ with the item should be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ return self.connection.batch_delete_attributes(self, items)
+
+ def select(self, query='', next_token=None, consistent_read=False, max_items=None):
+ """
+ Returns a set of Attributes for item names within domain_name that match the query.
+ The query must be expressed in using the SELECT style syntax rather than the
+ original SimpleDB query language.
+
+ :type query: string
+ :param query: The SimpleDB query to be performed.
+
+ :rtype: iter
+ :return: An iterator containing the results. This is actually a generator
+ function that will iterate across all search results, not just the
+ first page.
+ """
+ return SelectResultSet(self, query, max_items=max_items, next_token=next_token,
+ consistent_read=consistent_read)
+
def get_item(self, item_name, consistent_read=False):
    """
    Retrieves an item from the domain, along with all of its attributes.

    :param string item_name: The name of the item to retrieve.
    :rtype: :class:`boto.sdb.item.Item` or ``None``
    :keyword bool consistent_read: When set to true, ensures that the most
        recent data is returned.
    :return: The requested item, or ``None`` if there was no match found
    """
    fetched = self.get_attributes(item_name, consistent_read=consistent_read)
    # An empty result means no such item; bail out early.
    if not fetched:
        return None
    # Bind the item back to this domain so item-level calls work.
    fetched.domain = self
    return fetched
+
def new_item(self, item_name):
    """
    Create a new, empty item object bound to this domain.  Nothing is
    written to SimpleDB until attributes are saved on the item.

    :type item_name: string
    :param item_name: The name for the new item.
    :return: A new instance of the connection's configured item class.
    """
    return self.connection.item_cls(self, item_name)
+
def delete_item(self, item):
    """
    Delete the given item from this domain, removing all of its
    attributes in SimpleDB.

    :type item: :class:`boto.sdb.item.Item`
    :param item: The item to delete; only its ``name`` is used.
    """
    self.delete_attributes(item.name)
+
def to_xml(self, f=None):
    """Get this domain as an XML DOM Document
    :param f: Optional File to dump directly to
    :type f: File or Stream

    :return: File object where the XML has been dumped to
    :rtype: file
    """
    if not f:
        # No target supplied: buffer the dump in an anonymous temporary
        # file which is handed back to the caller (rewound below).
        from tempfile import TemporaryFile
        f = TemporaryFile()
    print >> f, '<?xml version="1.0" encoding="UTF-8"?>'
    print >> f, '<Domain id="%s">' % self.name
    # Iterating the domain itself yields every Item it contains.
    for item in self:
        print >> f, '\t<Item id="%s">' % item.name
        for k in item:
            print >> f, '\t\t<attribute id="%s">' % k
            values = item[k]
            # Multi-valued attributes come back as lists; normalize so a
            # single scalar value takes the same code path.
            if not isinstance(values, list):
                values = [values]
            for value in values:
                # CDATA keeps arbitrary attribute text from breaking the XML.
                print >> f, '\t\t\t<value><![CDATA[',
                if isinstance(value, unicode):
                    value = value.encode('utf-8', 'replace')
                else:
                    # Decode then re-encode so output is always valid UTF-8;
                    # 'replace' substitutes any undecodable bytes.
                    value = unicode(value, errors='replace').encode('utf-8', 'replace')
                f.write(value)
                print >> f, ']]></value>'
            print >> f, '\t\t</attribute>'
        print >> f, '\t</Item>'
    print >> f, '</Domain>'
    f.flush()
    # Rewind so the caller can read the document from the start.
    f.seek(0)
    return f
+
+
def from_xml(self, doc):
    """Load this domain based on an XML document

    :param doc: A file (or file-like object) containing a domain dump,
        as produced by :meth:`to_xml`.
    :return: The SAX handler used for the parse; by the time this
        returns, its uploader threads have been started.
    """
    import xml.sax
    # DomainDumpParser streams parsed items into SimpleDB via uploader
    # threads as they are encountered.
    handler = DomainDumpParser(self)
    xml.sax.parse(doc, handler)
    return handler
+
def delete(self):
    """
    Delete this domain, and all items under it

    :return: Whatever the underlying connection's delete call returns.
    """
    return self.connection.delete(self)
+
+
class DomainMetaData:
    """
    Container for the statistics returned by a SimpleDB DomainMetadata
    call: item/attribute counts, their storage sizes, and the snapshot
    timestamp.  Fields are populated by the SAX response parser through
    endElement.
    """

    def __init__(self, domain=None):
        self.domain = domain
        self.item_count = None
        self.item_names_size = None
        self.attr_name_count = None
        self.attr_names_size = None
        self.attr_value_count = None
        self.attr_values_size = None
        # Bug fix: timestamp was assigned in endElement but never
        # initialized here, so reading it before (or without) a parse
        # raised AttributeError.
        self.timestamp = None

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'ItemCount':
            self.item_count = int(value)
        elif name == 'ItemNamesSizeBytes':
            self.item_names_size = int(value)
        elif name == 'AttributeNameCount':
            self.attr_name_count = int(value)
        elif name == 'AttributeNamesSizeBytes':
            self.attr_names_size = int(value)
        elif name == 'AttributeValueCount':
            self.attr_value_count = int(value)
        elif name == 'AttributeValuesSizeBytes':
            self.attr_values_size = int(value)
        elif name == 'Timestamp':
            self.timestamp = value
        else:
            # Store unrecognized elements verbatim for forward
            # compatibility with new metadata fields.
            setattr(self, name, value)
+
+import sys
+from xml.sax.handler import ContentHandler
class DomainDumpParser(ContentHandler):
    """
    SAX parser for a domain that has been dumped (see Domain.to_xml).
    Re-imports the dump into SimpleDB, batching items into
    UploaderThread workers, 20 items per thread.
    """

    def __init__(self, domain):
        # Bug fix: the SAX base class initializer was never invoked,
        # leaving ContentHandler's internal state (e.g. its locator)
        # unset.
        ContentHandler.__init__(self)
        self.uploader = UploaderThread(domain)
        self.item_id = None
        self.attrs = {}
        self.attribute = None
        self.value = ""
        self.domain = domain

    def startElement(self, name, attrs):
        if name == "Item":
            self.item_id = attrs['id']
            self.attrs = {}
        elif name == "attribute":
            self.attribute = attrs['id']
        elif name == "value":
            self.value = ""

    def characters(self, ch):
        # SAX may deliver element text in several chunks; accumulate.
        self.value += ch

    def endElement(self, name):
        if name == "value":
            if self.value and self.attribute:
                value = self.value.strip()
                attr_name = self.attribute.strip()
                # Multi-valued attributes accumulate in a list per name.
                if attr_name in self.attrs:
                    self.attrs[attr_name].append(value)
                else:
                    self.attrs[attr_name] = [value]
        elif name == "Item":
            self.uploader.items[self.item_id] = self.attrs
            # Every 20 items we spawn off the uploader
            if len(self.uploader.items) >= 20:
                self.uploader.start()
                self.uploader = UploaderThread(self.domain)
        elif name == "Domain":
            # If we're done, spawn off our last Uploader Thread
            self.uploader.start()
+
+from threading import Thread
+class UploaderThread(Thread):
+ """Uploader Thread"""
+
+ def __init__(self, domain):
+ self.db = domain
+ self.items = {}
+ Thread.__init__(self)
+
+ def run(self):
+ try:
+ self.db.batch_put_attributes(self.items)
+ except:
+ print "Exception using batch put, trying regular put instead"
+ for item_name in self.items:
+ self.db.put_attributes(item_name, self.items[item_name])
+ print ".",
+ sys.stdout.flush()
diff --git a/boto/sdb/item.py b/boto/sdb/item.py
new file mode 100644
index 0000000..4705b31
--- /dev/null
+++ b/boto/sdb/item.py
@@ -0,0 +1,183 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import base64
+
class Item(dict):
    """
    A ``dict`` sub-class that serves as an object representation of a
    SimpleDB item. An item in SDB is similar to a row in a relational
    database. Items belong to a :py:class:`Domain <boto.sdb.domain.Domain>`,
    which is similar to a table in a relational database.

    The keys on instances of this object correspond to attributes that are
    stored on the SDB item.

    .. tip::
        While it is possible to instantiate this class directly, you may want
        to use the convenience methods on :py:class:`boto.sdb.domain.Domain`
        for that purpose. For example,
        :py:meth:`boto.sdb.domain.Domain.get_item`.
    """
    def __init__(self, domain, name='', active=False):
        """
        :type domain: :py:class:`boto.sdb.domain.Domain`
        :param domain: The domain that this item belongs to.

        :param str name: The name of this item. This name will be used when
            querying for items using methods like
            :py:meth:`boto.sdb.domain.Domain.get_item`
        """
        dict.__init__(self)
        self.domain = domain
        self.name = name
        self.active = active
        self.request_id = None
        # Set per-element by the SAX parser when a response value is
        # base64-encoded.
        self.encoding = None
        self.in_attribute = False
        self.converter = self.domain.connection.converter

    def startElement(self, name, attrs, connection):
        if name == 'Attribute':
            self.in_attribute = True
        self.encoding = attrs.get('encoding', None)
        return None

    def decode_value(self, value):
        # The encoding flag is one-shot: it applies to the element just
        # parsed and is cleared once consumed.
        if self.encoding == 'base64':
            self.encoding = None
            return base64.decodestring(value)
        else:
            return value

    def endElement(self, name, value, connection):
        if name == 'ItemName':
            self.name = self.decode_value(value)
        elif name == 'Name':
            if self.in_attribute:
                self.last_key = self.decode_value(value)
            else:
                self.name = self.decode_value(value)
        elif name == 'Value':
            # Idiom fix: "key in dict" instead of Python-2-only has_key.
            if self.last_key in self:
                # Second value for the same attribute: promote to a list.
                if not isinstance(self[self.last_key], list):
                    self[self.last_key] = [self[self.last_key]]
                value = self.decode_value(value)
                if self.converter:
                    value = self.converter.decode(value)
                self[self.last_key].append(value)
            else:
                value = self.decode_value(value)
                if self.converter:
                    value = self.converter.decode(value)
                self[self.last_key] = value
        elif name == 'BoxUsage':
            # Best-effort accounting; the connection may not track usage.
            # Narrowed from a bare "except:" so we no longer swallow
            # SystemExit/KeyboardInterrupt.
            try:
                connection.box_usage += float(value)
            except Exception:
                pass
        elif name == 'RequestId':
            self.request_id = value
        elif name == 'Attribute':
            self.in_attribute = False
        else:
            setattr(self, name, value)

    def load(self):
        """
        Loads or re-loads this item's attributes from SDB.

        .. warning::
            If you have changed attribute values on an Item instance,
            this method will over-write the values if they are different in
            SDB. For any local attributes that don't yet exist in SDB,
            they will be safe.
        """
        self.domain.get_attributes(self.name, item=self)

    def save(self, replace=True):
        """
        Saves this item to SDB.

        :param bool replace: If ``True``, delete any attributes on the remote
            SDB item that have a ``None`` value on this object.
        """
        self.domain.put_attributes(self.name, self, replace)
        # Delete any attributes set to "None"
        if replace:
            # Idiom fix: identity comparison with None, and a
            # comprehension instead of a manual append loop.
            del_attrs = [name for name in self if self[name] is None]
            if del_attrs:
                self.domain.delete_attributes(self.name, del_attrs)

    def add_value(self, key, value):
        """
        Helps set or add to attributes on this item. If you are adding a new
        attribute that has yet to be set, it will simply create an attribute
        named ``key`` with your given ``value`` as its value. If you are
        adding a value to an existing attribute, this method will convert the
        attribute to a list (if it isn't already) and append your new value
        to said list.

        For clarification, consider the following interactive session:

        .. code-block:: python

            >>> item = some_domain.get_item('some_item')
            >>> item.has_key('some_attr')
            False
            >>> item.add_value('some_attr', 1)
            >>> item['some_attr']
            1
            >>> item.add_value('some_attr', 2)
            >>> item['some_attr']
            [1, 2]

        :param str key: The attribute to add a value to.
        :param object value: The value to set or append to the attribute.
        """
        if key in self:
            # We already have this key on the item.
            if not isinstance(self[key], list):
                # The key isn't already a list, take its current value and
                # convert it to a list with the only member being the
                # current value.
                self[key] = [self[key]]
            # Add the new value to the list.
            self[key].append(value)
        else:
            # This is a new attribute, just set it.
            self[key] = value

    def delete(self):
        """
        Deletes this item in SDB.

        .. note:: This local Python object remains in its current state
            after deletion, this only deletes the remote item in SDB.
        """
        self.domain.delete_item(self)
+
+
+
+
diff --git a/boto/sdb/persist/__init__.py b/boto/sdb/persist/__init__.py
new file mode 100644
index 0000000..2f2b0c1
--- /dev/null
+++ b/boto/sdb/persist/__init__.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.utils import find_class
+
class Manager(object):
    """
    Owns the SimpleDB (and, lazily, S3) connections used by the persist
    layer, and resolves the domain that objects are stored in.  If no
    domain name is supplied or configured, persistence is disabled and
    ``self.domain`` stays None.
    """

    # Fallback domain name, read once from the [Persist] boto config section.
    DefaultDomainName = boto.config.get('Persist', 'default_domain', None)

    def __init__(self, domain_name=None, aws_access_key_id=None, aws_secret_access_key=None, debug=0):
        """
        :param domain_name: Explicit SimpleDB domain to use; falls back to
            DefaultDomainName when omitted.
        :param aws_access_key_id: Optional explicit AWS credentials; boto's
            usual credential resolution applies when omitted.
        :param aws_secret_access_key: See aws_access_key_id.
        :param debug: Debug level passed through to connect_sdb.
        """
        self.domain_name = domain_name
        self.aws_access_key_id = aws_access_key_id
        self.aws_secret_access_key = aws_secret_access_key
        self.domain = None
        self.sdb = None
        self.s3 = None
        if not self.domain_name:
            self.domain_name = self.DefaultDomainName
            if self.domain_name:
                boto.log.info('No SimpleDB domain set, using default_domain: %s' % self.domain_name)
            else:
                boto.log.warning('No SimpleDB domain set, persistance is disabled')
        if self.domain_name:
            self.sdb = boto.connect_sdb(aws_access_key_id=self.aws_access_key_id,
                                        aws_secret_access_key=self.aws_secret_access_key,
                                        debug=debug)
            # Create the domain on first use if it doesn't exist yet.
            self.domain = self.sdb.lookup(self.domain_name)
            if not self.domain:
                self.domain = self.sdb.create_domain(self.domain_name)

    def get_s3_connection(self):
        # Lazily created and cached; only the S3 checkers need it.
        if not self.s3:
            self.s3 = boto.connect_s3(self.aws_access_key_id, self.aws_secret_access_key)
        return self.s3
+
def get_manager(domain_name=None, aws_access_key_id=None, aws_secret_access_key=None, debug=0):
    """Convenience factory returning a new Manager instance."""
    return Manager(domain_name, aws_access_key_id, aws_secret_access_key, debug=debug)
+
def set_domain(domain_name):
    """Set the module-wide default SimpleDB domain name used by Manager."""
    Manager.DefaultDomainName = domain_name
+
def get_domain():
    """Return the module-wide default SimpleDB domain name."""
    return Manager.DefaultDomainName
+
def revive_object_from_id(id, manager):
    """
    Recreate a persisted object from its SimpleDB id.

    :param id: The SimpleDB item name (object id) to revive.
    :param manager: The Manager whose domain holds the object.
    :return: A new instance of the stored class, or None when the manager
        has no domain or the stored class cannot be resolved.
    """
    if not manager.domain:
        return None
    attrs = manager.domain.get_attributes(id, ['__module__', '__type__', '__lineage__'])
    try:
        cls = find_class(attrs['__module__'], attrs['__type__'])
        return cls(id, manager=manager)
    except (ImportError, KeyError):
        # Bug fix: an item missing its __module__/__type__ attributes
        # (e.g. a nonexistent id) previously leaked a KeyError instead of
        # returning None like an unimportable class does.
        return None
+
def object_lister(cls, query_lister, manager):
    """
    Generator that turns a query result set into objects.

    When ``cls`` is supplied, each result item is simply wrapped as
    ``cls(item.name)``.  Otherwise the stored class is looked up per
    item via revive_object_from_id, and items that cannot be revived
    are silently skipped.
    """
    for result in query_lister:
        if not cls:
            revived = revive_object_from_id(result.name, manager)
            if revived:
                yield revived
            continue
        yield cls(result.name)
+
+
diff --git a/boto/sdb/persist/checker.py b/boto/sdb/persist/checker.py
new file mode 100644
index 0000000..e2146c9
--- /dev/null
+++ b/boto/sdb/persist/checker.py
@@ -0,0 +1,302 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from datetime import datetime
+from boto.s3.key import Key
+from boto.s3.bucket import Bucket
+from boto.sdb.persist import revive_object_from_id
+from boto.exception import SDBPersistenceError
+from boto.utils import Password
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
class ValueChecker:
    """
    Abstract base for the type converters used by the persist layer.
    Subclasses validate Python values and translate them to/from the
    string form stored in SimpleDB.
    """

    def check(self, value):
        """
        Checks a value to see if it is of the right type.

        Should raise a TypeError exception if an inappropriate value is passed in.
        """
        raise TypeError

    def from_string(self, str_value, obj):
        """
        Takes a string as input and returns the type-specific value represented by that string.

        Should raise a ValueError if the value cannot be converted to the appropriate type.
        """
        raise ValueError

    def to_string(self, value):
        """
        Convert a value to it's string representation.

        Should raise a ValueError if the value cannot be converted to a string representation.
        """
        raise ValueError
+
class StringChecker(ValueChecker):
    """
    Checker for string values with an optional maximum length.

    Keyword params:
      maxlength -- longest value accepted (default 1024)
      default   -- default value, validated via check() (default '')
    """

    def __init__(self, **params):
        # Idiom: dict.get / "in" replace the Python-2-only has_key calls.
        self.maxlength = params.get('maxlength', 1024)
        if 'default' in params:
            self.check(params['default'])
            self.default = params['default']
        else:
            self.default = ''

    def check(self, value):
        if isinstance(value, (str, unicode)):
            if len(value) > self.maxlength:
                # Modernized from the Python-2-only "raise E, msg" form;
                # raise E(msg) is valid on Python 2.4+ as well.
                raise ValueError('Length of value greater than maxlength')
        else:
            raise TypeError('Expecting String, got %s' % type(value))

    def from_string(self, str_value, obj):
        return str_value

    def to_string(self, value):
        self.check(value)
        return value
+
class PasswordChecker(StringChecker):
    """StringChecker variant that also accepts Password wrapper objects."""
    def check(self, value):
        # Password instances are accepted alongside plain strings; they
        # support len() for the maxlength test.
        if isinstance(value, str) or isinstance(value, unicode) or isinstance(value, Password):
            if len(value) > self.maxlength:
                raise ValueError, 'Length of value greater than maxlength'
        else:
            raise TypeError, 'Expecting String, got %s' % type(value)
+
class IntegerChecker(ValueChecker):
    """
    Checker for int/long values.  Values are stored as fixed-width,
    zero-padded decimal strings; signed values are shifted up by the
    size's minimum so the stored form is non-negative and sorts
    lexicographically in numeric order.

    Keyword params:
      size    -- 'small' (16-bit), 'medium' (32-bit, the default) or
                 'large' (64-bit)
      signed  -- whether negative values are allowed (default True)
      default -- default value (default 0)
    """

    # size -> (unsigned max, signed max, signed min, digits in stored form)
    __sizes__ = { 'small' : (65535, 32767, -32768, 5),
                  'medium' : (4294967295, 2147483647, -2147483648, 10),
                  'large' : (18446744073709551615, 9223372036854775807, -9223372036854775808, 20)}

    def __init__(self, **params):
        self.size = params.get('size', 'medium')
        if self.size not in self.__sizes__.keys():
            raise ValueError, 'size must be one of %s' % self.__sizes__.keys()
        self.signed = params.get('signed', True)
        self.default = params.get('default', 0)
        # e.g. '%05d' for 'small': zero-pad to the size's fixed width.
        self.format_string = '%%0%dd' % self.__sizes__[self.size][-1]

    def check(self, value):
        if not isinstance(value, int) and not isinstance(value, long):
            raise TypeError, 'Expecting int or long, got %s' % type(value)
        if self.signed:
            min = self.__sizes__[self.size][2]
            max = self.__sizes__[self.size][1]
        else:
            min = 0
            max = self.__sizes__[self.size][0]
        if value > max:
            raise ValueError, 'Maximum value is %d' % max
        if value < min:
            raise ValueError, 'Minimum value is %d' % min

    def from_string(self, str_value, obj):
        val = int(str_value)
        if self.signed:
            # Undo the storage offset (adds the negative minimum back).
            val = val + self.__sizes__[self.size][2]
        return val

    def to_string(self, value):
        self.check(value)
        if self.signed:
            # Shift into the non-negative range before zero-padding.
            value += -self.__sizes__[self.size][2]
        return self.format_string % value
+
class BooleanChecker(ValueChecker):
    """
    Checker for bool values, stored as the strings 'true'/'false'.

    Keyword params:
      default -- default value (default False)
    """

    def __init__(self, **params):
        # Idiom: dict.get replaces the Python-2-only has_key branch.
        self.default = params.get('default', False)

    def check(self, value):
        if not isinstance(value, bool):
            # Modernized from the Python-2-only "raise E, msg" form;
            # raise E(msg) is valid on Python 2.4+ as well.
            raise TypeError('Expecting bool, got %s' % type(value))

    def from_string(self, str_value, obj):
        # Anything other than (case-insensitive) 'true' parses as False.
        if str_value.lower() == 'true':
            return True
        else:
            return False

    def to_string(self, value):
        self.check(value)
        if value == True:
            return 'true'
        else:
            return 'false'
+
class DateTimeChecker(ValueChecker):
    """
    Checker for datetime values, stored as strings in the module-level
    ISO8601 format ('%Y-%m-%dT%H:%M:%SZ').

    Keyword params:
      maxlength -- stored but not referenced anywhere in this class
      default   -- default value (default: datetime.now() at construction)
    """

    def __init__(self, **params):
        if params.has_key('maxlength'):
            self.maxlength = params['maxlength']
        else:
            self.maxlength = 1024
        if params.has_key('default'):
            self.default = params['default']
        else:
            # NOTE(review): naive local time, evaluated once when the
            # checker is constructed -- confirm intended.
            self.default = datetime.now()

    def check(self, value):
        if not isinstance(value, datetime):
            raise TypeError, 'Expecting datetime, got %s' % type(value)

    def from_string(self, str_value, obj):
        try:
            return datetime.strptime(str_value, ISO8601)
        except:
            raise ValueError, 'Unable to convert %s to DateTime' % str_value

    def to_string(self, value):
        self.check(value)
        return value.strftime(ISO8601)
+
class ObjectChecker(ValueChecker):
    """
    Checker for references to other persisted objects, stored by id.
    The required ref_class keyword param declares the expected class;
    values must be instances whose lineage starts with ref_class's
    lineage (i.e. ref_class or a subclass), or a raw UUID string.
    """

    def __init__(self, **params):
        self.default = None
        self.ref_class = params.get('ref_class', None)
        if self.ref_class == None:
            raise SDBPersistenceError('ref_class parameter is required')

    def check(self, value):
        if value == None:
            return
        if isinstance(value, str) or isinstance(value, unicode):
            # ugly little hack - sometimes I want to just stick a UUID string
            # in here rather than instantiate an object.
            # This does a bit of hand waving to "type check" the string
            t = value.split('-')
            if len(t) != 5:
                raise ValueError
        else:
            try:
                obj_lineage = value.get_lineage()
                cls_lineage = self.ref_class.get_lineage()
                # Lineage prefix match accepts ref_class and subclasses.
                if obj_lineage.startswith(cls_lineage):
                    return
                raise TypeError, '%s not instance of %s' % (obj_lineage, cls_lineage)
            except:
                # NOTE(review): this bare except also converts the
                # TypeError raised just above into ValueError -- confirm
                # intended.
                raise ValueError, '%s is not an SDBObject' % value

    def from_string(self, str_value, obj):
        if not str_value:
            return None
        try:
            return revive_object_from_id(str_value, obj._manager)
        except:
            raise ValueError, 'Unable to convert %s to Object' % str_value

    def to_string(self, value):
        self.check(value)
        # UUID strings pass through unchanged; objects store their id.
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        if value == None:
            return ''
        else:
            return value.id
+
class S3KeyChecker(ValueChecker):
    """
    Checker for S3 Key references, stored as 'bucket_name/key_name'
    strings.  Accepts either a boto Key object or such a string.
    """

    def __init__(self, **params):
        self.default = None

    def check(self, value):
        if value == None:
            return
        if isinstance(value, str) or isinstance(value, unicode):
            try:
                # Strings must look like 'bucket/key'.
                bucket_name, key_name = value.split('/', 1)
            except:
                raise ValueError
        elif not isinstance(value, Key):
            raise TypeError, 'Expecting Key, got %s' % type(value)

    def from_string(self, str_value, obj):
        if not str_value:
            return None
        if str_value == 'None':
            return None
        try:
            bucket_name, key_name = str_value.split('/', 1)
            if obj:
                s3 = obj._manager.get_s3_connection()
                bucket = s3.get_bucket(bucket_name)
                key = bucket.get_key(key_name)
                # Create the key lazily if it doesn't exist yet.
                if not key:
                    key = bucket.new_key(key_name)
                return key
            # NOTE(review): when obj is falsy this falls through and
            # returns None implicitly -- confirm intended.
        except:
            raise ValueError, 'Unable to convert %s to S3Key' % str_value

    def to_string(self, value):
        self.check(value)
        # Already-encoded strings pass through unchanged.
        if isinstance(value, str) or isinstance(value, unicode):
            return value
        if value == None:
            return ''
        else:
            return '%s/%s' % (value.bucket.name, value.name)
+
class S3BucketChecker(ValueChecker):
    """
    Checker for S3 Bucket references, stored by bucket name.
    Accepts either a boto Bucket object or a bucket-name string.
    """

    def __init__(self, **params):
        self.default = None

    def check(self, value):
        if value == None:
            return
        if isinstance(value, str) or isinstance(value, unicode):
            return
        elif not isinstance(value, Bucket):
            raise TypeError, 'Expecting Bucket, got %s' % type(value)

    def from_string(self, str_value, obj):
        if not str_value:
            return None
        if str_value == 'None':
            return None
        try:
            if obj:
                s3 = obj._manager.get_s3_connection()
                bucket = s3.get_bucket(str_value)
                return bucket
            # NOTE(review): falls through to an implicit None when obj
            # is falsy -- confirm intended.
        except:
            raise ValueError, 'Unable to convert %s to S3Bucket' % str_value

    def to_string(self, value):
        self.check(value)
        if value == None:
            return ''
        else:
            return '%s' % value.name
+
diff --git a/boto/sdb/persist/object.py b/boto/sdb/persist/object.py
new file mode 100644
index 0000000..993df1e
--- /dev/null
+++ b/boto/sdb/persist/object.py
@@ -0,0 +1,207 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import SDBPersistenceError
+from boto.sdb.persist import get_manager, object_lister
+from boto.sdb.persist.property import Property, ScalarProperty
+import uuid
+
class SDBBase(type):
    "Metaclass for all SDBObjects"
    def __init__(cls, name, bases, dict):
        super(SDBBase, cls).__init__(name, bases, dict)
        # Make sure this is a subclass of SDBObject - mainly copied from django ModelBase (thanks!)
        try:
            if filter(lambda b: issubclass(b, SDBObject), bases):
                # look for all of the Properties and set their names
                for key in dict.keys():
                    if isinstance(dict[key], Property):
                        property = dict[key]
                        property.set_name(key)
                prop_names = []
                props = cls.properties()
                for prop in props:
                    prop_names.append(prop.name)
                # Cache the property names on the class so __setattr__
                # can cheaply decide which attributes are persisted.
                setattr(cls, '_prop_names', prop_names)
        except NameError:
            # 'SDBObject' isn't defined yet, meaning we're looking at our own
            # SDBObject class, defined below.
            pass
+
class SDBObject(object):
    """
    Base class for objects persisted to SimpleDB.  Subclasses declare
    Property attributes; the SDBBase metaclass records their names in
    _prop_names so plain attribute assignment can distinguish persisted
    properties from transient state.
    """
    __metaclass__ = SDBBase

    # Shared default manager; instances may override via __init__.
    _manager = get_manager()

    @classmethod
    def get_lineage(cls):
        # Dotted MRO from root to leaf; stored as __lineage__ so queries
        # can match a class together with its subclasses by prefix.
        l = [c.__name__ for c in cls.mro()]
        l.reverse()
        return '.'.join(l)

    @classmethod
    def get(cls, id=None, **params):
        """
        Retrieve exactly one object, either by id or by the field
        equality constraints in params.  Raises SDBPersistenceError if
        no object, or more than one, matches.
        """
        if params.has_key('manager'):
            manager = params['manager']
        else:
            manager = cls._manager
        if manager.domain and id:
            # NOTE(review): uses cls._manager here even when an explicit
            # manager was passed in params -- confirm intended.
            a = cls._manager.domain.get_attributes(id, '__type__')
            if a.has_key('__type__'):
                return cls(id, manager)
            else:
                raise SDBPersistenceError('%s object with id=%s does not exist' % (cls.__name__, id))
        else:
            rs = cls.find(**params)
            try:
                obj = rs.next()
            except StopIteration:
                raise SDBPersistenceError('%s object matching query does not exist' % cls.__name__)
            # Ensure the query was unambiguous: a second result is an error.
            try:
                rs.next()
            except StopIteration:
                return obj
            raise SDBPersistenceError('Query matched more than 1 item')

    @classmethod
    def find(cls, **params):
        """
        Query for objects of this class (or its subclasses, via the
        __lineage__ prefix) whose fields equal the given params.  At
        most 4 fields may be specified; returns a generator of revived
        objects.
        """
        if params.has_key('manager'):
            manager = params['manager']
            del params['manager']
        else:
            manager = cls._manager
        keys = params.keys()
        if len(keys) > 4:
            raise SDBPersistenceError('Too many fields, max is 4')
        parts = ["['__type__'='%s'] union ['__lineage__'starts-with'%s']" % (cls.__name__, cls.get_lineage())]
        properties = cls.properties()
        for key in keys:
            found = False
            for property in properties:
                if property.name == key:
                    found = True
                    # Only scalar properties are searchable; the checker
                    # renders the value in its stored string form.
                    if isinstance(property, ScalarProperty):
                        checker = property.checker
                        parts.append("['%s' = '%s']" % (key, checker.to_string(params[key])))
                    else:
                        raise SDBPersistenceError('%s is not a searchable field' % key)
            if not found:
                raise SDBPersistenceError('%s is not a valid field' % key)
        query = ' intersection '.join(parts)
        if manager.domain:
            rs = manager.domain.query(query)
        else:
            rs = []
        return object_lister(None, rs, manager)

    @classmethod
    def list(cls, max_items=None, manager=None):
        """List all objects of exactly this class (not subclasses)."""
        if not manager:
            manager = cls._manager
        if manager.domain:
            rs = manager.domain.query("['__type__' = '%s']" % cls.__name__, max_items=max_items)
        else:
            rs = []
        return object_lister(cls, rs, manager)

    @classmethod
    def properties(cls):
        """Collect Property declarations from this class and its bases."""
        properties = []
        while cls:
            for key in cls.__dict__.keys():
                if isinstance(cls.__dict__[key], Property):
                    properties.append(cls.__dict__[key])
            # Walks only the first base at each step (single-inheritance
            # chain).
            if len(cls.__bases__) > 0:
                cls = cls.__bases__[0]
            else:
                cls = None
        return properties

    # for backwards compatibility
    find_properties = properties

    def __init__(self, id=None, manager=None):
        if manager:
            self._manager = manager
        self.id = id
        if self.id:
            # Reviving an existing object: verify it actually exists.
            self._auto_update = True
            if self._manager.domain:
                attrs = self._manager.domain.get_attributes(self.id, '__type__')
                if len(attrs.keys()) == 0:
                    raise SDBPersistenceError('Object %s: not found' % self.id)
        else:
            # Brand new object: mint a fresh id; writes are deferred
            # until save().
            self.id = str(uuid.uuid4())
            self._auto_update = False

    def __setattr__(self, name, value):
        if name in self._prop_names:
            object.__setattr__(self, name, value)
        elif name.startswith('_'):
            object.__setattr__(self, name, value)
        elif name == 'id':
            object.__setattr__(self, name, value)
        else:
            # Non-property, non-private attribute: write through to
            # SimpleDB immediately, then mirror it locally.
            self._persist_attribute(name, value)
            object.__setattr__(self, name, value)

    def __getattr__(self, name):
        # Fall back to SimpleDB for attributes not found locally,
        # caching any fetched value on the instance.
        if not name.startswith('_'):
            a = self._manager.domain.get_attributes(self.id, name)
            if a.has_key(name):
                object.__setattr__(self, name, a[name])
                return a[name]
        raise AttributeError

    def __repr__(self):
        return '%s<%s>' % (self.__class__.__name__, self.id)

    def _persist_attribute(self, name, value):
        # No-op until the object has an id to write under.
        if self.id:
            self._manager.domain.put_attributes(self.id, {name : value}, replace=True)

    def _get_sdb_item(self):
        return self._manager.domain.get_item(self.id)

    def save(self):
        """Write type metadata and all declared properties to SimpleDB."""
        attrs = {'__type__' : self.__class__.__name__,
                 '__module__' : self.__class__.__module__,
                 '__lineage__' : self.get_lineage()}
        for property in self.properties():
            attrs[property.name] = property.to_string(self)
        if self._manager.domain:
            self._manager.domain.put_attributes(self.id, attrs, replace=True)
        self._auto_update = True

    def delete(self):
        """Remove this object's item (all attributes) from SimpleDB."""
        if self._manager.domain:
            self._manager.domain.delete_attributes(self.id)

    def get_related_objects(self, ref_name, ref_cls=None):
        """Find objects whose ref_name attribute equals this object's id,
        optionally restricted to ref_cls."""
        if self._manager.domain:
            query = "['%s' = '%s']" % (ref_name, self.id)
            if ref_cls:
                query += " intersection ['__type__'='%s']" % ref_cls.__name__
            rs = self._manager.domain.query(query)
        else:
            rs = []
        return object_lister(ref_cls, rs, self._manager)
+
diff --git a/boto/sdb/persist/property.py b/boto/sdb/persist/property.py
new file mode 100644
index 0000000..4776d35
--- /dev/null
+++ b/boto/sdb/persist/property.py
@@ -0,0 +1,371 @@
+# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.exception import SDBPersistenceError
+from boto.sdb.persist.checker import StringChecker, PasswordChecker, IntegerChecker, BooleanChecker
+from boto.sdb.persist.checker import DateTimeChecker, ObjectChecker, S3KeyChecker, S3BucketChecker
+from boto.utils import Password
+
+class Property(object):
+    """Base class for persisted attribute descriptors.
+
+    Holds the attribute's name and a checker instance responsible for
+    validating and (de)serializing values.  slot_name is the private
+    instance-attribute name ('__' + name) under which the cached value
+    is stored on the owning object.
+    """
+
+    def __init__(self, checker_class, **params):
+        self.name = ''
+        self.checker = checker_class(**params)
+        self.slot_name = '__'
+
+    def set_name(self, name):
+        # Called by the owning class machinery once the attribute name
+        # is known; derives the cache slot from it.
+        self.name = name
+        self.slot_name = '__' + self.name
+
+class ScalarProperty(Property):
+    """Descriptor for a single-valued attribute persisted to SimpleDB.
+
+    Values are validated and serialized by the checker installed by the
+    Property base class.  Reads are lazily loaded from SimpleDB (when
+    the object is in auto-update mode) and cached on the instance under
+    self.slot_name.
+    """
+
+    def save(self, obj):
+        # Write the current (stringified) value to SDB, replacing any
+        # previous value stored under this attribute name.
+        domain = obj._manager.domain
+        domain.put_attributes(obj.id, {self.name : self.to_string(obj)}, replace=True)
+
+    def to_string(self, obj):
+        # Serialize the current value via the checker.
+        return self.checker.to_string(getattr(obj, self.name))
+
+    def load(self, obj):
+        domain = obj._manager.domain
+        a = domain.get_attributes(obj.id, self.name)
+        # try to get the attribute value from SDB
+        if self.name in a:
+            value = self.checker.from_string(a[self.name], obj)
+            setattr(obj, self.slot_name, value)
+        # if it's not there, set the value to the default value
+        else:
+            self.__set__(obj, self.checker.default)
+
+    def __get__(self, obj, objtype):
+        # Class-level access (obj is None) falls through and returns None.
+        if obj:
+            try:
+                value = getattr(obj, self.slot_name)
+            except AttributeError:
+                # Not cached yet: load from SDB if auto-updating,
+                # otherwise fall back to the checker's default.
+                if obj._auto_update:
+                    self.load(obj)
+                    value = getattr(obj, self.slot_name)
+                else:
+                    value = self.checker.default
+                    setattr(obj, self.slot_name, self.checker.default)
+            return value
+
+    def __set__(self, obj, value):
+        self.checker.check(value)
+        try:
+            old_value = getattr(obj, self.slot_name)
+        except:
+            # NOTE(review): bare except; AttributeError is the case
+            # actually expected here.
+            old_value = self.checker.default
+        setattr(obj, self.slot_name, value)
+        if obj._auto_update:
+            try:
+                self.save(obj)
+            except:
+                # Roll back the cached value if the SDB write fails,
+                # then re-raise the original error.
+                setattr(obj, self.slot_name, old_value)
+                raise
+
+class StringProperty(ScalarProperty):
+    """Scalar string property (validated by StringChecker)."""
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, StringChecker, **params)
+
+class PasswordProperty(ScalarProperty):
+    """
+    Hashed password property.
+
+    Assigned plaintext is wrapped in a Password object via Password.set()
+    before storage; reads come back wrapped in Password as well.
+    Password.set() presumably hashes the plaintext, so only the hash is
+    persisted -- TODO confirm against boto.utils.Password.
+    """
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, PasswordChecker, **params)
+
+    def __set__(self, obj, value):
+        p = Password()
+        p.set(value)
+        ScalarProperty.__set__(self, obj, p)
+
+    def __get__(self, obj, objtype):
+        return Password(ScalarProperty.__get__(self, obj, objtype))
+
+class SmallPositiveIntegerProperty(ScalarProperty):
+    """Scalar unsigned integer, checker size 'small'."""
+
+    def __init__(self, **params):
+        params['size'] = 'small'
+        params['signed'] = False
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class SmallIntegerProperty(ScalarProperty):
+    """Scalar signed integer, checker size 'small'."""
+
+    def __init__(self, **params):
+        params['size'] = 'small'
+        params['signed'] = True
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class PositiveIntegerProperty(ScalarProperty):
+    """Scalar unsigned integer, checker size 'medium'."""
+
+    def __init__(self, **params):
+        params['size'] = 'medium'
+        params['signed'] = False
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class IntegerProperty(ScalarProperty):
+    """Scalar signed integer, checker size 'medium'."""
+
+    def __init__(self, **params):
+        params['size'] = 'medium'
+        params['signed'] = True
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class LargePositiveIntegerProperty(ScalarProperty):
+    """Scalar unsigned integer, checker size 'large'."""
+
+    def __init__(self, **params):
+        params['size'] = 'large'
+        params['signed'] = False
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class LargeIntegerProperty(ScalarProperty):
+    """Scalar signed integer, checker size 'large'."""
+
+    def __init__(self, **params):
+        params['size'] = 'large'
+        params['signed'] = True
+        ScalarProperty.__init__(self, IntegerChecker, **params)
+
+class BooleanProperty(ScalarProperty):
+    """Scalar boolean property (validated by BooleanChecker)."""
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, BooleanChecker, **params)
+
+class DateTimeProperty(ScalarProperty):
+    """Scalar datetime property (validated by DateTimeChecker)."""
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, DateTimeChecker, **params)
+
+class ObjectProperty(ScalarProperty):
+    """Scalar reference to another persisted object (ObjectChecker)."""
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, ObjectChecker, **params)
+
+class S3KeyProperty(ScalarProperty):
+    """Scalar property referencing an S3 key.
+
+    Overrides __set__ to coerce plain strings into key objects via the
+    checker before storing.
+    """
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, S3KeyChecker, **params)
+
+    def __set__(self, obj, value):
+        # NOTE(review): duplicates ScalarProperty.__set__ except for the
+        # string coercion below; same bare-except caveats apply.
+        self.checker.check(value)
+        try:
+            old_value = getattr(obj, self.slot_name)
+        except:
+            old_value = self.checker.default
+        if isinstance(value, str):
+            # Allow assignment of a string path/name; convert to a key.
+            value = self.checker.from_string(value, obj)
+        setattr(obj, self.slot_name, value)
+        if obj._auto_update:
+            try:
+                self.save(obj)
+            except:
+                # Roll back the cached value on write failure, re-raise.
+                setattr(obj, self.slot_name, old_value)
+                raise
+
+class S3BucketProperty(ScalarProperty):
+    """Scalar property referencing an S3 bucket.
+
+    Overrides __set__ to coerce plain strings (bucket names) into bucket
+    objects via the checker before storing.
+    """
+
+    def __init__(self, **params):
+        ScalarProperty.__init__(self, S3BucketChecker, **params)
+
+    def __set__(self, obj, value):
+        # NOTE(review): near-identical to S3KeyProperty.__set__; the two
+        # could share a common base once outside this patch.
+        self.checker.check(value)
+        try:
+            old_value = getattr(obj, self.slot_name)
+        except:
+            old_value = self.checker.default
+        if isinstance(value, str):
+            value = self.checker.from_string(value, obj)
+        setattr(obj, self.slot_name, value)
+        if obj._auto_update:
+            try:
+                self.save(obj)
+            except:
+                # Roll back the cached value on write failure, re-raise.
+                setattr(obj, self.slot_name, old_value)
+                raise
+
+class MultiValueProperty(Property):
+    """Descriptor for a list-valued attribute persisted to SimpleDB.
+
+    Values are wrapped in a MultiValue so that in-place operations
+    (append, del) are mirrored to SimpleDB as individual attribute
+    values under the same name.
+    """
+
+    def __init__(self, checker_class, **params):
+        Property.__init__(self, checker_class, **params)
+
+    def __get__(self, obj, objtype):
+        if obj:
+            try:
+                value = getattr(obj, self.slot_name)
+            except AttributeError:
+                if obj._auto_update:
+                    self.load(obj)
+                    value = getattr(obj, self.slot_name)
+                else:
+                    value = MultiValue(self, obj, [])
+                    setattr(obj, self.slot_name, value)
+        # NOTE(review): 'return value' sits outside 'if obj:', so
+        # class-level access (obj is None) raises NameError rather than
+        # returning None as ScalarProperty.__get__ does -- confirm.
+        return value
+
+    def load(self, obj):
+        if obj != None:
+            _list = []
+            domain = obj._manager.domain
+            a = domain.get_attributes(obj.id, self.name)
+            if self.name in a:
+                # SDB returns a bare value for a single attribute and a
+                # list for multiples; normalize to a list.
+                lst = a[self.name]
+                if not isinstance(lst, list):
+                    lst = [lst]
+                for value in lst:
+                    value = self.checker.from_string(value, obj)
+                    _list.append(value)
+        # NOTE(review): this line is outside 'if obj != None:', so a None
+        # obj raises NameError on _list -- confirm intent.
+        setattr(obj, self.slot_name, MultiValue(self, obj, _list))
+
+    def __set__(self, obj, value):
+        if not isinstance(value, list):
+            raise SDBPersistenceError('Value must be a list')
+        setattr(obj, self.slot_name, MultiValue(self, obj, value))
+        str_list = self.to_string(obj)
+        domain = obj._manager.domain
+        if obj._auto_update:
+            if len(str_list) == 1:
+                domain.put_attributes(obj.id, {self.name : str_list[0]}, replace=True)
+            else:
+                # Clear any existing values first (best-effort), then
+                # write the full list.
+                try:
+                    self.__delete__(obj)
+                except:
+                    pass
+                domain.put_attributes(obj.id, {self.name : str_list}, replace=True)
+            setattr(obj, self.slot_name, MultiValue(self, obj, value))
+
+    def __delete__(self, obj):
+        # Remove the attribute from SDB (when auto-updating) and reset
+        # the local cache to an empty MultiValue.
+        if obj._auto_update:
+            domain = obj._manager.domain
+            domain.delete_attributes(obj.id, [self.name])
+        setattr(obj, self.slot_name, MultiValue(self, obj, []))
+
+    def to_string(self, obj):
+        # Serialize each element via the checker; returns a list of strings.
+        str_list = []
+        for value in self.__get__(obj, type(obj)):
+            str_list.append(self.checker.to_string(value))
+        return str_list
+
+class StringListProperty(MultiValueProperty):
+    """List of strings (validated by StringChecker)."""
+
+    def __init__(self, **params):
+        MultiValueProperty.__init__(self, StringChecker, **params)
+
+class SmallIntegerListProperty(MultiValueProperty):
+    """List of signed integers, checker size 'small'."""
+
+    def __init__(self, **params):
+        params['size'] = 'small'
+        params['signed'] = True
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class SmallPositiveIntegerListProperty(MultiValueProperty):
+    """List of unsigned integers, checker size 'small'."""
+
+    def __init__(self, **params):
+        params['size'] = 'small'
+        params['signed'] = False
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class IntegerListProperty(MultiValueProperty):
+    """List of signed integers, checker size 'medium'."""
+
+    def __init__(self, **params):
+        params['size'] = 'medium'
+        params['signed'] = True
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class PositiveIntegerListProperty(MultiValueProperty):
+    """List of unsigned integers, checker size 'medium'."""
+
+    def __init__(self, **params):
+        params['size'] = 'medium'
+        params['signed'] = False
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class LargeIntegerListProperty(MultiValueProperty):
+    """List of signed integers, checker size 'large'."""
+
+    def __init__(self, **params):
+        params['size'] = 'large'
+        params['signed'] = True
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class LargePositiveIntegerListProperty(MultiValueProperty):
+    """List of unsigned integers, checker size 'large'."""
+
+    def __init__(self, **params):
+        params['size'] = 'large'
+        params['signed'] = False
+        MultiValueProperty.__init__(self, IntegerChecker, **params)
+
+class BooleanListProperty(MultiValueProperty):
+    """List of booleans (validated by BooleanChecker)."""
+
+    def __init__(self, **params):
+        MultiValueProperty.__init__(self, BooleanChecker, **params)
+
+class ObjectListProperty(MultiValueProperty):
+    """List of references to other persisted objects (ObjectChecker)."""
+
+    def __init__(self, **params):
+        MultiValueProperty.__init__(self, ObjectChecker, **params)
+
+class HasManyProperty(Property):
+    """Relationship placeholder: accessing the attribute yields the
+    property object itself rather than a value.
+    """
+
+    def set_name(self, name):
+        # NOTE(review): duplicates Property.set_name verbatim.
+        self.name = name
+        self.slot_name = '__' + self.name
+
+    def __get__(self, obj, objtype):
+        # Returns the descriptor itself; callers query through it.
+        return self
+
+
+class MultiValue:
+    """
+    List-like wrapper used by the boto persistence layer so that
+    in-place operations on a list-valued attribute are mirrored to
+    SimpleDB, e.g. obj.list.append(foo).
+    """
+    def __init__(self, property, obj, _list):
+        self.checker = property.checker
+        self.name = property.name
+        self.object = obj
+        self._list = _list
+
+    def __repr__(self):
+        return repr(self._list)
+
+    def __getitem__(self, key):
+        return self._list.__getitem__(key)
+
+    def __delitem__(self, key):
+        # Remove locally, then delete that single serialized value from
+        # the item's attribute set in SDB.
+        item = self[key]
+        self._list.__delitem__(key)
+        domain = self.object._manager.domain
+        domain.delete_attributes(self.object.id, {self.name: [self.checker.to_string(item)]})
+
+    def __len__(self):
+        return len(self._list)
+
+    def append(self, value):
+        # Validate, append locally, and add (replace=False accumulates
+        # multiple values under the same attribute name) in SDB.
+        self.checker.check(value)
+        self._list.append(value)
+        domain = self.object._manager.domain
+        domain.put_attributes(self.object.id, {self.name: self.checker.to_string(value)}, replace=False)
+
+    def index(self, value):
+        # NOTE(review): compares elements by .id, so this assumes object
+        # members; returns None when not found, which makes remove()
+        # fail with a TypeError on 'del self[None]' -- confirm intent.
+        for x in self._list:
+            if x.id == value.id:
+                return self._list.index(x)
+
+    def remove(self, value):
+        del(self[self.index(value)])
diff --git a/boto/sdb/persist/test_persist.py b/boto/sdb/persist/test_persist.py
new file mode 100644
index 0000000..080935d
--- /dev/null
+++ b/boto/sdb/persist/test_persist.py
@@ -0,0 +1,141 @@
+from boto.sdb.persist.object import SDBObject
+from boto.sdb.persist.property import StringProperty, PositiveIntegerProperty, IntegerProperty
+from boto.sdb.persist.property import BooleanProperty, DateTimeProperty, S3KeyProperty
+from boto.sdb.persist.property import ObjectProperty, StringListProperty
+from boto.sdb.persist.property import PositiveIntegerListProperty, BooleanListProperty, ObjectListProperty
+from boto.sdb.persist import Manager
+from datetime import datetime
+import time
+
+#
+# This will eventually be moved to the boto.tests module and become a real unit test
+# but for now it will live here. It shows examples of each of the Property types in
+# use and tests the basic operations.
+#
+class TestScalar(SDBObject):
+    """Model exercising each scalar property type."""
+
+    name = StringProperty()
+    description = StringProperty()
+    size = PositiveIntegerProperty()
+    offset = IntegerProperty()
+    foo = BooleanProperty()
+    date = DateTimeProperty()
+    file = S3KeyProperty()
+
+class TestRef(SDBObject):
+    """Model with an object reference to a TestScalar."""
+
+    name = StringProperty()
+    ref = ObjectProperty(ref_class=TestScalar)
+
+class TestSubClass1(TestRef):
+    """Subclass adding a property to TestRef (tests lineage)."""
+
+    answer = PositiveIntegerProperty()
+
+class TestSubClass2(TestScalar):
+    """Subclass adding a property to TestScalar (tests lineage)."""
+
+    flag = BooleanProperty()
+
+class TestList(SDBObject):
+    """Model exercising each list property type."""
+
+    names = StringListProperty()
+    numbers = PositiveIntegerListProperty()
+    bools = BooleanListProperty()
+    objects = ObjectListProperty(ref_class=TestScalar)
+
+def test1():
+    """Create and save a TestScalar with every field set; return it."""
+    s = TestScalar()
+    s.name = 'foo'
+    s.description = 'This is foo'
+    s.size = 42
+    s.offset = -100
+    s.foo = True
+    s.date = datetime.now()
+    s.save()
+    return s
+
+def test2(ref_name):
+    """Create a TestRef pointing at the TestScalar named *ref_name*."""
+    s = TestRef()
+    s.name = 'testref'
+    rs = TestScalar.find(name=ref_name)
+    # Take the first match from the query result set (Python 2 iterator).
+    s.ref = rs.next()
+    s.save()
+    return s
+
+def test3():
+    """Create and save a second TestScalar (no offset set); return it."""
+    s = TestScalar()
+    s.name = 'bar'
+    s.description = 'This is bar'
+    s.size = 24
+    s.foo = False
+    s.date = datetime.now()
+    s.save()
+    return s
+
+def test4(ref1, ref2):
+    """Create a TestList populated from two TestScalar objects."""
+    s = TestList()
+    s.names.append(ref1.name)
+    s.names.append(ref2.name)
+    s.numbers.append(ref1.size)
+    s.numbers.append(ref2.size)
+    s.bools.append(ref1.foo)
+    s.bools.append(ref2.foo)
+    s.objects.append(ref1)
+    s.objects.append(ref2)
+    s.save()
+    return s
+
+def test5(ref):
+    """Create a TestSubClass1, then exercise free-form attributes.
+
+    'fiddlefaddle' (no leading underscore) is persisted via
+    SDBObject.__setattr__; '_fiddlefaddle' stays local-only.
+    """
+    s = TestSubClass1()
+    s.answer = 42
+    s.ref = ref
+    s.save()
+    # test out free form attribute
+    s.fiddlefaddle = 'this is fiddlefaddle'
+    s._fiddlefaddle = 'this is not fiddlefaddle'
+    return s
+
+def test6():
+    """Create and save a TestSubClass2 with inherited + own fields."""
+    s = TestSubClass2()
+    s.name = 'fie'
+    s.description = 'This is fie'
+    s.size = 4200
+    s.offset = -820
+    s.foo = False
+    s.date = datetime.now()
+    s.flag = True
+    s.save()
+    return s
+
+def test(domain_name):
+    """Run all the persistence scenarios against *domain_name*.
+
+    Returns a list of (object, raw SDB item) pairs for inspection.
+    Python 2 script-style test; sleeps to let SimpleDB's eventual
+    consistency catch up before re-reading.
+    """
+    print 'Initialize the Persistance system'
+    Manager.DefaultDomainName = domain_name
+    print 'Call test1'
+    s1 = test1()
+    # now create a new instance and read the saved data from SDB
+    print 'Now sleep to wait for things to converge'
+    time.sleep(5)
+    print 'Now lookup the object and compare the fields'
+    s2 = TestScalar(s1.id)
+    assert s1.name == s2.name
+    assert s1.description == s2.description
+    assert s1.size == s2.size
+    assert s1.offset == s2.offset
+    assert s1.foo == s2.foo
+    #assert s1.date == s2.date
+    print 'Call test2'
+    # NOTE(review): s2 is rebound here, discarding the round-tripped
+    # TestScalar used in the assertions above.
+    s2 = test2(s1.name)
+    print 'Call test3'
+    s3 = test3()
+    print 'Call test4'
+    s4 = test4(s1, s3)
+    print 'Call test5'
+    # test5 needs a saved object to reference, so test6 runs first.
+    s6 = test6()
+    s5 = test5(s6)
+    domain = s5._manager.domain
+    item1 = domain.get_item(s1.id)
+    item2 = domain.get_item(s2.id)
+    item3 = domain.get_item(s3.id)
+    item4 = domain.get_item(s4.id)
+    item5 = domain.get_item(s5.id)
+    item6 = domain.get_item(s6.id)
+    return [(s1, item1), (s2, item2), (s3, item3), (s4, item4), (s5, item5), (s6, item6)]
diff --git a/boto/sdb/queryresultset.py b/boto/sdb/queryresultset.py
new file mode 100644
index 0000000..10bafd1
--- /dev/null
+++ b/boto/sdb/queryresultset.py
@@ -0,0 +1,92 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+def query_lister(domain, query='', max_items=None, attr_names=None):
+ more_results = True
+ num_results = 0
+ next_token = None
+ while more_results:
+ rs = domain.connection.query_with_attributes(domain, query, attr_names,
+ next_token=next_token)
+ for item in rs:
+ if max_items:
+ if num_results == max_items:
+ raise StopIteration
+ yield item
+ num_results += 1
+ next_token = rs.next_token
+ more_results = next_token != None
+
+class QueryResultSet:
+    """Re-iterable wrapper around query_lister.
+
+    Each iteration starts a fresh paged query against the domain.
+    """
+
+    def __init__(self, domain=None, query='', max_items=None, attr_names=None):
+        self.max_items = max_items
+        self.domain = domain
+        self.query = query
+        self.attr_names = attr_names
+
+    def __iter__(self):
+        return query_lister(self.domain, self.query, self.max_items, self.attr_names)
+
+def select_lister(domain, query='', max_items=None):
+ more_results = True
+ num_results = 0
+ next_token = None
+ while more_results:
+ rs = domain.connection.select(domain, query, next_token=next_token)
+ for item in rs:
+ if max_items:
+ if num_results == max_items:
+ raise StopIteration
+ yield item
+ num_results += 1
+ next_token = rs.next_token
+ more_results = next_token != None
+
+class SelectResultSet(object):
+
+ def __init__(self, domain=None, query='', max_items=None,
+ next_token=None, consistent_read=False):
+ self.domain = domain
+ self.query = query
+ self.consistent_read = consistent_read
+ self.max_items = max_items
+ self.next_token = next_token
+
+ def __iter__(self):
+ more_results = True
+ num_results = 0
+ while more_results:
+ rs = self.domain.connection.select(self.domain, self.query,
+ next_token=self.next_token,
+ consistent_read=self.consistent_read)
+ for item in rs:
+ if self.max_items and num_results >= self.max_items:
+ raise StopIteration
+ yield item
+ num_results += 1
+ self.next_token = rs.next_token
+ if self.max_items and num_results >= self.max_items:
+ raise StopIteration
+ more_results = self.next_token != None
+
+ def next(self):
+ return self.__iter__().next()
diff --git a/boto/sdb/regioninfo.py b/boto/sdb/regioninfo.py
new file mode 100644
index 0000000..5c32864
--- /dev/null
+++ b/boto/sdb/regioninfo.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
+class SDBRegionInfo(RegionInfo):
+    """RegionInfo whose default connection class is SDBConnection."""
+
+    def __init__(self, connection=None, name=None, endpoint=None):
+        # Imported here rather than at module top, presumably to avoid a
+        # circular import with boto.sdb.connection -- confirm before moving.
+        from boto.sdb.connection import SDBConnection
+        RegionInfo.__init__(self, connection, name, endpoint,
+                            SDBConnection)
diff --git a/boto/services/__init__.py b/boto/services/__init__.py
new file mode 100644
index 0000000..449bd16
--- /dev/null
+++ b/boto/services/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/services/bs.py b/boto/services/bs.py
new file mode 100755
index 0000000..3d70031
--- /dev/null
+++ b/boto/services/bs.py
@@ -0,0 +1,179 @@
+#!/usr/bin/env python
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+from optparse import OptionParser
+from boto.services.servicedef import ServiceDef
+from boto.services.submit import Submitter
+from boto.services.result import ResultProcessor
+import boto
+import sys, os, StringIO
+
+class BS(object):
+    """Command-line driver ('boto services') for a batch service.
+
+    Parses options, loads a ServiceDef config file, and dispatches the
+    positional command argument to the matching do_<command> method.
+    """
+
+    Usage = "usage: %prog [options] config_file command"
+
+    # Maps command name -> help text; also used to discover dispatch
+    # targets in main() via do_<command> lookup.
+    Commands = {'reset' : 'Clear input queue and output bucket',
+                'submit' : 'Submit local files to the service',
+                'start' : 'Start the service',
+                'status' : 'Report on the status of the service buckets and queues',
+                'retrieve' : 'Retrieve output generated by a batch',
+                'batches' : 'List all batches stored in current output_domain'}
+
+    def __init__(self):
+        self.service_name = None
+        self.parser = OptionParser(usage=self.Usage)
+        self.parser.add_option("--help-commands", action="store_true", dest="help_commands",
+                               help="provides help on the available commands")
+        self.parser.add_option("-a", "--access-key", action="store", type="string",
+                               help="your AWS Access Key")
+        self.parser.add_option("-s", "--secret-key", action="store", type="string",
+                               help="your AWS Secret Access Key")
+        self.parser.add_option("-p", "--path", action="store", type="string", dest="path",
+                               help="the path to local directory for submit and retrieve")
+        self.parser.add_option("-k", "--keypair", action="store", type="string", dest="keypair",
+                               help="the SSH keypair used with launched instance(s)")
+        self.parser.add_option("-l", "--leave", action="store_true", dest="leave",
+                               help="leave the files (don't retrieve) files during retrieve command")
+        self.parser.set_defaults(leave=False)
+        # NOTE(review): option type is "string" but the default is the
+        # int 1, so options.num_instances has mixed types -- confirm.
+        self.parser.add_option("-n", "--num-instances", action="store", type="string", dest="num_instances",
+                               help="the number of launched instance(s)")
+        self.parser.set_defaults(num_instances=1)
+        self.parser.add_option("-i", "--ignore-dirs", action="append", type="string", dest="ignore",
+                               help="directories that should be ignored by submit command")
+        self.parser.add_option("-b", "--batch-id", action="store", type="string", dest="batch",
+                               help="batch identifier required by the retrieve command")
+
+    def print_command_help(self):
+        # Print the Commands table (--help-commands).
+        print '\nCommands:'
+        for key in self.Commands.keys():
+            print '  %s\t\t%s' % (key, self.Commands[key])
+
+    def do_reset(self):
+        """Drain the input queue and delete keys in the output bucket."""
+        iq = self.sd.get_obj('input_queue')
+        if iq:
+            print 'clearing out input queue'
+            i = 0
+            m = iq.read()
+            while m:
+                i += 1
+                iq.delete_message(m)
+                m = iq.read()
+            print 'deleted %d messages' % i
+        ob = self.sd.get_obj('output_bucket')
+        ib = self.sd.get_obj('input_bucket')
+        if ob:
+            # Don't delete when input and output share a bucket.
+            if ib and ob.name == ib.name:
+                return
+            print 'delete generated files in output bucket'
+            i = 0
+            for k in ob:
+                i += 1
+                k.delete()
+            print 'deleted %d keys' % i
+
+    def do_submit(self):
+        """Submit the files under --path to the service."""
+        if not self.options.path:
+            self.parser.error('No path provided')
+        if not os.path.exists(self.options.path):
+            self.parser.error('Invalid path (%s)' % self.options.path)
+        s = Submitter(self.sd)
+        t = s.submit_path(self.options.path, None, self.options.ignore, None,
+                          None, True, self.options.path)
+        print 'A total of %d files were submitted' % t[1]
+        print 'Batch Identifier: %s' % t[0]
+
+    def do_start(self):
+        """Launch EC2 instance(s) running the service AMI.
+
+        The service config (with credentials injected) is passed to the
+        instances as EC2 user data.
+        """
+        ami_id = self.sd.get('ami_id')
+        instance_type = self.sd.get('instance_type', 'm1.small')
+        security_group = self.sd.get('security_group', 'default')
+        if not ami_id:
+            self.parser.error('ami_id option is required when starting the service')
+        ec2 = boto.connect_ec2()
+        if not self.sd.has_section('Credentials'):
+            self.sd.add_section('Credentials')
+            self.sd.set('Credentials', 'aws_access_key_id', ec2.aws_access_key_id)
+            self.sd.set('Credentials', 'aws_secret_access_key', ec2.aws_secret_access_key)
+        s = StringIO.StringIO()
+        self.sd.write(s)
+        rs = ec2.get_all_images([ami_id])
+        img = rs[0]
+        r = img.run(user_data=s.getvalue(), key_name=self.options.keypair,
+                    max_count=self.options.num_instances,
+                    instance_type=instance_type,
+                    security_groups=[security_group])
+        print 'Starting AMI: %s' % ami_id
+        print 'Reservation %s contains the following instances:' % r.id
+        for i in r.instances:
+            print '\t%s' % i.id
+
+    def do_status(self):
+        """Report approximate queue depth and output bucket key count."""
+        iq = self.sd.get_obj('input_queue')
+        if iq:
+            print 'The input_queue (%s) contains approximately %s messages' % (iq.id, iq.count())
+        ob = self.sd.get_obj('output_bucket')
+        ib = self.sd.get_obj('input_bucket')
+        if ob:
+            # Skip the count when input and output share a bucket.
+            if ib and ob.name == ib.name:
+                return
+            total = 0
+            for k in ob:
+                total += 1
+            print 'The output_bucket (%s) contains %d keys' % (ob.name, total)
+
+    def do_retrieve(self):
+        """Download the output of batch --batch-id into --path."""
+        if not self.options.path:
+            self.parser.error('No path provided')
+        if not os.path.exists(self.options.path):
+            self.parser.error('Invalid path (%s)' % self.options.path)
+        if not self.options.batch:
+            self.parser.error('batch identifier is required for retrieve command')
+        s = ResultProcessor(self.options.batch, self.sd)
+        # --leave means list results without fetching the files.
+        s.get_results(self.options.path, get_file=(not self.options.leave))
+
+    def do_batches(self):
+        """List batch records stored in the output domain."""
+        d = self.sd.get_obj('output_domain')
+        if d:
+            print 'Available Batches:'
+            rs = d.query("['type'='Batch']")
+            for item in rs:
+                print '  %s' % item.name
+        else:
+            self.parser.error('No output_domain specified for service')
+
+    def main(self):
+        """Parse arguments and dispatch to the requested do_<command>."""
+        self.options, self.args = self.parser.parse_args()
+        if self.options.help_commands:
+            self.print_command_help()
+            sys.exit(0)
+        if len(self.args) != 2:
+            self.parser.error("config_file and command are required")
+        self.config_file = self.args[0]
+        self.sd = ServiceDef(self.config_file)
+        self.command = self.args[1]
+        if hasattr(self, 'do_%s' % self.command):
+            method = getattr(self, 'do_%s' % self.command)
+            method()
+        else:
+            self.parser.error('command (%s) not recognized' % self.command)
+
+if __name__ == "__main__":
+    # Script entry point: build the driver and dispatch the command line.
+    bs = BS()
+    bs.main()
diff --git a/boto/services/message.py b/boto/services/message.py
new file mode 100644
index 0000000..79f6d19
--- /dev/null
+++ b/boto/services/message.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.sqs.message import MHMessage
+from boto.utils import get_ts
+from socket import gethostname
+import os, mimetypes, time
+
+class ServiceMessage(MHMessage):
+    """SQS message describing one input file/key for a batch service."""
+
+    def for_key(self, key, params=None, bucket_name=None):
+        """Populate message headers describing *key*.
+
+        *key* is an S3-key-like object; when key.path is set the entry
+        describes a local file, otherwise an existing S3 key.  *params*
+        (optional dict) seeds extra headers; *bucket_name* overrides the
+        recorded bucket.
+        """
+        if params:
+            self.update(params)
+        if key.path:
+            # Local file: record location, name, guessed content type
+            # and filesystem timestamps.
+            t = os.path.split(key.path)
+            self['OriginalLocation'] = t[0]
+            self['OriginalFileName'] = t[1]
+            mime_type = mimetypes.guess_type(t[1])[0]
+            if mime_type == None:
+                mime_type = 'application/octet-stream'
+            self['Content-Type'] = mime_type
+            s = os.stat(key.path)
+            # stat tuple indices: 7 = st_atime, 8 = st_mtime, 9 = st_ctime.
+            t = time.gmtime(s[7])
+            self['FileAccessedDate'] = get_ts(t)
+            t = time.gmtime(s[8])
+            self['FileModifiedDate'] = get_ts(t)
+            # NOTE(review): st_ctime is metadata-change time on Unix,
+            # not creation time -- 'FileCreateDate' may be misleading.
+            t = time.gmtime(s[9])
+            self['FileCreateDate'] = get_ts(t)
+        else:
+            self['OriginalFileName'] = key.name
+            self['OriginalLocation'] = key.bucket.name
+            # NOTE(review): this branch uses the key 'ContentType' while
+            # the local-file branch uses 'Content-Type' -- confirm
+            # whether the inconsistency is intentional.
+            self['ContentType'] = key.content_type
+        self['Host'] = gethostname()
+        if bucket_name:
+            self['Bucket'] = bucket_name
+        else:
+            self['Bucket'] = key.bucket.name
+        self['InputKey'] = key.name
+        self['Size'] = key.size
+
diff --git a/boto/services/result.py b/boto/services/result.py
new file mode 100644
index 0000000..32a6d6a
--- /dev/null
+++ b/boto/services/result.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import os
+from datetime import datetime, timedelta
+from boto.utils import parse_ts
+import boto
+
+class ResultProcessor:
+
+ LogFileName = 'log.csv'
+
+ def __init__(self, batch_name, sd, mimetype_files=None):
+ self.sd = sd
+ self.batch = batch_name
+ self.log_fp = None
+ self.num_files = 0
+ self.total_time = 0
+ self.min_time = timedelta.max
+ self.max_time = timedelta.min
+ self.earliest_time = datetime.max
+ self.latest_time = datetime.min
+ self.queue = self.sd.get_obj('output_queue')
+ self.domain = self.sd.get_obj('output_domain')
+
+ def calculate_stats(self, msg):
+ start_time = parse_ts(msg['Service-Read'])
+ end_time = parse_ts(msg['Service-Write'])
+ elapsed_time = end_time - start_time
+ if elapsed_time > self.max_time:
+ self.max_time = elapsed_time
+ if elapsed_time < self.min_time:
+ self.min_time = elapsed_time
+ self.total_time += elapsed_time.seconds
+ if start_time < self.earliest_time:
+ self.earliest_time = start_time
+ if end_time > self.latest_time:
+ self.latest_time = end_time
+
+ def log_message(self, msg, path):
+ keys = msg.keys()
+ keys.sort()
+ if not self.log_fp:
+ self.log_fp = open(os.path.join(path, self.LogFileName), 'a')
+ line = ','.join(keys)
+ self.log_fp.write(line+'\n')
+ values = []
+ for key in keys:
+ value = msg[key]
+ if value.find(',') > 0:
+ value = '"%s"' % value
+ values.append(value)
+ line = ','.join(values)
+ self.log_fp.write(line+'\n')
+
+ def process_record(self, record, path, get_file=True):
+ self.log_message(record, path)
+ self.calculate_stats(record)
+ outputs = record['OutputKey'].split(',')
+ if record.has_key('OutputBucket'):
+ bucket = boto.lookup('s3', record['OutputBucket'])
+ else:
+ bucket = boto.lookup('s3', record['Bucket'])
+ for output in outputs:
+ if get_file:
+ key_name = output.split(';')[0]
+ key = bucket.lookup(key_name)
+ file_name = os.path.join(path, key_name)
+ print 'retrieving file: %s to %s' % (key_name, file_name)
+ key.get_contents_to_filename(file_name)
+ self.num_files += 1
+
+ def get_results_from_queue(self, path, get_file=True, delete_msg=True):
+ m = self.queue.read()
+ while m:
+ if m.has_key('Batch') and m['Batch'] == self.batch:
+ self.process_record(m, path, get_file)
+ if delete_msg:
+ self.queue.delete_message(m)
+ m = self.queue.read()
+
+ def get_results_from_domain(self, path, get_file=True):
+ rs = self.domain.query("['Batch'='%s']" % self.batch)
+ for item in rs:
+ self.process_record(item, path, get_file)
+
+ def get_results_from_bucket(self, path):
+ bucket = self.sd.get_obj('output_bucket')
+ if bucket:
+ print 'No output queue or domain, just retrieving files from output_bucket'
+ for key in bucket:
+ file_name = os.path.join(path, key)
+ print 'retrieving file: %s to %s' % (key, file_name)
+ key.get_contents_to_filename(file_name)
+ self.num_files + 1
+
+ def get_results(self, path, get_file=True, delete_msg=True):
+ if not os.path.isdir(path):
+ os.mkdir(path)
+ if self.queue:
+ self.get_results_from_queue(path, get_file)
+ elif self.domain:
+ self.get_results_from_domain(path, get_file)
+ else:
+ self.get_results_from_bucket(path)
+ if self.log_fp:
+ self.log_fp.close()
+ print '%d results successfully retrieved.' % self.num_files
+ if self.num_files > 0:
+ self.avg_time = float(self.total_time)/self.num_files
+ print 'Minimum Processing Time: %d' % self.min_time.seconds
+ print 'Maximum Processing Time: %d' % self.max_time.seconds
+ print 'Average Processing Time: %f' % self.avg_time
+ self.elapsed_time = self.latest_time-self.earliest_time
+ print 'Elapsed Time: %d' % self.elapsed_time.seconds
+ tput = 1.0 / ((self.elapsed_time.seconds/60.0) / self.num_files)
+ print 'Throughput: %f transactions / minute' % tput
+
diff --git a/boto/services/service.py b/boto/services/service.py
new file mode 100644
index 0000000..8ee1a8b
--- /dev/null
+++ b/boto/services/service.py
@@ -0,0 +1,161 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.services.message import ServiceMessage
+from boto.services.servicedef import ServiceDef
+from boto.pyami.scriptbase import ScriptBase
+from boto.utils import get_ts
+import time
+import os
+import mimetypes
+
+
+class Service(ScriptBase):
+
+ # Time required to process a transaction
+ ProcessingTime = 60
+
+ def __init__(self, config_file=None, mimetype_files=None):
+ ScriptBase.__init__(self, config_file)
+ self.name = self.__class__.__name__
+ self.working_dir = boto.config.get('Pyami', 'working_dir')
+ self.sd = ServiceDef(config_file)
+ self.retry_count = self.sd.getint('retry_count', 5)
+ self.loop_delay = self.sd.getint('loop_delay', 30)
+ self.processing_time = self.sd.getint('processing_time', 60)
+ self.input_queue = self.sd.get_obj('input_queue')
+ self.output_queue = self.sd.get_obj('output_queue')
+ self.output_domain = self.sd.get_obj('output_domain')
+ if mimetype_files:
+ mimetypes.init(mimetype_files)
+
+ def split_key(key):
+ if key.find(';') < 0:
+ t = (key, '')
+ else:
+ key, type = key.split(';')
+ label, mtype = type.split('=')
+ t = (key, mtype)
+ return t
+
+ def read_message(self):
+ boto.log.info('read_message')
+ message = self.input_queue.read(self.processing_time)
+ if message:
+ boto.log.info(message.get_body())
+ key = 'Service-Read'
+ message[key] = get_ts()
+ return message
+
+ # retrieve the source file from S3
+ def get_file(self, message):
+ bucket_name = message['Bucket']
+ key_name = message['InputKey']
+ file_name = os.path.join(self.working_dir, message.get('OriginalFileName', 'in_file'))
+ boto.log.info('get_file: %s/%s to %s' % (bucket_name, key_name, file_name))
+ bucket = boto.lookup('s3', bucket_name)
+ key = bucket.new_key(key_name)
+ key.get_contents_to_filename(os.path.join(self.working_dir, file_name))
+ return file_name
+
+ # process source file, return list of output files
+ def process_file(self, in_file_name, msg):
+ return []
+
+ # store result file in S3
+ def put_file(self, bucket_name, file_path, key_name=None):
+ boto.log.info('putting file %s as %s.%s' % (file_path, bucket_name, key_name))
+ bucket = boto.lookup('s3', bucket_name)
+ key = bucket.new_key(key_name)
+ key.set_contents_from_filename(file_path)
+ return key
+
+ def save_results(self, results, input_message, output_message):
+ output_keys = []
+ for file, type in results:
+ if input_message.has_key('OutputBucket'):
+ output_bucket = input_message['OutputBucket']
+ else:
+ output_bucket = input_message['Bucket']
+ key_name = os.path.split(file)[1]
+ key = self.put_file(output_bucket, file, key_name)
+ output_keys.append('%s;type=%s' % (key.name, type))
+ output_message['OutputKey'] = ','.join(output_keys)
+
+ # write message to each output queue
+ def write_message(self, message):
+ message['Service-Write'] = get_ts()
+ message['Server'] = self.name
+ if os.environ.has_key('HOSTNAME'):
+ message['Host'] = os.environ['HOSTNAME']
+ else:
+ message['Host'] = 'unknown'
+ message['Instance-ID'] = self.instance_id
+ if self.output_queue:
+ boto.log.info('Writing message to SQS queue: %s' % self.output_queue.id)
+ self.output_queue.write(message)
+ if self.output_domain:
+ boto.log.info('Writing message to SDB domain: %s' % self.output_domain.name)
+ item_name = '/'.join([message['Service-Write'], message['Bucket'], message['InputKey']])
+ self.output_domain.put_attributes(item_name, message)
+
+ # delete message from input queue
+ def delete_message(self, message):
+ boto.log.info('deleting message from %s' % self.input_queue.id)
+ self.input_queue.delete_message(message)
+
+ # to clean up any files, etc. after each iteration
+ def cleanup(self):
+ pass
+
+ def shutdown(self):
+ on_completion = self.sd.get('on_completion', 'shutdown')
+ if on_completion == 'shutdown':
+ if self.instance_id:
+ time.sleep(60)
+ c = boto.connect_ec2()
+ c.terminate_instances([self.instance_id])
+
+ def main(self, notify=False):
+ self.notify('Service: %s Starting' % self.name)
+ empty_reads = 0
+ while self.retry_count < 0 or empty_reads < self.retry_count:
+ try:
+ input_message = self.read_message()
+ if input_message:
+ empty_reads = 0
+ output_message = ServiceMessage(None, input_message.get_body())
+ input_file = self.get_file(input_message)
+ results = self.process_file(input_file, output_message)
+ self.save_results(results, input_message, output_message)
+ self.write_message(output_message)
+ self.delete_message(input_message)
+ self.cleanup()
+ else:
+ empty_reads += 1
+ time.sleep(self.loop_delay)
+ except Exception:
+ boto.log.exception('Service Failed')
+ empty_reads += 1
+ self.notify('Service: %s Shutting Down' % self.name)
+ self.shutdown()
+
diff --git a/boto/services/servicedef.py b/boto/services/servicedef.py
new file mode 100644
index 0000000..1cb01aa
--- /dev/null
+++ b/boto/services/servicedef.py
@@ -0,0 +1,91 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.pyami.config import Config
+from boto.services.message import ServiceMessage
+import boto
+
+class ServiceDef(Config):
+
+ def __init__(self, config_file, aws_access_key_id=None, aws_secret_access_key=None):
+ Config.__init__(self, config_file)
+ self.aws_access_key_id = aws_access_key_id
+ self.aws_secret_access_key = aws_secret_access_key
+ script = Config.get(self, 'Pyami', 'scripts')
+ if script:
+ self.name = script.split('.')[-1]
+ else:
+ self.name = None
+
+
+ def get(self, name, default=None):
+ return Config.get(self, self.name, name, default)
+
+ def has_option(self, option):
+ return Config.has_option(self, self.name, option)
+
+ def getint(self, option, default=0):
+ try:
+ val = Config.get(self, self.name, option)
+ val = int(val)
+ except:
+ val = int(default)
+ return val
+
+ def getbool(self, option, default=False):
+ try:
+ val = Config.get(self, self.name, option)
+ if val.lower() == 'true':
+ val = True
+ else:
+ val = False
+ except:
+ val = default
+ return val
+
+ def get_obj(self, name):
+ """
+ Returns the AWS object associated with a given option.
+
+ The heuristics used are a bit lame. If the option name contains
+ the word 'bucket' it is assumed to be an S3 bucket, if the name
+ contains the word 'queue' it is assumed to be an SQS queue and
+ if it contains the word 'domain' it is assumed to be a SimpleDB
+ domain. If the option name specified does not exist in the
+ config file or if the AWS object cannot be retrieved this
+ returns None.
+ """
+ val = self.get(name)
+ if not val:
+ return None
+ if name.find('queue') >= 0:
+ obj = boto.lookup('sqs', val)
+ if obj:
+ obj.set_message_class(ServiceMessage)
+ elif name.find('bucket') >= 0:
+ obj = boto.lookup('s3', val)
+ elif name.find('domain') >= 0:
+ obj = boto.lookup('sdb', val)
+ else:
+ obj = None
+ return obj
+
+
diff --git a/boto/services/sonofmmm.cfg b/boto/services/sonofmmm.cfg
new file mode 100644
index 0000000..d70d379
--- /dev/null
+++ b/boto/services/sonofmmm.cfg
@@ -0,0 +1,43 @@
+#
+# Your AWS Credentials
+# You only need to supply these in this file if you are not using
+# the boto tools to start your service
+#
+#[Credentials]
+#aws_access_key_id = <AWS Access Key Here>
+#aws_secret_access_key = <AWS Secret Key Here>
+
+#
+# Fill out this section if you want emails from the service
+# when it starts and stops
+#
+#[Notification]
+#smtp_host = <your smtp host>
+#smtp_user = <your smtp username, if necessary>
+#smtp_pass = <your smtp password, if necessary>
+#smtp_from = <email address for From: field>
+#smtp_to = <email address for To: field>
+
+[Pyami]
+scripts = boto.services.sonofmmm.SonOfMMM
+
+[SonOfMMM]
+# id of the AMI to be launched
+ami_id = ami-dc799cb5
+# number of times service will read an empty queue before exiting
+# a negative value will cause the service to run forever
+retry_count = 5
+# seconds to wait after empty queue read before reading again
+loop_delay = 10
+# average time it takes to process a transaction
+# controls invisibility timeout of messages
+processing_time = 60
+ffmpeg_args = -y -i %%s -f mov -r 29.97 -b 1200kb -mbd 2 -flags +4mv+trell -aic 2 -cmp 2 -subcmp 2 -ar 48000 -ab 19200 -s 320x240 -vcodec mpeg4 -acodec libfaac %%s
+output_mimetype = video/quicktime
+output_ext = .mov
+input_bucket = <S3 bucket where source videos live>
+output_bucket = <S3 bucket where converted videos should be stored>
+output_domain = <SimpleDB domain to store results - optional>
+output_queue = <SQS queue to store results - optional>
+input_queue = <SQS queue where work to be done will be queued up>
+
diff --git a/boto/services/sonofmmm.py b/boto/services/sonofmmm.py
new file mode 100644
index 0000000..acb7e61
--- /dev/null
+++ b/boto/services/sonofmmm.py
@@ -0,0 +1,81 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import boto
+from boto.services.service import Service
+from boto.services.message import ServiceMessage
+import os
+import mimetypes
+
+class SonOfMMM(Service):
+    """Son of Monster Muck Mash-up: an ffmpeg-based video
+    transcoding service built on the generic Service framework.
+    Reads source videos from an input bucket/queue, converts them
+    with ffmpeg and stores the results in the output bucket.
+    """
+
+    def __init__(self, config_file=None):
+        Service.__init__(self, config_file)
+        # per-instance log file, uploaded to the output bucket on shutdown
+        self.log_file = '%s.log' % self.instance_id
+        self.log_path = os.path.join(self.working_dir, self.log_file)
+        boto.set_file_logger(self.name, self.log_path)
+        # ffmpeg_args is expected to contain two %s placeholders
+        # (input file, output file); see sonofmmm.cfg
+        if self.sd.has_option('ffmpeg_args'):
+            self.command = '/usr/local/bin/ffmpeg ' + self.sd.get('ffmpeg_args')
+        else:
+            self.command = '/usr/local/bin/ffmpeg -y -i %s %s'
+        self.output_mimetype = self.sd.get('output_mimetype')
+        if self.sd.has_option('output_ext'):
+            self.output_ext = self.sd.get('output_ext')
+        else:
+            # derive the output extension from the mimetype if not given
+            self.output_ext = mimetypes.guess_extension(self.output_mimetype)
+        self.output_bucket = self.sd.get_obj('output_bucket')
+        self.input_bucket = self.sd.get_obj('input_bucket')
+        # check to see if there are any messages in the queue;
+        # if not, create messages for all files in input_bucket
+        m = self.input_queue.read(1)
+        if not m:
+            self.queue_files()
+
+    def queue_files(self):
+        # Seed the input queue with one ServiceMessage per key in the
+        # input bucket.
+        boto.log.info('Queueing files from %s' % self.input_bucket.name)
+        for key in self.input_bucket:
+            boto.log.info('Queueing %s' % key.name)
+            m = ServiceMessage()
+            if self.output_bucket:
+                d = {'OutputBucket' : self.output_bucket.name}
+            else:
+                d = None
+            m.for_key(key, d)
+            self.input_queue.write(m)
+
+    def process_file(self, in_file_name, msg):
+        # Run ffmpeg on the input file; return [(out_file, mimetype)]
+        # on success, or an empty list if the conversion failed.
+        base, ext = os.path.splitext(in_file_name)
+        out_file_name = os.path.join(self.working_dir,
+                                     base+self.output_ext)
+        command = self.command % (in_file_name, out_file_name)
+        boto.log.info('running:\n%s' % command)
+        status = self.run(command)
+        if status == 0:
+            return [(out_file_name, self.output_mimetype)]
+        else:
+            return []
+
+    def shutdown(self):
+        # Upload this instance's log file before terminating.
+        if os.path.isfile(self.log_path):
+            if self.output_bucket:
+                key = self.output_bucket.new_key(self.log_file)
+                key.set_contents_from_filename(self.log_path)
+        Service.shutdown(self)
diff --git a/boto/services/submit.py b/boto/services/submit.py
new file mode 100644
index 0000000..89c439c
--- /dev/null
+++ b/boto/services/submit.py
@@ -0,0 +1,88 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import time
+import os
+
+
+class Submitter:
+
+ def __init__(self, sd):
+ self.sd = sd
+ self.input_bucket = self.sd.get_obj('input_bucket')
+ self.output_bucket = self.sd.get_obj('output_bucket')
+ self.output_domain = self.sd.get_obj('output_domain')
+ self.queue = self.sd.get_obj('input_queue')
+
+ def get_key_name(self, fullpath, prefix):
+ key_name = fullpath[len(prefix):]
+ l = key_name.split(os.sep)
+ return '/'.join(l)
+
+ def write_message(self, key, metadata):
+ if self.queue:
+ m = self.queue.new_message()
+ m.for_key(key, metadata)
+ if self.output_bucket:
+ m['OutputBucket'] = self.output_bucket.name
+ self.queue.write(m)
+
+ def submit_file(self, path, metadata=None, cb=None, num_cb=0, prefix='/'):
+ if not metadata:
+ metadata = {}
+ key_name = self.get_key_name(path, prefix)
+ k = self.input_bucket.new_key(key_name)
+ k.update_metadata(metadata)
+ k.set_contents_from_filename(path, replace=False, cb=cb, num_cb=num_cb)
+ self.write_message(k, metadata)
+
+ def submit_path(self, path, tags=None, ignore_dirs=None, cb=None, num_cb=0, status=False, prefix='/'):
+ path = os.path.expanduser(path)
+ path = os.path.expandvars(path)
+ path = os.path.abspath(path)
+ total = 0
+ metadata = {}
+ if tags:
+ metadata['Tags'] = tags
+ l = []
+ for t in time.gmtime():
+ l.append(str(t))
+ metadata['Batch'] = '_'.join(l)
+ if self.output_domain:
+ self.output_domain.put_attributes(metadata['Batch'], {'type' : 'Batch'})
+ if os.path.isdir(path):
+ for root, dirs, files in os.walk(path):
+ if ignore_dirs:
+ for ignore in ignore_dirs:
+ if ignore in dirs:
+ dirs.remove(ignore)
+ for file in files:
+ fullpath = os.path.join(root, file)
+ if status:
+ print 'Submitting %s' % fullpath
+ self.submit_file(fullpath, metadata, cb, num_cb, prefix)
+ total += 1
+ elif os.path.isfile(path):
+ self.submit_file(path, metadata, cb, num_cb)
+ total += 1
+ else:
+ print 'problem with %s' % path
+ return (metadata['Batch'], total)
diff --git a/boto/ses/__init__.py b/boto/ses/__init__.py
new file mode 100644
index 0000000..167080b
--- /dev/null
+++ b/boto/ses/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Harry Marr http://hmarr.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from connection import SESConnection
+
diff --git a/boto/ses/connection.py b/boto/ses/connection.py
new file mode 100644
index 0000000..57a2c7e
--- /dev/null
+++ b/boto/ses/connection.py
@@ -0,0 +1,248 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2011 Harry Marr http://hmarr.com/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.connection import AWSAuthConnection
+from boto.exception import BotoServerError
+import boto
+import boto.jsonresponse
+
+import urllib
+import base64
+
+
+class SESConnection(AWSAuthConnection):
+
+ ResponseError = BotoServerError
+ DefaultHost = 'email.us-east-1.amazonaws.com'
+ APIVersion = '2010-12-01'
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ port=None, proxy=None, proxy_port=None,
+ host=DefaultHost, debug=0):
+ AWSAuthConnection.__init__(self, host, aws_access_key_id,
+ aws_secret_access_key, True, port, proxy,
+ proxy_port, debug=debug)
+
+ def _required_auth_capability(self):
+ return ['ses']
+
+ def _build_list_params(self, params, items, label):
+ """Add an AWS API-compatible parameter list to a dictionary.
+
+ :type params: dict
+ :param params: The parameter dictionary
+
+ :type items: list
+ :param items: Items to be included in the list
+
+ :type label: string
+ :param label: The parameter list's name
+ """
+ if isinstance(items, str):
+ items = [items]
+ for i in range(1, len(items) + 1):
+ params['%s.%d' % (label, i)] = items[i - 1]
+
+
+ def _make_request(self, action, params=None):
+ """Make a call to the SES API.
+
+ :type action: string
+ :param action: The API method to use (e.g. SendRawEmail)
+
+ :type params: dict
+ :param params: Parameters that will be sent as POST data with the API
+ call.
+ """
+ headers = {'Content-Type': 'application/x-www-form-urlencoded'}
+ params = params or {}
+ params['Action'] = action
+ response = super(SESConnection, self).make_request(
+ 'POST',
+ '/',
+ headers=headers,
+ data=urllib.urlencode(params)
+ )
+ body = response.read()
+ if response.status == 200:
+ list_markers = ('VerifiedEmailAddresses', 'SendDataPoints')
+ e = boto.jsonresponse.Element(list_marker=list_markers)
+ h = boto.jsonresponse.XmlHandler(e, None)
+ h.parse(body)
+ return e
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+
+ def send_email(self, source, subject, body, to_addresses, cc_addresses=None,
+ bcc_addresses=None, format='text'):
+ """Composes an email message based on input data, and then immediately
+ queues the message for sending.
+
+ :type source: string
+ :param source: The sender's email address.
+
+ :type subject: string
+ :param subject: The subject of the message: A short summary of the
+ content, which will appear in the recipient's inbox.
+
+ :type body: string
+ :param body: The message body.
+
+ :type to_addresses: list of strings or string
+ :param to_addresses: The To: field(s) of the message.
+
+ :type cc_addresses: list of strings or string
+ :param cc_addresses: The CC: field(s) of the message.
+
+ :type bcc_addresses: list of strings or string
+ :param bcc_addresses: The BCC: field(s) of the message.
+
+ :type format: string
+ :param format: The format of the message's body, must be either "text"
+ or "html".
+
+ """
+ params = {
+ 'Source': source,
+ 'Message.Subject.Data': subject,
+ }
+
+ format = format.lower().strip()
+ if format == 'html':
+ params['Message.Body.Html.Data'] = body
+ elif format == 'text':
+ params['Message.Body.Text.Data'] = body
+ else:
+ raise ValueError("'format' argument must be 'text' or 'html'")
+
+ self._build_list_params(params, to_addresses,
+ 'Destination.ToAddresses.member')
+ if cc_addresses:
+ self._build_list_params(params, cc_addresses,
+ 'Destination.CcAddresses.member')
+
+ if bcc_addresses:
+ self._build_list_params(params, bcc_addresses,
+ 'Destination.BccAddresses.member')
+
+ return self._make_request('SendEmail', params)
+
+ def send_raw_email(self, source, raw_message, destinations=None):
+ """Sends an email message, with header and content specified by the
+ client. The SendRawEmail action is useful for sending multipart MIME
+ emails, with attachments or inline content. The raw text of the message
+ must comply with Internet email standards; otherwise, the message
+ cannot be sent.
+
+ :type source: string
+ :param source: The sender's email address.
+
+ :type raw_message: string
+ :param raw_message: The raw text of the message. The client is
+ responsible for ensuring the following:
+
+ - Message must contain a header and a body, separated by a blank line.
+ - All required header fields must be present.
+ - Each part of a multipart MIME message must be formatted properly.
+ - MIME content types must be among those supported by Amazon SES.
+ Refer to the Amazon SES Developer Guide for more details.
+ - Content must be base64-encoded, if MIME requires it.
+
+ :type destinations: list of strings or string
+ :param destinations: A list of destinations for the message.
+
+ """
+ params = {
+ 'Source': source,
+ 'RawMessage.Data': base64.b64encode(raw_message),
+ }
+
+ self._build_list_params(params, destinations,
+ 'Destinations.member')
+
+ return self._make_request('SendRawEmail', params)
+
+ def list_verified_email_addresses(self):
+ """Fetch a list of the email addresses that have been verified.
+
+ :rtype: dict
+ :returns: A ListVerifiedEmailAddressesResponse structure. Note that
+ keys must be unicode strings.
+ """
+ return self._make_request('ListVerifiedEmailAddresses')
+
+ def get_send_quota(self):
+ """Fetches the user's current activity limits.
+
+ :rtype: dict
+ :returns: A GetSendQuotaResponse structure. Note that keys must be
+ unicode strings.
+ """
+ return self._make_request('GetSendQuota')
+
+ def get_send_statistics(self):
+ """Fetches the user's sending statistics. The result is a list of data
+ points, representing the last two weeks of sending activity.
+
+ Each data point in the list contains statistics for a 15-minute
+ interval.
+
+ :rtype: dict
+ :returns: A GetSendStatisticsResponse structure. Note that keys must be
+ unicode strings.
+ """
+ return self._make_request('GetSendStatistics')
+
+ def delete_verified_email_address(self, email_address):
+ """Deletes the specified email address from the list of verified
+ addresses.
+
+ :type email_adddress: string
+ :param email_address: The email address to be removed from the list of
+ verified addreses.
+
+ :rtype: dict
+ :returns: A DeleteVerifiedEmailAddressResponse structure. Note that
+ keys must be unicode strings.
+ """
+ return self._make_request('DeleteVerifiedEmailAddress', {
+ 'EmailAddress': email_address,
+ })
+
+ def verify_email_address(self, email_address):
+ """Verifies an email address. This action causes a confirmation email
+ message to be sent to the specified address.
+
+ :type email_adddress: string
+ :param email_address: The email address to be verified.
+
+ :rtype: dict
+ :returns: A VerifyEmailAddressResponse structure. Note that keys must
+ be unicode strings.
+ """
+ return self._make_request('VerifyEmailAddress', {
+ 'EmailAddress': email_address,
+ })
+
diff --git a/boto/sns/__init__.py b/boto/sns/__init__.py
new file mode 100644
index 0000000..9c5a7d7
--- /dev/null
+++ b/boto/sns/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2010-2011 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+# this is here for backward compatibility
+# originally, the SNSConnection class was defined here
+from connection import SNSConnection
diff --git a/boto/sns/connection.py b/boto/sns/connection.py
new file mode 100644
index 0000000..2a49adb
--- /dev/null
+++ b/boto/sns/connection.py
@@ -0,0 +1,398 @@
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.connection import AWSQueryConnection
+from boto.sdb.regioninfo import SDBRegionInfo
+import boto
+import uuid
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+#boto.set_stream_logger('sns')
+
+class SNSConnection(AWSQueryConnection):
+
+ DefaultRegionName = 'us-east-1'
+ DefaultRegionEndpoint = 'sns.us-east-1.amazonaws.com'
+ APIVersion = '2010-03-31'
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, region=None, path='/', converter=None):
+ if not region:
+ region = SDBRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
+ self.region = region
+ AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+ self.region.endpoint, debug, https_connection_factory, path)
+
+ def _required_auth_capability(self):
+ return ['sns']
+
+ def get_all_topics(self, next_token=None):
+ """
+ :type next_token: string
+ :param next_token: Token returned by the previous call to
+ this method.
+
+ """
+ params = {'ContentType' : 'JSON'}
+ if next_token:
+ params['NextToken'] = next_token
+ response = self.make_request('ListTopics', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def get_topic_attributes(self, topic):
+ """
+ Get attributes of a Topic
+
+ :type topic: string
+ :param topic: The ARN of the topic.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic}
+ response = self.make_request('GetTopicAttributes', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def add_permission(self, topic, label, account_ids, actions):
+ """
+ Adds a statement to a topic's access control policy, granting
+ access for the specified AWS accounts to the specified actions.
+
+ :type topic: string
+ :param topic: The ARN of the topic.
+
+ :type label: string
+ :param label: A unique identifier for the new policy statement.
+
+ :type account_ids: list of strings
+ :param account_ids: The AWS account ids of the users who will be
+ given access to the specified actions.
+
+ :type actions: list of strings
+ :param actions: The actions you want to allow for each of the
+ specified principal(s).
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic,
+ 'Label' : label}
+ self.build_list_params(params, account_ids, 'AWSAccountId')
+ self.build_list_params(params, actions, 'ActionName')
+ response = self.make_request('AddPermission', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def remove_permission(self, topic, label):
+ """
+ Removes a statement from a topic's access control policy.
+
+ :type topic: string
+ :param topic: The ARN of the topic.
+
+ :type label: string
+ :param label: A unique identifier for the policy statement
+ to be removed.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic,
+ 'Label' : label}
+ response = self.make_request('RemovePermission', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def create_topic(self, topic):
+ """
+ Create a new Topic.
+
+ :type topic: string
+ :param topic: The name of the new topic.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'Name' : topic}
+ response = self.make_request('CreateTopic', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def delete_topic(self, topic):
+ """
+ Delete an existing topic
+
+ :type topic: string
+ :param topic: The ARN of the topic
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic}
+ response = self.make_request('DeleteTopic', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+
+
+ def publish(self, topic, message, subject=None):
+ """
+ Publish a message to a topic.
+
+ :type topic: string
+ :param topic: The ARN of the new topic.
+
+ :type message: string
+ :param message: The message you want to send to the topic.
+ Messages must be UTF-8 encoded strings and
+ be at most 4KB in size.
+
+ :type subject: string
+ :param subject: Optional parameter to be used as the "Subject"
+ line of the email notifications.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic,
+ 'Message' : message}
+ if subject:
+ params['Subject'] = subject
+ response = self.make_request('Publish', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def subscribe(self, topic, protocol, endpoint):
+ """
+ Subscribe to a Topic.
+
+ :type topic: string
+ :param topic: The ARN of the topic to subscribe to.
+
+ :type protocol: string
+ :param protocol: The protocol used to communicate with
+ the subscriber. Current choices are:
+ email|email-json|http|https|sqs
+
+ :type endpoint: string
+ :param endpoint: The location of the endpoint for
+ the subscriber.
+ * For email, this would be a valid email address
+ * For email-json, this would be a valid email address
+ * For http, this would be a URL beginning with http
+ * For https, this would be a URL beginning with https
+ * For sqs, this would be the ARN of an SQS Queue
+
+ :rtype: dict
+ :return: The JSON response from the Subscribe request.
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic,
+ 'Protocol' : protocol,
+ 'Endpoint' : endpoint}
+ response = self.make_request('Subscribe', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def subscribe_sqs_queue(self, topic, queue):
+ """
+ Subscribe an SQS queue to a topic.
+
+ This is convenience method that handles most of the complexity involved
+ in using an SQS queue as an endpoint for an SNS topic. To achieve this
+ the following operations are performed:
+
+ * The correct ARN is constructed for the SQS queue and that ARN is
+ then subscribed to the topic.
+ * A JSON policy document is constructed that grants permission to
+ the SNS topic to send messages to the SQS queue.
+ * This JSON policy is then associated with the SQS queue using
+ the queue's set_attribute method. If the queue already has
+ a policy associated with it, this process will add a Statement to
+ that policy. If no policy exists, a new policy will be created.
+
+ :type topic: string
+ :param topic: The name of the new topic.
+
+ :type queue: A boto Queue object
+ :param queue: The queue you wish to subscribe to the SNS Topic.
+ """
+ t = queue.id.split('/')
+ q_arn = 'arn:aws:sqs:%s:%s:%s' % (queue.connection.region.name,
+ t[1], t[2])
+ resp = self.subscribe(topic, 'sqs', q_arn)
+ policy = queue.get_attributes('Policy')
+ if 'Version' not in policy:
+ policy['Version'] = '2008-10-17'
+ if 'Statement' not in policy:
+ policy['Statement'] = []
+ statement = {'Action' : 'SQS:SendMessage',
+ 'Effect' : 'Allow',
+ 'Principal' : {'AWS' : '*'},
+ 'Resource' : q_arn,
+ 'Sid' : str(uuid.uuid4()),
+ 'Condition' : {'StringLike' : {'aws:SourceArn' : topic}}}
+ policy['Statement'].append(statement)
+ queue.set_attribute('Policy', json.dumps(policy))
+ return resp
+
+ def confirm_subscription(self, topic, token,
+ authenticate_on_unsubscribe=False):
+ """
+ Confirm a subscription to a topic.
+
+ :type topic: string
+ :param topic: The ARN of the new topic.
+
+ :type token: string
+ :param token: Short-lived token sent to an endpoint during
+ the Subscribe operation.
+
+ :type authenticate_on_unsubscribe: bool
+ :param authenticate_on_unsubscribe: Optional parameter indicating
+ that you wish to disable
+ unauthenticated unsubscription
+ of the subscription.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic,
+ 'Token' : token}
+ if authenticate_on_unsubscribe:
+ params['AuthenticateOnUnsubscribe'] = 'true'
+ response = self.make_request('ConfirmSubscription', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def unsubscribe(self, subscription):
+ """
+ Allows endpoint owner to delete subscription.
+ Confirmation message will be delivered.
+
+ :type subscription: string
+ :param subscription: The ARN of the subscription to be deleted.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'SubscriptionArn' : subscription}
+ response = self.make_request('Unsubscribe', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def get_all_subscriptions(self, next_token=None):
+ """
+ Get list of all subscriptions.
+
+ :type next_token: string
+ :param next_token: Token returned by the previous call to
+ this method.
+
+ """
+ params = {'ContentType' : 'JSON'}
+ if next_token:
+ params['NextToken'] = next_token
+ response = self.make_request('ListSubscriptions', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
+ def get_all_subscriptions_by_topic(self, topic, next_token=None):
+ """
+ Get list of all subscriptions to a specific topic.
+
+ :type topic: string
+ :param topic: The ARN of the topic for which you wish to
+ find subscriptions.
+
+ :type next_token: string
+ :param next_token: Token returned by the previous call to
+ this method.
+
+ """
+ params = {'ContentType' : 'JSON',
+ 'TopicArn' : topic}
+ if next_token:
+ params['NextToken'] = next_token
+ response = self.make_request('ListSubscriptionsByTopic', params, '/', 'GET')
+ body = response.read()
+ if response.status == 200:
+ return json.loads(body)
+ else:
+ boto.log.error('%s %s' % (response.status, response.reason))
+ boto.log.error('%s' % body)
+ raise self.ResponseError(response.status, response.reason, body)
+
diff --git a/boto/sqs/__init__.py b/boto/sqs/__init__.py
new file mode 100644
index 0000000..463c42c
--- /dev/null
+++ b/boto/sqs/__init__.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from regioninfo import SQSRegionInfo
+
+def regions():
+ """
+ Get all available regions for the SQS service.
+
+ :rtype: list
+ :return: A list of :class:`boto.ec2.regioninfo.RegionInfo`
+ """
+ return [SQSRegionInfo(name='us-east-1',
+ endpoint='queue.amazonaws.com'),
+ SQSRegionInfo(name='eu-west-1',
+ endpoint='eu-west-1.queue.amazonaws.com'),
+ SQSRegionInfo(name='us-west-1',
+ endpoint='us-west-1.queue.amazonaws.com'),
+ SQSRegionInfo(name='ap-southeast-1',
+ endpoint='ap-southeast-1.queue.amazonaws.com')
+ ]
+
+def connect_to_region(region_name):
+ for region in regions():
+ if region.name == region_name:
+ return region.connect()
+ return None
diff --git a/boto/sqs/attributes.py b/boto/sqs/attributes.py
new file mode 100644
index 0000000..26c7204
--- /dev/null
+++ b/boto/sqs/attributes.py
@@ -0,0 +1,46 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an SQS Attribute Name/Value set
+"""
+
+class Attributes(dict):
+
+ def __init__(self, parent):
+ self.parent = parent
+ self.current_key = None
+ self.current_value = None
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ if name == 'Attribute':
+ self[self.current_key] = self.current_value
+ elif name == 'Name':
+ self.current_key = value
+ elif name == 'Value':
+ self.current_value = value
+ else:
+ setattr(self, name, value)
+
+
diff --git a/boto/sqs/connection.py b/boto/sqs/connection.py
new file mode 100644
index 0000000..240fc72
--- /dev/null
+++ b/boto/sqs/connection.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.connection import AWSQueryConnection
+from boto.sqs.regioninfo import SQSRegionInfo
+from boto.sqs.queue import Queue
+from boto.sqs.message import Message
+from boto.sqs.attributes import Attributes
+from boto.exception import SQSError
+
+
+class SQSConnection(AWSQueryConnection):
+ """
+ A Connection to the SQS Service.
+ """
+ DefaultRegionName = 'us-east-1'
+ DefaultRegionEndpoint = 'queue.amazonaws.com'
+ APIVersion = '2009-02-01'
+ DefaultContentType = 'text/plain'
+ ResponseError = SQSError
+
+ def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
+ is_secure=True, port=None, proxy=None, proxy_port=None,
+ proxy_user=None, proxy_pass=None, debug=0,
+ https_connection_factory=None, region=None, path='/'):
+ if not region:
+ region = SQSRegionInfo(self, self.DefaultRegionName, self.DefaultRegionEndpoint)
+ self.region = region
+ AWSQueryConnection.__init__(self, aws_access_key_id, aws_secret_access_key,
+ is_secure, port, proxy, proxy_port, proxy_user, proxy_pass,
+ self.region.endpoint, debug, https_connection_factory, path)
+
+ def _required_auth_capability(self):
+ return ['sqs']
+
+ def create_queue(self, queue_name, visibility_timeout=None):
+ """
+ Create an SQS Queue.
+
+ :type queue_name: str or unicode
+ :param queue_name: The name of the new queue. Names are scoped to an account and need to
+ be unique within that account. Calling this method on an existing
+ queue name will not return an error from SQS unless the value for
+ visibility_timeout is different than the value of the existing queue
+ of that name. This is still an expensive operation, though, and not
+ the preferred way to check for the existence of a queue. See the
+ :func:`boto.sqs.connection.SQSConnection.lookup` method.
+
+ :type visibility_timeout: int
+ :param visibility_timeout: The default visibility timeout for all messages written in the
+ queue. This can be overridden on a per-message.
+
+ :rtype: :class:`boto.sqs.queue.Queue`
+ :return: The newly created queue.
+
+ """
+ params = {'QueueName': queue_name}
+ if visibility_timeout:
+ params['DefaultVisibilityTimeout'] = '%d' % (visibility_timeout,)
+ return self.get_object('CreateQueue', params, Queue)
+
+ def delete_queue(self, queue, force_deletion=False):
+ """
+ Delete an SQS Queue.
+
+ :type queue: A Queue object
+ :param queue: The SQS queue to be deleted
+
+ :type force_deletion: Boolean
+ :param force_deletion: Normally, SQS will not delete a queue that contains messages.
+ However, if the force_deletion argument is True, the
+ queue will be deleted regardless of whether there are messages in
+ the queue or not. USE WITH CAUTION. This will delete all
+ messages in the queue as well.
+
+ :rtype: bool
+ :return: True if the command succeeded, False otherwise
+ """
+ return self.get_status('DeleteQueue', None, queue.id)
+
+ def get_queue_attributes(self, queue, attribute='All'):
+ """
+ Gets one or all attributes of a Queue
+
+ :type queue: A Queue object
+ :param queue: The SQS queue to be deleted
+
+ :type attribute: str
+ :param attribute: The specific attribute requested. If not supplied, the default
+ is to return all attributes. Valid attributes are:
+ ApproximateNumberOfMessages,
+ ApproximateNumberOfMessagesNotVisible,
+ VisibilityTimeout,
+ CreatedTimestamp,
+ LastModifiedTimestamp,
+ Policy
+
+ :rtype: :class:`boto.sqs.attributes.Attributes`
+ :return: An Attributes object containing request value(s).
+ """
+ params = {'AttributeName' : attribute}
+ return self.get_object('GetQueueAttributes', params, Attributes, queue.id)
+
+ def set_queue_attribute(self, queue, attribute, value):
+ params = {'Attribute.Name' : attribute, 'Attribute.Value' : value}
+ return self.get_status('SetQueueAttributes', params, queue.id)
+
+ def receive_message(self, queue, number_messages=1, visibility_timeout=None,
+ attributes=None):
+ """
+ Read messages from an SQS Queue.
+
+ :type queue: A Queue object
+ :param queue: The Queue from which messages are read.
+
+ :type number_messages: int
+ :param number_messages: The maximum number of messages to read (default=1)
+
+ :type visibility_timeout: int
+ :param visibility_timeout: The number of seconds the message should remain invisible
+ to other queue readers (default=None which uses the Queues default)
+
+ :type attributes: str
+ :param attributes: The name of additional attribute to return with response
+ or All if you want all attributes. The default is to
+ return no additional attributes. Valid values:
+ All
+ SenderId
+ SentTimestamp
+ ApproximateReceiveCount
+ ApproximateFirstReceiveTimestamp
+
+ :rtype: list
+ :return: A list of :class:`boto.sqs.message.Message` objects.
+ """
+ params = {'MaxNumberOfMessages' : number_messages}
+ if visibility_timeout:
+ params['VisibilityTimeout'] = visibility_timeout
+ if attributes:
+ self.build_list_params(params, attributes, 'AttributeName')
+ return self.get_list('ReceiveMessage', params, [('Message', queue.message_class)],
+ queue.id, queue)
+
+ def delete_message(self, queue, message):
+ """
+ Delete a message from a queue.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object
+ :param queue: The Queue from which messages are read.
+
+ :type message: A :class:`boto.sqs.message.Message` object
+ :param message: The Message to be deleted
+
+ :rtype: bool
+ :return: True if successful, False otherwise.
+ """
+ params = {'ReceiptHandle' : message.receipt_handle}
+ return self.get_status('DeleteMessage', params, queue.id)
+
+ def delete_message_from_handle(self, queue, receipt_handle):
+ """
+ Delete a message from a queue, given a receipt handle.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object
+ :param queue: The Queue from which messages are read.
+
+ :type receipt_handle: str
+ :param receipt_handle: The receipt handle for the message
+
+ :rtype: bool
+ :return: True if successful, False otherwise.
+ """
+ params = {'ReceiptHandle' : receipt_handle}
+ return self.get_status('DeleteMessage', params, queue.id)
+
+ def send_message(self, queue, message_content):
+ params = {'MessageBody' : message_content}
+ return self.get_object('SendMessage', params, Message, queue.id, verb='POST')
+
+ def change_message_visibility(self, queue, receipt_handle, visibility_timeout):
+ """
+ Extends the read lock timeout for the specified message from the specified queue
+ to the specified value.
+
+ :type queue: A :class:`boto.sqs.queue.Queue` object
+ :param queue: The Queue from which messages are read.
+
+ :type receipt_handle: str
+ :param receipt_handle: The receipt handle associated with the message whose
+ visibility timeout will be changed.
+
+ :type visibility_timeout: int
+ :param visibility_timeout: The new value of the message's visibility timeout
+ in seconds.
+ """
+ params = {'ReceiptHandle' : receipt_handle,
+ 'VisibilityTimeout' : visibility_timeout}
+ return self.get_status('ChangeMessageVisibility', params, queue.id)
+
+ def get_all_queues(self, prefix=''):
+ params = {}
+ if prefix:
+ params['QueueNamePrefix'] = prefix
+ return self.get_list('ListQueues', params, [('QueueUrl', Queue)])
+
+ def get_queue(self, queue_name):
+ rs = self.get_all_queues(queue_name)
+ for q in rs:
+ if q.url.endswith(queue_name):
+ return q
+ return None
+
+ lookup = get_queue
+
+ #
+ # Permissions methods
+ #
+
+ def add_permission(self, queue, label, aws_account_id, action_name):
+ """
+ Add a permission to a queue.
+
+ :type queue: :class:`boto.sqs.queue.Queue`
+ :param queue: The queue object
+
+ :type label: str or unicode
+ :param label: A unique identification of the permission you are setting.
+ Maximum of 80 characters ``[0-9a-zA-Z_-]``
+ Example, AliceSendMessage
+
+ :type aws_account_id: str or unicode
+ :param aws_account_id: The AWS account number of the principal who will be given
+ permission. The principal must have an AWS account, but
+ does not need to be signed up for Amazon SQS. For information
+ about locating the AWS account identification.
+
+ :type action_name: str or unicode
+ :param action_name: The action. Valid choices are:
+ \*|SendMessage|ReceiveMessage|DeleteMessage|
+ ChangeMessageVisibility|GetQueueAttributes
+
+ :rtype: bool
+ :return: True if successful, False otherwise.
+
+ """
+ params = {'Label': label,
+ 'AWSAccountId' : aws_account_id,
+ 'ActionName' : action_name}
+ return self.get_status('AddPermission', params, queue.id)
+
+ def remove_permission(self, queue, label):
+ """
+ Remove a permission from a queue.
+
+ :type queue: :class:`boto.sqs.queue.Queue`
+ :param queue: The queue object
+
+ :type label: str or unicode
+ :param label: The unique label associated with the permission being removed.
+
+ :rtype: bool
+ :return: True if successful, False otherwise.
+ """
+ params = {'Label': label}
+ return self.get_status('RemovePermission', params, queue.id)
+
+
+
+
+
diff --git a/boto/sqs/jsonmessage.py b/boto/sqs/jsonmessage.py
new file mode 100644
index 0000000..24a3be2
--- /dev/null
+++ b/boto/sqs/jsonmessage.py
@@ -0,0 +1,45 @@
+# Copyright (c) 2006-2008 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+from boto.sqs.message import MHMessage
+from boto.exception import SQSDecodeError
+import base64
+try:
+ import json
+except ImportError:
+ import simplejson as json
+
+class JSONMessage(MHMessage):
+ """
+ Acts like a dictionary but encodes its data as a Base64 encoded JSON payload.
+ """
+
+ def decode(self, value):
+ try:
+ value = base64.b64decode(value)
+ value = json.loads(value)
+ except:
+ raise SQSDecodeError('Unable to decode message', self)
+ return value
+
+ def encode(self, value):
+ value = json.dumps(value)
+ return base64.b64encode(value)
diff --git a/boto/sqs/message.py b/boto/sqs/message.py
new file mode 100644
index 0000000..8fabd47
--- /dev/null
+++ b/boto/sqs/message.py
@@ -0,0 +1,251 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+SQS Message
+
+A Message represents the data stored in an SQS queue. The rules for what is allowed within an SQS
+Message are here:
+
+ http://docs.amazonwebservices.com/AWSSimpleQueueService/2008-01-01/SQSDeveloperGuide/Query_QuerySendMessage.html
+
+So, at its simplest level a Message just needs to allow a developer to store bytes in it and get the bytes
+back out. However, to allow messages to have richer semantics, the Message class must support the
+following interfaces:
+
+The constructor for the Message class must accept a keyword parameter "queue" which is an instance of a
+boto Queue object and represents the queue that the message will be stored in. The default value for
+this parameter is None.
+
+The constructor for the Message class must accept a keyword parameter "body" which represents the
+content or body of the message. The format of this parameter will depend on the behavior of the
+particular Message subclass. For example, if the Message subclass provides dictionary-like behavior to the
+user the body passed to the constructor should be a dict-like object that can be used to populate
+the initial state of the message.
+
+The Message class must provide an encode method that accepts a value of the same type as the body
+parameter of the constructor and returns a string of characters that are able to be stored in an
+SQS message body (see rules above).
+
+The Message class must provide a decode method that accepts a string of characters that can be
+stored (and probably were stored!) in an SQS message and return an object of a type that is consistent
+with the "body" parameter accepted on the class constructor.
+
+The Message class must provide a __len__ method that will return the size of the encoded message
+that would be stored in SQS based on the current state of the Message object.
+
+The Message class must provide a get_body method that will return the body of the message in the
+same format accepted in the constructor of the class.
+
+The Message class must provide a set_body method that accepts a message body in the same format
+accepted by the constructor of the class. This method should alter the internal state of the
+Message object to reflect the state represented in the message body parameter.
+
+The Message class must provide a get_body_encoded method that returns the current body of the message
+in the format in which it would be stored in SQS.
+"""
+
+import base64
+import StringIO
+from boto.sqs.attributes import Attributes
+from boto.exception import SQSDecodeError
+
class RawMessage:
    """
    Base class for SQS messages.  RawMessage applies no transformation
    at all: whatever is stored in the body is exactly what gets written
    to SQS, and whatever comes back from SQS becomes the body verbatim.
    """

    def __init__(self, queue=None, body=''):
        self.queue = queue
        self.id = None
        self.receipt_handle = None
        self.md5 = None
        self.set_body(body)
        self.attributes = Attributes(self)

    def __len__(self):
        # Size is measured on the encoded form, i.e. the bytes that
        # would actually be sent to SQS.
        return len(self.get_body_encoded())

    def startElement(self, name, attrs, connection):
        # SAX-style hook used by boto's XML handler.
        if name != 'Attribute':
            return None
        return self.attributes

    def endElement(self, name, value, connection):
        # SAX-style hook: map known response elements onto attributes;
        # anything unrecognized is stored under its own element name.
        if name == 'Body':
            self.set_body(self.decode(value))
            return
        known = {'MessageId': 'id',
                 'ReceiptHandle': 'receipt_handle',
                 'MD5OfMessageBody': 'md5'}
        setattr(self, known.get(name, name), value)

    def encode(self, value):
        """Identity transform: the raw body is already wire-ready."""
        return value

    def decode(self, value):
        """Identity transform: store exactly what SQS returned."""
        return value

    def set_body(self, body):
        """Replace the current message body (decoded form)."""
        self._body = body

    def get_body(self):
        """Return the message body in decoded form."""
        return self._body

    def get_body_encoded(self):
        """
        Return the body as it would be stored in SQS.  Semi-private:
        used by Queue.write when sending the message; callers normally
        do not need it.
        """
        return self.encode(self.get_body())

    def delete(self):
        """Delete this message from its queue, if it belongs to one."""
        if self.queue:
            return self.queue.delete_message(self)

    def change_visibility(self, visibility_timeout):
        """Change this message's visibility timeout via its queue."""
        if self.queue:
            self.queue.connection.change_message_visibility(
                self.queue, self.receipt_handle, visibility_timeout)
+
class Message(RawMessage):
    """
    The default Message class used for SQS queues. This class automatically
    encodes/decodes the message body using Base64 encoding to avoid any
    illegal characters in the message body. See:

    http://developer.amazonwebservices.com/connect/thread.jspa?messageID=49680%EC%88%90

    for details on why this is a good idea. The encode/decode is meant to
    be transparent to the end-user.
    """

    def encode(self, value):
        """Base64-encode the body for safe storage in SQS."""
        return base64.b64encode(value)

    def decode(self, value):
        """Base64-decode a body read back from SQS.

        :raises SQSDecodeError: if the payload is not valid Base64.
        """
        try:
            value = base64.b64decode(value)
        # Catch Exception rather than using a bare ``except`` so that
        # SystemExit/KeyboardInterrupt are not masked as decode errors.
        except Exception:
            raise SQSDecodeError('Unable to decode message', self)
        return value
+
class MHMessage(Message):
    """
    The MHMessage class provides a message that provides RFC821-like
    headers like this:

    HeaderName: HeaderValue

    The encoding/decoding of this is handled automatically and after
    the message body has been read, the message instance can be treated
    like a mapping object, i.e. m['HeaderName'] would return 'HeaderValue'.
    """

    def __init__(self, queue=None, body=None, xml_attrs=None):
        # Normalize a missing or empty body to an empty dict so the
        # mapping interface below always has something to operate on.
        # xml_attrs is accepted for backward compatibility but is
        # currently unused.
        if body is None or body == '':
            body = {}
        Message.__init__(self, queue, body)

    def decode(self, value):
        """Parse 'Header: value' lines into a dict body.

        :raises SQSDecodeError: if the payload cannot be parsed.
        """
        try:
            msg = {}
            fp = StringIO.StringIO(value)
            line = fp.readline()
            while line:
                delim = line.find(':')
                key = line[0:delim]
                value = line[delim+1:].strip()
                msg[key.strip()] = value.strip()
                line = fp.readline()
        # Catch Exception rather than a bare ``except`` so that
        # SystemExit/KeyboardInterrupt are not swallowed.
        except Exception:
            raise SQSDecodeError('Unable to decode message', self)
        return msg

    def encode(self, value):
        """Serialize the dict body into 'Header: value' lines."""
        s = ''
        for item in value.items():
            s = s + '%s: %s\n' % (item[0], item[1])
        return s

    def __getitem__(self, key):
        if self._body.has_key(key):
            return self._body[key]
        else:
            raise KeyError(key)

    def __setitem__(self, key, value):
        self._body[key] = value
        self.set_body(self._body)

    def keys(self):
        return self._body.keys()

    def values(self):
        return self._body.values()

    def items(self):
        return self._body.items()

    def has_key(self, key):
        return self._body.has_key(key)

    def update(self, d):
        self._body.update(d)
        self.set_body(self._body)

    def get(self, key, default=None):
        return self._body.get(key, default)
+
class EncodedMHMessage(MHMessage):
    """
    The EncodedMHMessage class provides a message that provides RFC821-like
    headers like this:

    HeaderName: HeaderValue

    This variation encodes/decodes the body of the message in base64 automatically.
    The message instance can be treated like a mapping object,
    i.e. m['HeaderName'] would return 'HeaderValue'.
    """

    def decode(self, value):
        """Base64-decode the payload, then parse it as header lines.

        :raises SQSDecodeError: if the payload is not valid Base64.
        """
        try:
            value = base64.b64decode(value)
        # Narrowed from a bare ``except`` so SystemExit/KeyboardInterrupt
        # are not masked as decode errors.
        except Exception:
            raise SQSDecodeError('Unable to decode message', self)
        return MHMessage.decode(self, value)

    def encode(self, value):
        """Serialize the header dict and Base64-encode the result."""
        value = MHMessage.encode(self, value)
        return base64.b64encode(value)
+
diff --git a/boto/sqs/queue.py b/boto/sqs/queue.py
new file mode 100644
index 0000000..9965e43
--- /dev/null
+++ b/boto/sqs/queue.py
@@ -0,0 +1,414 @@
+# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents an SQS Queue
+"""
+
+import urlparse
+from boto.sqs.message import Message
+
+
class Queue:
    """
    Represents an SQS queue and provides methods for sending, receiving,
    counting and administering messages, plus utilities for dumping queue
    contents to files or S3 and loading them back.
    """

    def __init__(self, connection=None, url=None, message_class=Message):
        self.connection = connection
        self.url = url
        self.message_class = message_class
        self.visibility_timeout = None

    def _id(self):
        # The queue id is the path portion of the queue URL.
        if self.url:
            val = urlparse.urlparse(self.url)[2]
        else:
            val = self.url
        return val
    id = property(_id)

    def _name(self):
        # Queue URLs have the form scheme://host/<account_id>/<queue_name>,
        # so the name is the second component of the URL path.
        if self.url:
            val = urlparse.urlparse(self.url)[2].split('/')[2]
        else:
            val = self.url
        return val
    name = property(_name)

    def startElement(self, name, attrs, connection):
        # SAX-style hook used by boto's XML response handler.
        return None

    def endElement(self, name, value, connection):
        # SAX-style hook: capture known elements, store the rest verbatim.
        if name == 'QueueUrl':
            self.url = value
        elif name == 'VisibilityTimeout':
            self.visibility_timeout = int(value)
        else:
            setattr(self, name, value)

    def set_message_class(self, message_class):
        """
        Set the message class that should be used when instantiating messages read
        from the queue. By default, the class boto.sqs.message.Message is used but
        this can be overridden with any class that behaves like a message.

        :type message_class: Message-like class
        :param message_class: The new Message class
        """
        self.message_class = message_class

    def get_attributes(self, attributes='All'):
        """
        Retrieves attributes about this queue object and returns
        them in an Attribute instance (subclass of a Dictionary).

        :type attributes: string
        :param attributes: String containing one of:
                           ApproximateNumberOfMessages,
                           ApproximateNumberOfMessagesNotVisible,
                           VisibilityTimeout,
                           CreatedTimestamp,
                           LastModifiedTimestamp,
                           Policy
        :rtype: Attribute object
        :return: An Attribute object which is a mapping type holding the
                 requested name/value pairs
        """
        return self.connection.get_queue_attributes(self, attributes)

    def set_attribute(self, attribute, value):
        """
        Set a new value for an attribute of the Queue.

        :type attribute: String
        :param attribute: The name of the attribute you want to set.  The
                           only valid value at this time is: VisibilityTimeout
        :type value: int
        :param value: The new value for the attribute.
            For VisibilityTimeout the value must be an
            integer number of seconds from 0 to 86400.

        :rtype: bool
        :return: True if successful, otherwise False.
        """
        return self.connection.set_queue_attribute(self, attribute, value)

    def get_timeout(self):
        """
        Get the visibility timeout for the queue.

        :rtype: int
        :return: The number of seconds as an integer.
        """
        a = self.get_attributes('VisibilityTimeout')
        return int(a['VisibilityTimeout'])

    def set_timeout(self, visibility_timeout):
        """
        Set the visibility timeout for the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The desired timeout in seconds
        """
        retval = self.set_attribute('VisibilityTimeout', visibility_timeout)
        if retval:
            # Only cache the new value once the service accepted it.
            self.visibility_timeout = visibility_timeout
        return retval

    def add_permission(self, label, aws_account_id, action_name):
        """
        Add a permission to a queue.

        :type label: str or unicode
        :param label: A unique identification of the permission you are setting.
                      Maximum of 80 characters ``[0-9a-zA-Z_-]``
                      Example, AliceSendMessage

        :type aws_account_id: str or unicode
        :param aws_account_id: The AWS account number of the principal who will be given
                               permission.  The principal must have an AWS account, but
                               does not need to be signed up for Amazon SQS. For information
                               about locating the AWS account identification.

        :type action_name: str or unicode
        :param action_name: The action.  Valid choices are:
                            \*|SendMessage|ReceiveMessage|DeleteMessage|
                            ChangeMessageVisibility|GetQueueAttributes

        :rtype: bool
        :return: True if successful, False otherwise.

        """
        return self.connection.add_permission(self, label, aws_account_id, action_name)

    def remove_permission(self, label):
        """
        Remove a permission from a queue.

        :type label: str or unicode
        :param label: The unique label associated with the permission being removed.

        :rtype: bool
        :return: True if successful, False otherwise.
        """
        return self.connection.remove_permission(self, label)

    def read(self, visibility_timeout=None):
        """
        Read a single message from the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The timeout for this message in seconds

        :rtype: :class:`boto.sqs.message.Message`
        :return: A single message or None if queue is empty
        """
        rs = self.get_messages(1, visibility_timeout)
        if len(rs) == 1:
            return rs[0]
        else:
            return None

    def write(self, message):
        """
        Add a single message to the queue.

        :type message: Message
        :param message: The message to be written to the queue

        :rtype: :class:`boto.sqs.message.Message`
        :return: The :class:`boto.sqs.message.Message` object that was written.
        """
        new_msg = self.connection.send_message(self, message.get_body_encoded())
        # Propagate the service-assigned id/md5 back onto the caller's object.
        message.id = new_msg.id
        message.md5 = new_msg.md5
        return message

    def new_message(self, body=''):
        """
        Create new message of appropriate class.

        :type body: message body
        :param body: The body of the newly created message (optional).

        :rtype: :class:`boto.sqs.message.Message`
        :return: A new Message object
        """
        m = self.message_class(self, body)
        m.queue = self
        return m

    def get_messages(self, num_messages=1, visibility_timeout=None,
                     attributes=None):
        """
        Get a variable number of messages.

        :type num_messages: int
        :param num_messages: The maximum number of messages to read from the queue.

        :type visibility_timeout: int
        :param visibility_timeout: The VisibilityTimeout for the messages read.

        :type attributes: str
        :param attributes: The name of additional attribute to return with response
                           or All if you want all attributes.  The default is to
                           return no additional attributes.  Valid values:
                           All
                           SenderId
                           SentTimestamp
                           ApproximateReceiveCount
                           ApproximateFirstReceiveTimestamp

        :rtype: list
        :return: A list of :class:`boto.sqs.message.Message` objects.
        """
        return self.connection.receive_message(self, number_messages=num_messages,
                                               visibility_timeout=visibility_timeout,
                                               attributes=attributes)

    def delete_message(self, message):
        """
        Delete a message from the queue.

        :type message: :class:`boto.sqs.message.Message`
        :param message: The :class:`boto.sqs.message.Message` object to delete.

        :rtype: bool
        :return: True if successful, False otherwise
        """
        return self.connection.delete_message(self, message)

    def delete(self):
        """
        Delete the queue.
        """
        return self.connection.delete_queue(self)

    def clear(self, page_size=10, vtimeout=10):
        """Utility function to remove all messages from a queue.

        Returns the number of messages deleted.
        """
        n = 0
        l = self.get_messages(page_size, vtimeout)
        while l:
            for m in l:
                self.delete_message(m)
                n += 1
            l = self.get_messages(page_size, vtimeout)
        return n

    def count(self, page_size=10, vtimeout=10):
        """
        Utility function to count the number of messages in a queue.
        Note: This function now calls GetQueueAttributes to obtain
        an 'approximate' count of the number of messages in a queue.
        The page_size and vtimeout parameters are no longer used; they
        are retained only for backward compatibility with older callers.
        """
        a = self.get_attributes('ApproximateNumberOfMessages')
        return int(a['ApproximateNumberOfMessages'])

    def count_slow(self, page_size=10, vtimeout=10):
        """
        Deprecated.  This is the old 'count' method that actually counts
        the messages by reading them all.  This gives an accurate count but
        is very slow for queues with non-trivial number of messages.
        Instead, use get_attribute('ApproximateNumberOfMessages') to take
        advantage of the new SQS capability.  This is retained only for
        the unit tests.
        """
        n = 0
        l = self.get_messages(page_size, vtimeout)
        while l:
            for m in l:
                n += 1
            l = self.get_messages(page_size, vtimeout)
        return n

    def dump(self, file_name, page_size=10, vtimeout=10, sep='\n'):
        """Utility function to dump the messages in a queue to a file
        NOTE: Page size must be < 10 else SQS errors"""
        fp = open(file_name, 'wb')
        n = 0
        l = self.get_messages(page_size, vtimeout)
        while l:
            for m in l:
                fp.write(m.get_body())
                if sep:
                    fp.write(sep)
                n += 1
            l = self.get_messages(page_size, vtimeout)
        fp.close()
        return n

    def save_to_file(self, fp, sep='\n'):
        """
        Read all messages from the queue and persist them to file-like object.
        Messages are written to the file and the 'sep' string is written
        in between messages.  Messages are deleted from the queue after
        being written to the file.
        Returns the number of messages saved.
        """
        n = 0
        m = self.read()
        while m:
            n += 1
            fp.write(m.get_body())
            if sep:
                fp.write(sep)
            self.delete_message(m)
            m = self.read()
        return n

    def save_to_filename(self, file_name, sep='\n'):
        """
        Read all messages from the queue and persist them to local file.
        Messages are written to the file and the 'sep' string is written
        in between messages.  Messages are deleted from the queue after
        being written to the file.
        Returns the number of messages saved.
        """
        fp = open(file_name, 'wb')
        n = self.save_to_file(fp, sep)
        fp.close()
        return n

    # for backwards compatibility
    save = save_to_filename

    def save_to_s3(self, bucket):
        """
        Read all messages from the queue and persist them to S3.
        Messages are stored in the S3 bucket using a naming scheme of::

            <queue_id>/<message_id>

        Messages are deleted from the queue after being saved to S3.
        Returns the number of messages saved.
        """
        n = 0
        m = self.read()
        while m:
            n += 1
            key = bucket.new_key('%s/%s' % (self.id, m.id))
            key.set_contents_from_string(m.get_body())
            self.delete_message(m)
            m = self.read()
        return n

    def load_from_s3(self, bucket, prefix=None):
        """
        Load messages previously saved to S3.
        """
        n = 0
        if prefix:
            prefix = '%s/' % prefix
        else:
            # self.id starts with '/', which S3 key names do not.
            prefix = '%s/' % self.id[1:]
        rs = bucket.list(prefix=prefix)
        for key in rs:
            n += 1
            m = self.new_message(key.get_contents_as_string())
            self.write(m)
        return n

    def load_from_file(self, fp, sep='\n'):
        """Utility function to load messages from a file-like object to a queue"""
        n = 0
        body = ''
        l = fp.readline()
        while l:
            if l == sep:
                m = Message(self, body)
                self.write(m)
                n += 1
                print('writing message %d' % n)
                body = ''
            else:
                body = body + l
            l = fp.readline()
        return n

    def load_from_filename(self, file_name, sep='\n'):
        """Utility function to load messages from a local filename to a queue"""
        fp = open(file_name, 'rb')
        # Bug fix: this previously called the non-existent method
        # ``load_file_file``, raising AttributeError on every call.
        n = self.load_from_file(fp, sep)
        fp.close()
        return n

    # for backward compatibility
    load = load_from_filename
+
diff --git a/boto/sqs/regioninfo.py b/boto/sqs/regioninfo.py
new file mode 100644
index 0000000..66d6733
--- /dev/null
+++ b/boto/sqs/regioninfo.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+from boto.regioninfo import RegionInfo
+
class SQSRegionInfo(RegionInfo):
    """RegionInfo subclass that binds SQSConnection as the connection
    class for the region, so connect_to_region-style helpers create
    SQS connections."""

    def __init__(self, connection=None, name=None, endpoint=None):
        # Imported here (not at module level) to avoid a circular import
        # between the SQS connection and region modules.
        from boto.sqs.connection import SQSConnection
        RegionInfo.__init__(self, connection, name, endpoint,
                            SQSConnection)
diff --git a/boto/storage_uri.py b/boto/storage_uri.py
new file mode 100755
index 0000000..9c051a4
--- /dev/null
+++ b/boto/storage_uri.py
@@ -0,0 +1,380 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+import os
+from boto.exception import BotoClientError
+from boto.exception import InvalidUriError
+
+
class StorageUri(object):
    """
    Base class for representing storage provider-independent bucket and
    object name with a shorthand URI-like syntax.

    This is an abstract class: the constructor cannot be called (throws an
    exception if you try).
    """

    # Lazily-created provider connection, cached by connect().
    connection = None

    def __init__(self):
        """Uncallable constructor on abstract base StorageUri class.
        """
        raise BotoClientError('Attempt to instantiate abstract StorageUri '
                              'class')

    def __repr__(self):
        """Returns string representation of URI."""
        return self.uri

    def equals(self, uri):
        """Returns true if two URIs are equal."""
        return self.uri == uri.uri

    def check_response(self, resp, level, uri):
        """Raise InvalidUriError if *resp* is None.

        A None response from the provider typically means the bucket or
        key named by the URI could not be found.
        """
        if resp is None:
            raise InvalidUriError('Attempt to get %s for "%s" failed. This '
                                  'probably indicates the URI is invalid' %
                                  (level, uri))

    def connect(self, access_key_id=None, secret_access_key=None, **kwargs):
        """
        Opens a connection to appropriate provider, depending on provider
        portion of URI. Requires Credentials defined in boto config file (see
        boto/pyami/config.py).
        @type access_key_id: string
        @param access_key_id: Optional access key id; if None, credentials
            are resolved by the provider connection class.
        @type secret_access_key: string
        @param secret_access_key: Optional secret key (same fallback).
        @rtype: L{AWSAuthConnection<boto.gs.connection.AWSAuthConnection>}
        @return: A connection to storage service provider of the given URI.
        """

        # The connection is cached on the instance, so repeated calls
        # (and credentials passed on later calls) reuse the first one.
        if not self.connection:
            if self.scheme == 's3':
                from boto.s3.connection import S3Connection
                self.connection = S3Connection(access_key_id,
                                               secret_access_key, **kwargs)
            elif self.scheme == 'gs':
                from boto.gs.connection import GSConnection
                self.connection = GSConnection(access_key_id,
                                               secret_access_key, **kwargs)
            elif self.scheme == 'file':
                from boto.file.connection import FileConnection
                self.connection = FileConnection(self)
            else:
                raise InvalidUriError('Unrecognized scheme "%s"' %
                                      self.scheme)
        self.connection.debug = self.debug
        return self.connection

    def delete_key(self, validate=True, headers=None, version_id=None,
                   mfa_token=None):
        """Delete the object named by this URI.

        @raise InvalidUriError: if the URI has no object portion.
        """
        if not self.object_name:
            raise InvalidUriError('delete_key on object-less URI (%s)' %
                                  self.uri)
        bucket = self.get_bucket(validate, headers)
        return bucket.delete_key(self.object_name, headers, version_id,
                                 mfa_token)

    def get_all_keys(self, validate=True, headers=None):
        """List the keys of the bucket named by this URI."""
        bucket = self.get_bucket(validate, headers)
        return bucket.get_all_keys(headers)

    def get_bucket(self, validate=True, headers=None):
        """Return the bucket object named by this URI.

        @raise InvalidUriError: if the URI has no bucket portion or the
            bucket cannot be retrieved.
        """
        if self.bucket_name is None:
            raise InvalidUriError('get_bucket on bucket-less URI (%s)' %
                                  self.uri)
        conn = self.connect()
        bucket = conn.get_bucket(self.bucket_name, validate, headers)
        self.check_response(bucket, 'bucket', self.uri)
        return bucket

    def get_key(self, validate=True, headers=None, version_id=None):
        """Return the key object named by this URI.

        @raise InvalidUriError: if the URI has no object portion or the
            key cannot be retrieved.
        """
        if not self.object_name:
            raise InvalidUriError('get_key on object-less URI (%s)' % self.uri)
        bucket = self.get_bucket(validate, headers)
        key = bucket.get_key(self.object_name, headers, version_id)
        self.check_response(key, 'key', self.uri)
        return key

    def new_key(self, validate=True, headers=None):
        """Create a new (not-yet-stored) key object named by this URI.

        @raise InvalidUriError: if the URI has no object portion.
        """
        if not self.object_name:
            raise InvalidUriError('new_key on object-less URI (%s)' % self.uri)
        bucket = self.get_bucket(validate, headers)
        return bucket.new_key(self.object_name)

    def get_contents_as_string(self, validate=True, headers=None, cb=None,
                               num_cb=10, torrent=False, version_id=None):
        """Return the contents of the object named by this URI as a string.

        @raise InvalidUriError: if the URI has no object portion.
        """
        if not self.object_name:
            raise InvalidUriError('get_contents_as_string on object-less URI '
                                  '(%s)' % self.uri)
        key = self.get_key(validate, headers)
        self.check_response(key, 'key', self.uri)
        return key.get_contents_as_string(headers, cb, num_cb, torrent,
                                          version_id)

    def acl_class(self):
        """Return the provider-specific ACL class for this URI's scheme."""
        if self.bucket_name is None:
            raise InvalidUriError('acl_class on bucket-less URI (%s)' %
                                  self.uri)
        conn = self.connect()
        acl_class = conn.provider.acl_class
        self.check_response(acl_class, 'acl_class', self.uri)
        return acl_class

    def canned_acls(self):
        """Return the provider-specific set of canned ACL names."""
        if self.bucket_name is None:
            raise InvalidUriError('canned_acls on bucket-less URI (%s)' %
                                  self.uri)
        conn = self.connect()
        canned_acls = conn.provider.canned_acls
        self.check_response(canned_acls, 'canned_acls', self.uri)
        return canned_acls
+
+
class BucketStorageUri(StorageUri):
    """
    StorageUri subclass that handles bucket storage providers.
    Callers should instantiate this class by calling boto.storage_uri().
    """

    def __init__(self, scheme, bucket_name=None, object_name=None,
                 debug=0):
        """Instantiate a BucketStorageUri from scheme,bucket,object tuple.

        @type scheme: string
        @param scheme: URI scheme naming the storage provider (gs, s3, etc.)
        @type bucket_name: string
        @param bucket_name: bucket name
        @type object_name: string
        @param object_name: object name
        @type debug: int
        @param debug: debug level to pass in to connection (range 0..2)

        After instantiation the components are available in the following
        fields: uri, scheme, bucket_name, object_name.
        """

        self.scheme = scheme
        self.bucket_name = bucket_name
        self.object_name = object_name
        # Reconstruct the canonical URI string from the components.
        if self.bucket_name and self.object_name:
            self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
                                        self.object_name))
        elif self.bucket_name:
            self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
        else:
            self.uri = ('%s://' % self.scheme)
        self.debug = debug

    def clone_replace_name(self, new_name):
        """Instantiate a BucketStorageUri from the current BucketStorageUri,
        but replacing the object_name.

        @type new_name: string
        @param new_name: new object name
        """
        if not self.bucket_name:
            raise InvalidUriError('clone_replace_name() on bucket-less URI %s' %
                                  self.uri)
        return BucketStorageUri(self.scheme, self.bucket_name, new_name,
                                self.debug)

    def get_acl(self, validate=True, headers=None, version_id=None):
        """Return the ACL for the bucket or object named by this URI."""
        if not self.bucket_name:
            raise InvalidUriError('get_acl on bucket-less URI (%s)' % self.uri)
        bucket = self.get_bucket(validate, headers)
        # This works for both bucket- and object- level ACLs (former passes
        # key_name=None):
        acl = bucket.get_acl(self.object_name, headers, version_id)
        self.check_response(acl, 'acl', self.uri)
        return acl

    def add_group_email_grant(self, permission, email_address, recursive=False,
                              validate=True, headers=None):
        """Add a group email grant (Google Storage only) to the bucket or
        object named by this URI."""
        if self.scheme != 'gs':
            raise ValueError('add_group_email_grant() not supported for %s '
                             'URIs.' % self.scheme)
        if self.object_name:
            if recursive:
                raise ValueError('add_group_email_grant() on key-ful URI cannot '
                                 'specify recursive=True')
            key = self.get_key(validate, headers)
            self.check_response(key, 'key', self.uri)
            key.add_group_email_grant(permission, email_address, headers)
        elif self.bucket_name:
            bucket = self.get_bucket(validate, headers)
            bucket.add_group_email_grant(permission, email_address, recursive,
                                         headers)
        else:
            raise InvalidUriError('add_group_email_grant() on bucket-less URI %s' %
                                  self.uri)

    def add_email_grant(self, permission, email_address, recursive=False,
                        validate=True, headers=None):
        """Add an email grant to the bucket or object named by this URI."""
        if not self.bucket_name:
            raise InvalidUriError('add_email_grant on bucket-less URI (%s)' %
                                  self.uri)
        if not self.object_name:
            bucket = self.get_bucket(validate, headers)
            bucket.add_email_grant(permission, email_address, recursive,
                                   headers)
        else:
            key = self.get_key(validate, headers)
            self.check_response(key, 'key', self.uri)
            key.add_email_grant(permission, email_address)

    def add_user_grant(self, permission, user_id, recursive=False,
                       validate=True, headers=None):
        """Add a user grant to the bucket or object named by this URI."""
        if not self.bucket_name:
            raise InvalidUriError('add_user_grant on bucket-less URI (%s)' %
                                  self.uri)
        if not self.object_name:
            bucket = self.get_bucket(validate, headers)
            bucket.add_user_grant(permission, user_id, recursive, headers)
        else:
            key = self.get_key(validate, headers)
            self.check_response(key, 'key', self.uri)
            key.add_user_grant(permission, user_id)

    def list_grants(self, headers=None):
        """List the grants on the bucket named by this URI."""
        if not self.bucket_name:
            raise InvalidUriError('list_grants on bucket-less URI (%s)' %
                                  self.uri)
        # Bug fix: headers must be passed by keyword; the first positional
        # parameter of get_bucket() is ``validate``, not ``headers``.
        bucket = self.get_bucket(headers=headers)
        return bucket.list_grants(headers)

    def names_container(self):
        """Returns True if this URI names a bucket (vs. an object).
        """
        return not self.object_name

    def names_singleton(self):
        """Returns True (truthy) if this URI names an object (vs. a bucket).
        """
        return self.object_name

    def is_file_uri(self):
        """Returns False: this URI names cloud storage, not a local file."""
        return False

    def is_cloud_uri(self):
        """Returns True: this URI names cloud storage."""
        return True

    def create_bucket(self, headers=None, location='', policy=None):
        """Create the bucket named by this URI."""
        if self.bucket_name is None:
            raise InvalidUriError('create_bucket on bucket-less URI (%s)' %
                                  self.uri)
        conn = self.connect()
        return conn.create_bucket(self.bucket_name, headers, location, policy)

    def delete_bucket(self, headers=None):
        """Delete the bucket named by this URI."""
        if self.bucket_name is None:
            raise InvalidUriError('delete_bucket on bucket-less URI (%s)' %
                                  self.uri)
        conn = self.connect()
        return conn.delete_bucket(self.bucket_name, headers)

    def get_all_buckets(self, headers=None):
        """List all buckets for the connected account/provider."""
        conn = self.connect()
        return conn.get_all_buckets(headers)

    def get_provider(self):
        """Return the provider object for this URI's scheme."""
        conn = self.connect()
        provider = conn.provider
        self.check_response(provider, 'provider', self.uri)
        return provider

    def set_acl(self, acl_or_str, key_name='', validate=True, headers=None,
                version_id=None):
        """Set the ACL on the bucket (or key_name within it)."""
        if not self.bucket_name:
            raise InvalidUriError('set_acl on bucket-less URI (%s)' %
                                  self.uri)
        self.get_bucket(validate, headers).set_acl(acl_or_str, key_name,
                                                   headers, version_id)

    def set_canned_acl(self, acl_str, validate=True, headers=None,
                       version_id=None):
        """Set a canned ACL on the object named by this URI."""
        if not self.object_name:
            raise InvalidUriError('set_canned_acl on object-less URI (%s)' %
                                  self.uri)
        key = self.get_key(validate, headers)
        self.check_response(key, 'key', self.uri)
        key.set_canned_acl(acl_str, headers, version_id)

    def set_contents_from_string(self, s, headers=None, replace=True,
                                 cb=None, num_cb=10, policy=None, md5=None,
                                 reduced_redundancy=False):
        """Store the string *s* as the contents of the object named by
        this URI."""
        key = self.new_key(headers=headers)
        key.set_contents_from_string(s, headers, replace, cb, num_cb, policy,
                                     md5, reduced_redundancy)
+
+
+
class FileStorageUri(StorageUri):
    """
    StorageUri subclass for objects living on the local file system.
    Callers should instantiate this class by calling boto.storage_uri().

    See file/README about how we map StorageUri operations onto a file system.
    """

    def __init__(self, object_name, debug):
        """Build a FileStorageUri from a path name.

        @type object_name: string
        @param object_name: path naming the file or directory
        @type debug: boolean
        @param debug: whether to enable debugging on this StorageUri

        After instantiation the components are available in the following
        fields: uri, scheme, bucket_name (always blank for this "anonymous"
        bucket), object_name.
        """

        self.debug = debug
        self.scheme = 'file'
        # Local files live in an "anonymous" bucket with an empty name.
        self.bucket_name = ''
        self.object_name = object_name
        self.uri = 'file://' + object_name

    def clone_replace_name(self, new_name):
        """Return a new FileStorageUri identical to this one except for
        its object_name.

        @type new_name: string
        @param new_name: new object name
        """
        return FileStorageUri(new_name, self.debug)

    def names_container(self):
        """Returns True if this URI names a directory.
        """
        return os.path.isdir(self.object_name)

    def names_singleton(self):
        """Returns True if this URI names a file.
        """
        return os.path.isfile(self.object_name)

    def is_file_uri(self):
        """Returns True: this URI names a local file-system path."""
        return True

    def is_cloud_uri(self):
        """Returns False: this URI does not name cloud storage."""
        return False
diff --git a/boto/tests/__init__.py b/boto/tests/__init__.py
new file mode 100644
index 0000000..449bd16
--- /dev/null
+++ b/boto/tests/__init__.py
@@ -0,0 +1,23 @@
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+
+
diff --git a/boto/tests/cb_test_harnass.py b/boto/tests/cb_test_harnass.py
new file mode 100644
index 0000000..9f4f1c5
--- /dev/null
+++ b/boto/tests/cb_test_harnass.py
@@ -0,0 +1,71 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Test harness that allows us to raise exceptions, change file content,
+and record the byte transfer callback sequence, to test various resumable
+upload and download cases. The 'call' method of this harness can be passed
+as the 'cb' parameter to boto.s3.Key.send_file() and boto.s3.Key.get_file(),
+allowing testing of various file upload/download conditions.
+"""
+
+import socket
+
+
+class CallbackTestHarnass(object):
+
+ def __init__(self, fail_after_n_bytes=0, num_times_to_fail=1,
+ exception=socket.error('mock socket error', 0),
+ fp_to_change=None, fp_change_pos=None):
+ self.fail_after_n_bytes = fail_after_n_bytes
+ self.num_times_to_fail = num_times_to_fail
+ self.exception = exception
+ # If fp_to_change and fp_change_pos are specified, 3 bytes will be
+ # written at that position just before the first exception is thrown.
+ self.fp_to_change = fp_to_change
+ self.fp_change_pos = fp_change_pos
+ self.num_failures = 0
+ self.transferred_seq_before_first_failure = []
+ self.transferred_seq_after_first_failure = []
+
+ def call(self, total_bytes_transferred, unused_total_size):
+ """
+        To use this test harness, pass the 'call' method of the instantiated
+ object as the cb param to the set_contents_from_file() or
+ get_contents_to_file() call.
+ """
+ # Record transfer sequence to allow verification.
+ if self.num_failures:
+ self.transferred_seq_after_first_failure.append(
+ total_bytes_transferred)
+ else:
+ self.transferred_seq_before_first_failure.append(
+ total_bytes_transferred)
+ if (total_bytes_transferred >= self.fail_after_n_bytes and
+ self.num_failures < self.num_times_to_fail):
+ self.num_failures += 1
+ if self.fp_to_change and self.fp_change_pos is not None:
+ cur_pos = self.fp_to_change.tell()
+ self.fp_to_change.seek(self.fp_change_pos)
+ self.fp_to_change.write('abc')
+ self.fp_to_change.seek(cur_pos)
+ self.called = True
+ raise self.exception
diff --git a/boto/tests/devpay_s3.py b/boto/tests/devpay_s3.py
new file mode 100644
index 0000000..bb91125
--- /dev/null
+++ b/boto/tests/devpay_s3.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3Connection
+"""
+
+import time
+import os
+import urllib
+
+from boto.s3.connection import S3Connection
+from boto.exception import S3PermissionsError
+
+# this test requires a devpay product and user token to run:
+
+AMAZON_USER_TOKEN = '{UserToken}...your token here...'
+DEVPAY_HEADERS = { 'x-amz-security-token': AMAZON_USER_TOKEN }
+
+print '--- running S3Connection tests (DevPay) ---'
+c = S3Connection()
+# create a new, empty bucket
+bucket_name = 'test-%d' % int(time.time())
+bucket = c.create_bucket(bucket_name, headers=DEVPAY_HEADERS)
+# now try a get_bucket call and see if it's really there
+bucket = c.get_bucket(bucket_name, headers=DEVPAY_HEADERS)
+# test logging
+logging_bucket = c.create_bucket(bucket_name + '-log', headers=DEVPAY_HEADERS)
+logging_bucket.set_as_logging_target(headers=DEVPAY_HEADERS)
+bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name, headers=DEVPAY_HEADERS)
+bucket.disable_logging(headers=DEVPAY_HEADERS)
+c.delete_bucket(logging_bucket, headers=DEVPAY_HEADERS)
+# create a new key and store its content from a string
+k = bucket.new_key()
+k.name = 'foobar'
+s1 = 'This is a test of file upload and download'
+s2 = 'This is a second string to test file upload and download'
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+fp = open('foobar', 'wb')
+# now get the contents from s3 to a local file
+k.get_contents_to_file(fp, headers=DEVPAY_HEADERS)
+fp.close()
+fp = open('foobar')
+# check to make sure content read from s3 is identical to original
+assert s1 == fp.read(), 'corrupted file'
+fp.close()
+# test generated URLs
+url = k.generate_url(3600, headers=DEVPAY_HEADERS)
+file = urllib.urlopen(url)
+assert s1 == file.read(), 'invalid URL %s' % url
+url = k.generate_url(3600, force_http=True, headers=DEVPAY_HEADERS)
+file = urllib.urlopen(url)
+assert s1 == file.read(), 'invalid URL %s' % url
+bucket.delete_key(k, headers=DEVPAY_HEADERS)
+# test a few variations on get_all_keys - first load some data
+# for the first one, let's override the content type
+phony_mimetype = 'application/x-boto-test'
+headers = {'Content-Type': phony_mimetype}
+headers.update(DEVPAY_HEADERS)
+k.name = 'foo/bar'
+k.set_contents_from_string(s1, headers)
+k.name = 'foo/bas'
+k.set_contents_from_filename('foobar', headers=DEVPAY_HEADERS)
+k.name = 'foo/bat'
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+k.name = 'fie/bar'
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+k.name = 'fie/bas'
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+k.name = 'fie/bat'
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+# try resetting the contents to another value
+md5 = k.md5
+k.set_contents_from_string(s2, headers=DEVPAY_HEADERS)
+assert k.md5 != md5
+os.unlink('foobar')
+all = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+assert len(all) == 6
+rs = bucket.get_all_keys(prefix='foo', headers=DEVPAY_HEADERS)
+assert len(rs) == 3
+rs = bucket.get_all_keys(prefix='', delimiter='/', headers=DEVPAY_HEADERS)
+assert len(rs) == 2
+rs = bucket.get_all_keys(maxkeys=5, headers=DEVPAY_HEADERS)
+assert len(rs) == 5
+# test the lookup method
+k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
+assert isinstance(k, bucket.key_class)
+assert k.content_type == phony_mimetype
+k = bucket.lookup('notthere', headers=DEVPAY_HEADERS)
+assert k == None
+# try some metadata stuff
+k = bucket.new_key()
+k.name = 'has_metadata'
+mdkey1 = 'meta1'
+mdval1 = 'This is the first metadata value'
+k.set_metadata(mdkey1, mdval1)
+mdkey2 = 'meta2'
+mdval2 = 'This is the second metadata value'
+k.set_metadata(mdkey2, mdval2)
+k.set_contents_from_string(s1, headers=DEVPAY_HEADERS)
+k = bucket.lookup('has_metadata', headers=DEVPAY_HEADERS)
+assert k.get_metadata(mdkey1) == mdval1
+assert k.get_metadata(mdkey2) == mdval2
+k = bucket.new_key()
+k.name = 'has_metadata'
+k.get_contents_as_string(headers=DEVPAY_HEADERS)
+assert k.get_metadata(mdkey1) == mdval1
+assert k.get_metadata(mdkey2) == mdval2
+bucket.delete_key(k, headers=DEVPAY_HEADERS)
+# test list and iterator
+rs1 = bucket.list(headers=DEVPAY_HEADERS)
+num_iter = 0
+for r in rs1:
+ num_iter = num_iter + 1
+rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+num_keys = len(rs)
+assert num_iter == num_keys
+# try a key with a funny character
+k = bucket.new_key()
+k.name = 'testnewline\n'
+k.set_contents_from_string('This is a test', headers=DEVPAY_HEADERS)
+rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+assert len(rs) == num_keys + 1
+bucket.delete_key(k, headers=DEVPAY_HEADERS)
+rs = bucket.get_all_keys(headers=DEVPAY_HEADERS)
+assert len(rs) == num_keys
+# try some acl stuff
+bucket.set_acl('public-read', headers=DEVPAY_HEADERS)
+policy = bucket.get_acl(headers=DEVPAY_HEADERS)
+assert len(policy.acl.grants) == 2
+bucket.set_acl('private', headers=DEVPAY_HEADERS)
+policy = bucket.get_acl(headers=DEVPAY_HEADERS)
+assert len(policy.acl.grants) == 1
+k = bucket.lookup('foo/bar', headers=DEVPAY_HEADERS)
+k.set_acl('public-read', headers=DEVPAY_HEADERS)
+policy = k.get_acl(headers=DEVPAY_HEADERS)
+assert len(policy.acl.grants) == 2
+k.set_acl('private', headers=DEVPAY_HEADERS)
+policy = k.get_acl(headers=DEVPAY_HEADERS)
+assert len(policy.acl.grants) == 1
+# try the convenience methods for grants
+# this doesn't work with devpay
+#bucket.add_user_grant('FULL_CONTROL',
+# 'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67',
+# headers=DEVPAY_HEADERS)
+try:
+ bucket.add_email_grant('foobar', 'foo@bar.com', headers=DEVPAY_HEADERS)
+except S3PermissionsError:
+ pass
+# now delete all keys in bucket
+for k in all:
+ bucket.delete_key(k, headers=DEVPAY_HEADERS)
+# now delete bucket
+
+c.delete_bucket(bucket, headers=DEVPAY_HEADERS)
+
+print '--- tests completed ---'
diff --git a/boto/tests/mock_storage_service.py b/boto/tests/mock_storage_service.py
new file mode 100644
index 0000000..10b5253
--- /dev/null
+++ b/boto/tests/mock_storage_service.py
@@ -0,0 +1,298 @@
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Provides basic mocks of core storage service classes, for unit testing:
+ACL, Key, Bucket, Connection, and StorageUri. We implement a subset of
+the interfaces defined in the real boto classes, but don't handle most
+of the optional params (which we indicate with the constant "NOT_IMPL").
+"""
+
+import copy
+import boto
+
+NOT_IMPL = None
+
+
+class MockAcl(object):
+
+ def __init__(self, parent=NOT_IMPL):
+ pass
+
+ def startElement(self, name, attrs, connection):
+ pass
+
+ def endElement(self, name, value, connection):
+ pass
+
+ def to_xml(self):
+ return '<mock_ACL_XML/>'
+
+
+class MockKey(object):
+
+ def __init__(self, bucket=None, name=None):
+ self.bucket = bucket
+ self.name = name
+ self.data = None
+ self.size = None
+ self.content_encoding = None
+ self.content_type = None
+ self.last_modified = 'Wed, 06 Oct 2010 05:11:54 GMT'
+
+ def get_contents_as_string(self, headers=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL,
+ torrent=NOT_IMPL,
+ version_id=NOT_IMPL):
+ return self.data
+
+ def get_contents_to_file(self, fp, headers=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL,
+ torrent=NOT_IMPL,
+ version_id=NOT_IMPL,
+ res_download_handler=NOT_IMPL):
+ fp.write(self.data)
+
+ def get_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL, num_cb=NOT_IMPL,
+ torrent=NOT_IMPL, version_id=NOT_IMPL,
+ override_num_retries=NOT_IMPL):
+ fp.write(self.data)
+
+ def _handle_headers(self, headers):
+ if not headers:
+ return
+ if 'Content-Encoding' in headers:
+ self.content_encoding = headers['Content-Encoding']
+ if 'Content-Type' in headers:
+ self.content_type = headers['Content-Type']
+
+ def open_read(self, headers=NOT_IMPL, query_args=NOT_IMPL,
+ override_num_retries=NOT_IMPL):
+ pass
+
+ def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL,
+ policy=NOT_IMPL, md5=NOT_IMPL,
+ res_upload_handler=NOT_IMPL):
+ self.data = fp.readlines()
+ self.size = len(self.data)
+ self._handle_headers(headers)
+
+ def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
+ cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
+ md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
+ self.data = copy.copy(s)
+ self.size = len(s)
+ self._handle_headers(headers)
+
+
+class MockBucket(object):
+
+ def __init__(self, connection=NOT_IMPL, name=None, key_class=NOT_IMPL):
+ self.name = name
+ self.keys = {}
+ self.acls = {name: MockAcl()}
+
+ def copy_key(self, new_key_name, src_bucket_name,
+ src_key_name, metadata=NOT_IMPL, src_version_id=NOT_IMPL,
+ storage_class=NOT_IMPL, preserve_acl=NOT_IMPL):
+ new_key = self.new_key(key_name=new_key_name)
+ src_key = mock_connection.get_bucket(
+ src_bucket_name).get_key(src_key_name)
+ new_key.data = copy.copy(src_key.data)
+ new_key.size = len(new_key.data)
+
+ def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL):
+ if key_name:
+ # Return ACL for the key.
+ return self.acls[key_name]
+ else:
+ # Return ACL for the bucket.
+ return self.acls[self.name]
+
+ def new_key(self, key_name=None):
+ mock_key = MockKey(self, key_name)
+ self.keys[key_name] = mock_key
+ self.acls[key_name] = MockAcl()
+ return mock_key
+
+ def delete_key(self, key_name, headers=NOT_IMPL,
+ version_id=NOT_IMPL, mfa_token=NOT_IMPL):
+ if key_name not in self.keys:
+ raise boto.exception.StorageResponseError(404, 'Not Found')
+ del self.keys[key_name]
+
+ def get_all_keys(self, headers=NOT_IMPL):
+ return self.keys.itervalues()
+
+ def get_key(self, key_name, headers=NOT_IMPL, version_id=NOT_IMPL):
+ # Emulate behavior of boto when get_key called with non-existent key.
+ if key_name not in self.keys:
+ return None
+ return self.keys[key_name]
+
+ def list(self, prefix='', delimiter=NOT_IMPL, marker=NOT_IMPL,
+ headers=NOT_IMPL):
+ # Return list instead of using a generator so we don't get
+ # 'dictionary changed size during iteration' error when performing
+ # deletions while iterating (e.g., during test cleanup).
+ result = []
+ for k in self.keys.itervalues():
+ if not prefix:
+ result.append(k)
+ elif k.name.startswith(prefix):
+ result.append(k)
+ return result
+
+ def set_acl(self, acl_or_str, key_name='', headers=NOT_IMPL,
+ version_id=NOT_IMPL):
+ # We only handle setting ACL XML here; if you pass a canned ACL
+ # the get_acl call will just return that string name.
+ if key_name:
+ # Set ACL for the key.
+ self.acls[key_name] = acl_or_str
+ else:
+ # Set ACL for the bucket.
+ self.acls[self.name] = acl_or_str
+
+
+class MockConnection(object):
+
+ def __init__(self, aws_access_key_id=NOT_IMPL,
+ aws_secret_access_key=NOT_IMPL, is_secure=NOT_IMPL,
+ port=NOT_IMPL, proxy=NOT_IMPL, proxy_port=NOT_IMPL,
+ proxy_user=NOT_IMPL, proxy_pass=NOT_IMPL,
+ host=NOT_IMPL, debug=NOT_IMPL,
+ https_connection_factory=NOT_IMPL,
+ calling_format=NOT_IMPL,
+ path=NOT_IMPL, provider=NOT_IMPL,
+ bucket_class=NOT_IMPL):
+ self.buckets = {}
+
+ def create_bucket(self, bucket_name, headers=NOT_IMPL, location=NOT_IMPL,
+ policy=NOT_IMPL):
+ if bucket_name in self.buckets:
+ raise boto.exception.StorageCreateError(
+ 409, 'BucketAlreadyOwnedByYou', 'bucket already exists')
+ mock_bucket = MockBucket(name=bucket_name)
+ self.buckets[bucket_name] = mock_bucket
+ return mock_bucket
+
+ def delete_bucket(self, bucket, headers=NOT_IMPL):
+ if bucket not in self.buckets:
+ raise boto.exception.StorageResponseError(404, 'NoSuchBucket',
+ 'no such bucket')
+ del self.buckets[bucket]
+
+ def get_bucket(self, bucket_name, validate=NOT_IMPL, headers=NOT_IMPL):
+ if bucket_name not in self.buckets:
+ raise boto.exception.StorageResponseError(404, 'NoSuchBucket',
+ 'Not Found')
+ return self.buckets[bucket_name]
+
+ def get_all_buckets(self, headers=NOT_IMPL):
+ return self.buckets.itervalues()
+
+
+# We only mock a single provider/connection.
+mock_connection = MockConnection()
+
+
+class MockBucketStorageUri(object):
+
+ def __init__(self, scheme, bucket_name=None, object_name=None,
+ debug=NOT_IMPL):
+ self.scheme = scheme
+ self.bucket_name = bucket_name
+ self.object_name = object_name
+ if self.bucket_name and self.object_name:
+ self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
+ self.object_name))
+ elif self.bucket_name:
+ self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
+ else:
+ self.uri = ('%s://' % self.scheme)
+
+ def __repr__(self):
+ """Returns string representation of URI."""
+ return self.uri
+
+ def acl_class(self):
+ return MockAcl
+
+ def canned_acls(self):
+ return boto.provider.Provider('aws').canned_acls
+
+ def clone_replace_name(self, new_name):
+ return MockBucketStorageUri(self.scheme, self.bucket_name, new_name)
+
+ def connect(self, access_key_id=NOT_IMPL, secret_access_key=NOT_IMPL):
+ return mock_connection
+
+ def create_bucket(self, headers=NOT_IMPL, location=NOT_IMPL,
+ policy=NOT_IMPL):
+ return self.connect().create_bucket(self.bucket_name)
+
+ def delete_bucket(self, headers=NOT_IMPL):
+ return self.connect().delete_bucket(self.bucket_name)
+
+ def delete_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
+ version_id=NOT_IMPL, mfa_token=NOT_IMPL):
+ self.get_bucket().delete_key(self.object_name)
+
+ def equals(self, uri):
+ return self.uri == uri.uri
+
+ def get_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, version_id=NOT_IMPL):
+ return self.get_bucket().get_acl(self.object_name)
+
+ def get_all_buckets(self, headers=NOT_IMPL):
+ return self.connect().get_all_buckets()
+
+ def get_all_keys(self, validate=NOT_IMPL, headers=NOT_IMPL):
+ return self.get_bucket().get_all_keys(self)
+
+ def get_bucket(self, validate=NOT_IMPL, headers=NOT_IMPL):
+ return self.connect().get_bucket(self.bucket_name)
+
+ def get_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
+ version_id=NOT_IMPL):
+ return self.get_bucket().get_key(self.object_name)
+
+ def is_file_uri(self):
+ return False
+
+ def is_cloud_uri(self):
+ return True
+
+ def names_container(self):
+ return not self.object_name
+
+ def names_singleton(self):
+ return self.object_name
+
+ def new_key(self, validate=NOT_IMPL, headers=NOT_IMPL):
+ bucket = self.get_bucket()
+ return bucket.new_key(self.object_name)
+
+ def set_acl(self, acl_or_str, key_name='', validate=NOT_IMPL,
+ headers=NOT_IMPL, version_id=NOT_IMPL):
+ self.get_bucket().set_acl(acl_or_str, key_name)
diff --git a/boto/tests/test.py b/boto/tests/test.py
new file mode 100755
index 0000000..8648e70
--- /dev/null
+++ b/boto/tests/test.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+do the unit tests!
+"""
+
+import sys
+import unittest
+import getopt
+
+from boto.tests.test_sqsconnection import SQSConnectionTest
+from boto.tests.test_s3connection import S3ConnectionTest
+from boto.tests.test_s3versioning import S3VersionTest
+from boto.tests.test_gsconnection import GSConnectionTest
+from boto.tests.test_ec2connection import EC2ConnectionTest
+from boto.tests.test_sdbconnection import SDBConnectionTest
+
+def usage():
+ print 'test.py [-t testsuite] [-v verbosity]'
+ print ' -t run specific testsuite (s3|s3ver|s3nover|gs|sqs|ec2|sdb|all)'
+ print ' -v verbosity (0|1|2)'
+
+def main():
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], 'ht:v:',
+ ['help', 'testsuite', 'verbosity'])
+ except:
+ usage()
+ sys.exit(2)
+ testsuite = 'all'
+ verbosity = 1
+ for o, a in opts:
+ if o in ('-h', '--help'):
+ usage()
+ sys.exit()
+ if o in ('-t', '--testsuite'):
+ testsuite = a
+ if o in ('-v', '--verbosity'):
+ verbosity = int(a)
+ if len(args) != 0:
+ usage()
+ sys.exit()
+ suite = unittest.TestSuite()
+ if testsuite == 'all':
+ suite.addTest(unittest.makeSuite(SQSConnectionTest))
+ suite.addTest(unittest.makeSuite(S3ConnectionTest))
+ suite.addTest(unittest.makeSuite(EC2ConnectionTest))
+ suite.addTest(unittest.makeSuite(SDBConnectionTest))
+ elif testsuite == 's3':
+ suite.addTest(unittest.makeSuite(S3ConnectionTest))
+ suite.addTest(unittest.makeSuite(S3VersionTest))
+ elif testsuite == 's3ver':
+ suite.addTest(unittest.makeSuite(S3VersionTest))
+ elif testsuite == 's3nover':
+ suite.addTest(unittest.makeSuite(S3ConnectionTest))
+ elif testsuite == 'gs':
+ suite.addTest(unittest.makeSuite(GSConnectionTest))
+ elif testsuite == 'sqs':
+ suite.addTest(unittest.makeSuite(SQSConnectionTest))
+ elif testsuite == 'ec2':
+ suite.addTest(unittest.makeSuite(EC2ConnectionTest))
+ elif testsuite == 'sdb':
+ suite.addTest(unittest.makeSuite(SDBConnectionTest))
+ else:
+ usage()
+ sys.exit()
+ unittest.TextTestRunner(verbosity=verbosity).run(suite)
+
+if __name__ == "__main__":
+ main()
diff --git a/boto/tests/test_ec2connection.py b/boto/tests/test_ec2connection.py
new file mode 100644
index 0000000..046ff92
--- /dev/null
+++ b/boto/tests/test_ec2connection.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2009, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the EC2Connection
+"""
+
+import unittest
+import time
+from boto.ec2.connection import EC2Connection
+import telnetlib
+import socket
+
+class EC2ConnectionTest (unittest.TestCase):
+
+ def test_1_basic(self):
+ # this is my user_id, if you want to run these tests you should
+ # replace this with yours or they won't work
+ user_id = '963068290131'
+ print '--- running EC2Connection tests ---'
+ c = EC2Connection()
+ # get list of private AMI's
+ rs = c.get_all_images(owners=[user_id])
+ assert len(rs) > 0
+ # now pick the first one
+ image = rs[0]
+ # temporarily make this image runnable by everyone
+ status = image.set_launch_permissions(group_names=['all'])
+ assert status
+ d = image.get_launch_permissions()
+ assert d.has_key('groups')
+ assert len(d['groups']) > 0
+ # now remove that permission
+ status = image.remove_launch_permissions(group_names=['all'])
+ assert status
+ d = image.get_launch_permissions()
+ assert not d.has_key('groups')
+
+ # create 2 new security groups
+ group1_name = 'test-%d' % int(time.time())
+ group_desc = 'This is a security group created during unit testing'
+ group1 = c.create_security_group(group1_name, group_desc)
+ time.sleep(2)
+ group2_name = 'test-%d' % int(time.time())
+ group_desc = 'This is a security group created during unit testing'
+ group2 = c.create_security_group(group2_name, group_desc)
+ # now get a listing of all security groups and look for our new one
+ rs = c.get_all_security_groups()
+ found = False
+ for g in rs:
+ if g.name == group1_name:
+ found = True
+ assert found
+ # now pass arg to filter results to only our new group
+ rs = c.get_all_security_groups([group1_name])
+ assert len(rs) == 1
+ # try some group to group authorizations/revocations
+ # first try the old style
+ status = c.authorize_security_group(group1.name, group2.name, group2.owner_id)
+ assert status
+ status = c.revoke_security_group(group1.name, group2.name, group2.owner_id)
+ assert status
+ # now try specifying a specific port
+ status = c.authorize_security_group(group1.name, group2.name, group2.owner_id,
+ 'tcp', 22, 22)
+ assert status
+ status = c.revoke_security_group(group1.name, group2.name, group2.owner_id,
+ 'tcp', 22, 22)
+ assert status
+
+ # now delete the second security group
+ status = c.delete_security_group(group2_name)
+ # now make sure it's really gone
+ rs = c.get_all_security_groups()
+ found = False
+ for g in rs:
+ if g.name == group2_name:
+ found = True
+ assert not found
+
+ group = group1
+
+ # now try to launch apache image with our new security group
+ rs = c.get_all_images()
+ img_loc = 'ec2-public-images/fedora-core4-apache.manifest.xml'
+ for image in rs:
+ if image.location == img_loc:
+ break
+ reservation = image.run(security_groups=[group.name])
+ instance = reservation.instances[0]
+ while instance.state != 'running':
+ print '\tinstance is %s' % instance.state
+ time.sleep(30)
+ instance.update()
+        # instance is now running, try to telnet to port 80
+ t = telnetlib.Telnet()
+ try:
+ t.open(instance.dns_name, 80)
+ except socket.error:
+ pass
+ # now open up port 80 and try again, it should work
+ group.authorize('tcp', 80, 80, '0.0.0.0/0')
+ t.open(instance.dns_name, 80)
+ t.close()
+ # now revoke authorization and try again
+ group.revoke('tcp', 80, 80, '0.0.0.0/0')
+ try:
+ t.open(instance.dns_name, 80)
+ except socket.error:
+ pass
+ # now kill the instance and delete the security group
+ instance.terminate()
+ # unfortunately, I can't delete the sg within this script
+ #sg.delete()
+
+ # create a new key pair
+ key_name = 'test-%d' % int(time.time())
+ status = c.create_key_pair(key_name)
+ assert status
+ # now get a listing of all key pairs and look for our new one
+ rs = c.get_all_key_pairs()
+ found = False
+ for k in rs:
+ if k.name == key_name:
+ found = True
+ assert found
+ # now pass arg to filter results to only our new key pair
+ rs = c.get_all_key_pairs([key_name])
+ assert len(rs) == 1
+ key_pair = rs[0]
+ # now delete the key pair
+ status = c.delete_key_pair(key_name)
+ # now make sure it's really gone
+ rs = c.get_all_key_pairs()
+ found = False
+ for k in rs:
+ if k.name == key_name:
+ found = True
+ assert not found
+
+ # short test around Paid AMI capability
+ demo_paid_ami_id = 'ami-bd9d78d4'
+ demo_paid_ami_product_code = 'A79EC0DB'
+ l = c.get_all_images([demo_paid_ami_id])
+ assert len(l) == 1
+ assert len(l[0].product_codes) == 1
+ assert l[0].product_codes[0] == demo_paid_ami_product_code
+
+ print '--- tests completed ---'
diff --git a/boto/tests/test_gsconnection.py b/boto/tests/test_gsconnection.py
new file mode 100644
index 0000000..5c324fa
--- /dev/null
+++ b/boto/tests/test_gsconnection.py
@@ -0,0 +1,149 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the GSConnection
+"""
+
+import unittest
+import time
+import os
+from boto.gs.connection import GSConnection
+
+class GSConnectionTest (unittest.TestCase):
+
+ def test_1_basic(self):
+ print '--- running GSConnection tests ---'
+ c = GSConnection()
+ # create a new, empty bucket
+ bucket_name = 'test-%d' % int(time.time())
+ bucket = c.create_bucket(bucket_name)
+ # now try a get_bucket call and see if it's really there
+ bucket = c.get_bucket(bucket_name)
+ k = bucket.new_key()
+ k.name = 'foobar'
+ s1 = 'This is a test of file upload and download'
+ s2 = 'This is a second string to test file upload and download'
+ k.set_contents_from_string(s1)
+ fp = open('foobar', 'wb')
+ # now get the contents from s3 to a local file
+ k.get_contents_to_file(fp)
+ fp.close()
+ fp = open('foobar')
+ # check to make sure content read from s3 is identical to original
+ assert s1 == fp.read(), 'corrupted file'
+ fp.close()
+ bucket.delete_key(k)
+ # test a few variations on get_all_keys - first load some data
+ # for the first one, let's override the content type
+ phony_mimetype = 'application/x-boto-test'
+ headers = {'Content-Type': phony_mimetype}
+ k.name = 'foo/bar'
+ k.set_contents_from_string(s1, headers)
+ k.name = 'foo/bas'
+ k.set_contents_from_filename('foobar')
+ k.name = 'foo/bat'
+ k.set_contents_from_string(s1)
+ k.name = 'fie/bar'
+ k.set_contents_from_string(s1)
+ k.name = 'fie/bas'
+ k.set_contents_from_string(s1)
+ k.name = 'fie/bat'
+ k.set_contents_from_string(s1)
+ # try resetting the contents to another value
+ md5 = k.md5
+ k.set_contents_from_string(s2)
+ assert k.md5 != md5
+ os.unlink('foobar')
+ all = bucket.get_all_keys()
+ assert len(all) == 6
+ rs = bucket.get_all_keys(prefix='foo')
+ assert len(rs) == 3
+ rs = bucket.get_all_keys(prefix='', delimiter='/')
+ assert len(rs) == 2
+ rs = bucket.get_all_keys(maxkeys=5)
+ assert len(rs) == 5
+ # test the lookup method
+ k = bucket.lookup('foo/bar')
+ assert isinstance(k, bucket.key_class)
+ assert k.content_type == phony_mimetype
+ k = bucket.lookup('notthere')
+ assert k == None
+ # try some metadata stuff
+ k = bucket.new_key()
+ k.name = 'has_metadata'
+ mdkey1 = 'meta1'
+ mdval1 = 'This is the first metadata value'
+ k.set_metadata(mdkey1, mdval1)
+ mdkey2 = 'meta2'
+ mdval2 = 'This is the second metadata value'
+ k.set_metadata(mdkey2, mdval2)
+ # try a unicode metadata value
+
+ mdval3 = u'föö'
+ mdkey3 = 'meta3'
+ k.set_metadata(mdkey3, mdval3)
+ k.set_contents_from_string(s1)
+
+ k = bucket.lookup('has_metadata')
+ assert k.get_metadata(mdkey1) == mdval1
+ assert k.get_metadata(mdkey2) == mdval2
+ assert k.get_metadata(mdkey3) == mdval3
+ k = bucket.new_key()
+ k.name = 'has_metadata'
+ k.get_contents_as_string()
+ assert k.get_metadata(mdkey1) == mdval1
+ assert k.get_metadata(mdkey2) == mdval2
+ assert k.get_metadata(mdkey3) == mdval3
+ bucket.delete_key(k)
+ # test list and iterator
+ rs1 = bucket.list()
+ num_iter = 0
+ for r in rs1:
+ num_iter = num_iter + 1
+ rs = bucket.get_all_keys()
+ num_keys = len(rs)
+ assert num_iter == num_keys
+ # try some acl stuff
+ bucket.set_acl('public-read')
+ acl = bucket.get_acl()
+ assert len(acl.entries.entry_list) == 2
+ bucket.set_acl('private')
+ acl = bucket.get_acl()
+ assert len(acl.entries.entry_list) == 1
+ k = bucket.lookup('foo/bar')
+ k.set_acl('public-read')
+ acl = k.get_acl()
+ assert len(acl.entries.entry_list) == 2
+ k.set_acl('private')
+ acl = k.get_acl()
+ assert len(acl.entries.entry_list) == 1
+ # now delete all keys in bucket
+ for k in bucket:
+ bucket.delete_key(k)
+ # now delete bucket
+ time.sleep(5)
+ c.delete_bucket(bucket)
+ print '--- tests completed ---'
diff --git a/boto/tests/test_resumable_downloads.py b/boto/tests/test_resumable_downloads.py
new file mode 100755
index 0000000..d7ced7f
--- /dev/null
+++ b/boto/tests/test_resumable_downloads.py
@@ -0,0 +1,521 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests of resumable downloads.
+"""
+
+import errno
+import getopt
+import os
+import random
+import re
+import shutil
+import socket
+import StringIO
+import sys
+import tempfile
+import time
+import unittest
+
+import boto
+from boto import storage_uri
+from boto.s3.resumable_download_handler import get_cur_file_size
+from boto.s3.resumable_download_handler import ResumableDownloadHandler
+from boto.exception import ResumableTransferDisposition
+from boto.exception import ResumableDownloadException
+from boto.exception import StorageResponseError
+from boto.tests.cb_test_harnass import CallbackTestHarnass
+
+
+class ResumableDownloadTests(unittest.TestCase):
+ """
+ Resumable download test suite.
+ """
+
+ def get_suite_description(self):
+ return 'Resumable download test suite'
+
+ @staticmethod
+ def resilient_close(key):
+ try:
+ key.close()
+ except StorageResponseError, e:
+ pass
+
+ @classmethod
+ def setUp(cls):
+ """
+ Creates file-like object for destination of each download test.
+
+ This method's namingCase is required by the unittest framework.
+ """
+ cls.dst_fp = open(cls.dst_file_name, 'w')
+
+ @classmethod
+ def tearDown(cls):
+ """
+ Deletes any objects or files created by last test run, and closes
+ any keys in case they were read incompletely (which would leave
+ partial buffers of data for subsequent tests to trip over).
+
+ This method's namingCase is required by the unittest framework.
+ """
+ # Recursively delete dst dir and then re-create it, so in effect we
+ # remove all dirs and files under that directory.
+ shutil.rmtree(cls.tmp_dir)
+ os.mkdir(cls.tmp_dir)
+
+ # Close test objects.
+ cls.resilient_close(cls.empty_src_key)
+ cls.resilient_close(cls.small_src_key)
+ cls.resilient_close(cls.larger_src_key)
+
+ @classmethod
+ def build_test_input_object(cls, obj_name, size, debug):
+ buf = []
+ for i in range(size):
+ buf.append(str(random.randint(0, 9)))
+ string_data = ''.join(buf)
+ uri = cls.src_bucket_uri.clone_replace_name(obj_name)
+ key = uri.new_key(validate=False)
+ key.set_contents_from_file(StringIO.StringIO(string_data))
+ # Set debug on key's connection after creating data, so only the test
+ # runs will show HTTP output (if caller passed debug > 0).
+ key.bucket.connection.debug = debug
+ return (string_data, key)
+
+ @classmethod
+ def set_up_class(cls, debug):
+ """
+ Initializes test suite.
+ """
+
+ # Create the test bucket.
+ hostname = socket.gethostname().split('.')[0]
+ uri_base_str = 'gs://res_download_test_%s_%s_%s' % (
+ hostname, os.getpid(), int(time.time()))
+ cls.src_bucket_uri = storage_uri('%s_dst' % uri_base_str)
+ cls.src_bucket_uri.create_bucket()
+
+ # Create test source objects.
+ cls.empty_src_key_size = 0
+ (cls.empty_src_key_as_string, cls.empty_src_key) = (
+ cls.build_test_input_object('empty', cls.empty_src_key_size,
+ debug=debug))
+ cls.small_src_key_size = 2 * 1024 # 2 KB.
+ (cls.small_src_key_as_string, cls.small_src_key) = (
+ cls.build_test_input_object('small', cls.small_src_key_size,
+ debug=debug))
+ cls.larger_src_key_size = 500 * 1024 # 500 KB.
+ (cls.larger_src_key_as_string, cls.larger_src_key) = (
+ cls.build_test_input_object('larger', cls.larger_src_key_size,
+ debug=debug))
+
+ # Use a designated tmpdir prefix to make it easy to find the end of
+ # the tmp path.
+ cls.tmpdir_prefix = 'tmp_resumable_download_test'
+
+ # Create temp dir and name for download file.
+ cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+ cls.dst_file_name = '%s%sdst_file' % (cls.tmp_dir, os.sep)
+
+ cls.tracker_file_name = '%s%stracker' % (cls.tmp_dir, os.sep)
+
+ cls.created_test_data = True
+
+ @classmethod
+ def tear_down_class(cls):
+ """
+ Deletes test objects and bucket and tmp dir created by set_up_class.
+ """
+ if not hasattr(cls, 'created_test_data'):
+ return
+ # Call cls.tearDown() in case the tests got interrupted, to ensure
+ # dst objects get deleted.
+ cls.tearDown()
+
+ # Delete test objects.
+ cls.empty_src_key.delete()
+ cls.small_src_key.delete()
+ cls.larger_src_key.delete()
+
+ # Retry (for up to 2 minutes) until the bucket is deleted (it may not
+ # succeed the first time round, due to eventual consistency of bucket
+ # delete operations).
+ for i in range(60):
+ try:
+ cls.src_bucket_uri.delete_bucket()
+ break
+ except StorageResponseError:
+ print 'Test bucket (%s) not yet deleted, still trying' % (
+ cls.src_bucket_uri.uri)
+ time.sleep(2)
+ shutil.rmtree(cls.tmp_dir)
+ cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+
+ def test_non_resumable_download(self):
+ """
+ Tests that non-resumable downloads work
+ """
+ self.small_src_key.get_contents_to_file(self.dst_fp)
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_download_without_persistent_tracker(self):
+ """
+ Tests a single resumable download, with no tracker persistence
+ """
+ res_download_handler = ResumableDownloadHandler()
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_failed_download_with_persistent_tracker(self):
+ """
+ Tests that failed resumable download leaves a correct tracker file
+ """
+ harnass = CallbackTestHarnass()
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name, num_retries=0)
+ try:
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ # We'll get a ResumableDownloadException at this point because
+ # of CallbackTestHarnass (above). Check that the tracker file was
+ # created correctly.
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ self.assertTrue(os.path.exists(self.tracker_file_name))
+ f = open(self.tracker_file_name)
+ etag_line = f.readline()
+ m = re.search(ResumableDownloadHandler.ETAG_REGEX, etag_line)
+ f.close()
+ self.assertTrue(m)
+
+ def test_retryable_exception_recovery(self):
+ """
+ Tests handling of a retryable exception
+ """
+ # Test one of the RETRYABLE_EXCEPTIONS.
+ exception = ResumableDownloadHandler.RETRYABLE_EXCEPTIONS[0]
+ harnass = CallbackTestHarnass(exception=exception)
+ res_download_handler = ResumableDownloadHandler(num_retries=1)
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ # Ensure downloaded object has correct content.
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_non_retryable_exception_handling(self):
+ """
+ Tests resumable download that fails with a non-retryable exception
+ """
+ harnass = CallbackTestHarnass(
+ exception=OSError(errno.EACCES, 'Permission denied'))
+ res_download_handler = ResumableDownloadHandler(num_retries=1)
+ try:
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected OSError')
+ except OSError, e:
+ # Ensure the error was re-raised.
+ self.assertEqual(e.errno, 13)
+
+ def test_failed_and_restarted_download_with_persistent_tracker(self):
+ """
+ Tests resumable download that fails once and then completes,
+ with tracker file
+ """
+ harnass = CallbackTestHarnass()
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name, num_retries=1)
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ # Ensure downloaded object has correct content.
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+ # Ensure tracker file deleted.
+ self.assertFalse(os.path.exists(self.tracker_file_name))
+
+ def test_multiple_in_process_failures_then_succeed(self):
+ """
+ Tests resumable download that fails twice in one process, then completes
+ """
+ res_download_handler = ResumableDownloadHandler(num_retries=3)
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ # Ensure downloaded object has correct content.
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_multiple_in_process_failures_then_succeed_with_tracker_file(self):
+ """
+ Tests resumable download that fails completely in one process,
+ then when restarted completes, using a tracker file
+ """
+ # Set up test harnass that causes more failures than a single
+ # ResumableDownloadHandler instance will handle, writing enough data
+ # before the first failure that some of it survives that process run.
+ harnass = CallbackTestHarnass(
+ fail_after_n_bytes=self.larger_src_key_size/2, num_times_to_fail=2)
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name, num_retries=0)
+ try:
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ # Ensure a tracker file survived.
+ self.assertTrue(os.path.exists(self.tracker_file_name))
+ # Try it one more time; this time should succeed.
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.assertEqual(self.larger_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.larger_src_key_as_string,
+ self.larger_src_key.get_contents_as_string())
+ self.assertFalse(os.path.exists(self.tracker_file_name))
+ # Ensure some of the file was downloaded both before and after failure.
+ self.assertTrue(
+ len(harnass.transferred_seq_before_first_failure) > 1 and
+ len(harnass.transferred_seq_after_first_failure) > 1)
+
+ def test_download_with_inital_partial_download_before_failure(self):
+ """
+ Tests resumable download that successfully downloads some content
+ before it fails, then restarts and completes
+ """
+ # Set up harnass to fail download after several hundred KB so download
+ # server will have saved something before we retry.
+ harnass = CallbackTestHarnass(
+ fail_after_n_bytes=self.larger_src_key_size/2)
+ res_download_handler = ResumableDownloadHandler(num_retries=1)
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ # Ensure downloaded object has correct content.
+ self.assertEqual(self.larger_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.larger_src_key_as_string,
+ self.larger_src_key.get_contents_as_string())
+ # Ensure some of the file was downloaded both before and after failure.
+ self.assertTrue(
+ len(harnass.transferred_seq_before_first_failure) > 1 and
+ len(harnass.transferred_seq_after_first_failure) > 1)
+
+ def test_zero_length_object_download(self):
+ """
+ Tests downloading a zero-length object (exercises boundary conditions).
+ """
+ res_download_handler = ResumableDownloadHandler()
+ self.empty_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ self.assertEqual(0, get_cur_file_size(self.dst_fp))
+
+ def test_download_with_object_size_change_between_starts(self):
+ """
+ Tests resumable download on an object that changes sizes between initial
+ download start and restart
+ """
+ harnass = CallbackTestHarnass(
+ fail_after_n_bytes=self.larger_src_key_size/2, num_times_to_fail=2)
+ # Set up first process' ResumableDownloadHandler not to do any
+ # retries (initial download request will establish expected size to
+ # download server).
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name, num_retries=0)
+ try:
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ # Ensure a tracker file survived.
+ self.assertTrue(os.path.exists(self.tracker_file_name))
+ # Try it again, this time with different src key (simulating an
+ # object that changes sizes between downloads).
+ try:
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ self.assertNotEqual(
+ e.message.find('md5 signature doesn\'t match etag'), -1)
+
+ def test_download_with_file_content_change_during_download(self):
+ """
+ Tests resumable download on an object where the file content changes
+ without changing length while download in progress
+ """
+ harnass = CallbackTestHarnass(
+ fail_after_n_bytes=self.larger_src_key_size/2, num_times_to_fail=2)
+ # Set up first process' ResumableDownloadHandler not to do any
+ # retries (initial download request will establish expected size to
+ # download server).
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name, num_retries=0)
+ dst_filename = self.dst_fp.name
+ try:
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ # Ensure a tracker file survived.
+ self.assertTrue(os.path.exists(self.tracker_file_name))
+ # Before trying again change the first byte of the file fragment
+ # that was already downloaded.
+ orig_size = get_cur_file_size(self.dst_fp)
+ self.dst_fp.seek(0, os.SEEK_SET)
+ self.dst_fp.write('a')
+ # Ensure the file size didn't change.
+ self.assertEqual(orig_size, get_cur_file_size(self.dst_fp))
+ try:
+ self.larger_src_key.get_contents_to_file(
+ self.dst_fp, cb=harnass.call,
+ res_download_handler=res_download_handler)
+ self.fail('Did not get expected ResumableDownloadException')
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ self.assertNotEqual(
+ e.message.find('md5 signature doesn\'t match etag'), -1)
+ # Ensure the bad data wasn't left around.
+ self.assertFalse(os.path.exists(dst_filename))
+
+ def test_download_with_invalid_tracker_etag(self):
+ """
+ Tests resumable download with a tracker file containing an invalid etag
+ """
+ invalid_etag_tracker_file_name = (
+ '%s%sinvalid_etag_tracker' % (self.tmp_dir, os.sep))
+ f = open(invalid_etag_tracker_file_name, 'w')
+ f.write('3.14159\n')
+ f.close()
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=invalid_etag_tracker_file_name)
+ # An error should be printed about the invalid tracker, but then it
+ # should run the download successfully.
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_download_with_inconsistent_etag_in_tracker(self):
+ """
+ Tests resumable download with an inconsistent etag in tracker file
+ """
+ inconsistent_etag_tracker_file_name = (
+ '%s%sinconsistent_etag_tracker' % (self.tmp_dir, os.sep))
+ f = open(inconsistent_etag_tracker_file_name, 'w')
+ good_etag = self.small_src_key.etag.strip('"\'')
+ new_val_as_list = []
+ for c in reversed(good_etag):
+ new_val_as_list.append(c)
+ f.write('%s\n' % ''.join(new_val_as_list))
+ f.close()
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=inconsistent_etag_tracker_file_name)
+ # An error should be printed about the expired tracker, but then it
+ # should run the download successfully.
+ self.small_src_key.get_contents_to_file(
+ self.dst_fp, res_download_handler=res_download_handler)
+ self.assertEqual(self.small_src_key_size,
+ get_cur_file_size(self.dst_fp))
+ self.assertEqual(self.small_src_key_as_string,
+ self.small_src_key.get_contents_as_string())
+
+ def test_download_with_unwritable_tracker_file(self):
+ """
+ Tests resumable download with an unwritable tracker file
+ """
+ # Make dir where tracker_file lives temporarily unwritable.
+ save_mod = os.stat(self.tmp_dir).st_mode
+ try:
+ os.chmod(self.tmp_dir, 0)
+ res_download_handler = ResumableDownloadHandler(
+ tracker_file_name=self.tracker_file_name)
+ except ResumableDownloadException, e:
+ self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+ self.assertNotEqual(
+ e.message.find('Couldn\'t write URI tracker file'), -1)
+ finally:
+ # Restore original protection of dir where tracker_file lives.
+ os.chmod(self.tmp_dir, save_mod)
+
+if __name__ == '__main__':
+ if sys.version_info[:3] < (2, 5, 1):
+ sys.exit('These tests must be run on at least Python 2.5.1\n')
+
+ # Use -d to see more HTTP protocol detail during tests. Note that
+ # unlike the upload test case, you won't see much for the downloads
+ # because there's no HTTP server state protocol in the download case
+ # (and the actual Range GET HTTP protocol detail is suppressed by the
+ # normal boto.s3.Key.get_file() processing).
+ debug = 0
+ opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
+ for o, a in opts:
+ if o in ('-d', '--debug'):
+ debug = 2
+
+ test_loader = unittest.TestLoader()
+ test_loader.testMethodPrefix = 'test_'
+ suite = test_loader.loadTestsFromTestCase(ResumableDownloadTests)
+ # Seems like there should be a cleaner way to find the test_class.
+ test_class = suite.__getattribute__('_tests')[0]
+ # We call set_up_class() and tear_down_class() ourselves because we
+ # don't assume the user has Python 2.7 (which supports classmethods
+ # that do it, with camelCase versions of these names).
+ try:
+ print 'Setting up %s...' % test_class.get_suite_description()
+ test_class.set_up_class(debug)
+ print 'Running %s...' % test_class.get_suite_description()
+ unittest.TextTestRunner(verbosity=2).run(suite)
+ finally:
+ print 'Cleaning up after %s...' % test_class.get_suite_description()
+ test_class.tear_down_class()
+ print ''
diff --git a/boto/tests/test_resumable_uploads.py b/boto/tests/test_resumable_uploads.py
new file mode 100755
index 0000000..da7b086
--- /dev/null
+++ b/boto/tests/test_resumable_uploads.py
@@ -0,0 +1,551 @@
+#!/usr/bin/env python
+#
+# Copyright 2010 Google Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Tests of resumable uploads.
+"""
+
+import errno
+import getopt
+import os
+import random
+import re
+import shutil
+import socket
+import StringIO
+import sys
+import tempfile
+import time
+import unittest
+
+import boto
+from boto.exception import GSResponseError
+from boto.gs.resumable_upload_handler import ResumableUploadHandler
+from boto.exception import ResumableTransferDisposition
+from boto.exception import ResumableUploadException
+from boto.exception import StorageResponseError
+from boto.tests.cb_test_harnass import CallbackTestHarnass
+
+
+class ResumableUploadTests(unittest.TestCase):
+ """
+ Resumable upload test suite.
+ """
+
+ def get_suite_description(self):
+ return 'Resumable upload test suite'
+
+ @classmethod
+ def setUp(cls):
+ """
+ Creates dst_key needed by all tests.
+
+ This method's namingCase is required by the unittest framework.
+ """
+ cls.dst_key = cls.dst_key_uri.new_key(validate=False)
+
+ @classmethod
+ def tearDown(cls):
+ """
+ Deletes any objects or files created by last test run.
+
+ This method's namingCase is required by the unittest framework.
+ """
+ try:
+ cls.dst_key_uri.delete_key()
+ except GSResponseError:
+ # Ignore possible not-found error.
+ pass
+ # Recursively delete dst dir and then re-create it, so in effect we
+ # remove all dirs and files under that directory.
+ shutil.rmtree(cls.tmp_dir)
+ os.mkdir(cls.tmp_dir)
+
+ @staticmethod
+ def build_test_input_file(size):
+ buf = []
+ # I manually construct the random data here instead of calling
+ # os.urandom() because I want to constrain the range of data (in
+ # this case to '0'..'9') so the test
+ # code can easily overwrite part of the StringIO file with
+ # known-to-be-different values.
+ for i in range(size):
+ buf.append(str(random.randint(0, 9)))
+ file_as_string = ''.join(buf)
+ return (file_as_string, StringIO.StringIO(file_as_string))
+
+ @classmethod
+ def set_up_class(cls, debug):
+ """
+ Initializes test suite.
+ """
+
+ # Use a designated tmpdir prefix to make it easy to find the end of
+ # the tmp path.
+ cls.tmpdir_prefix = 'tmp_resumable_upload_test'
+
+ # Create test source file data.
+ cls.empty_src_file_size = 0
+ (cls.empty_src_file_as_string, cls.empty_src_file) = (
+ cls.build_test_input_file(cls.empty_src_file_size))
+ cls.small_src_file_size = 2 * 1024 # 2 KB.
+ (cls.small_src_file_as_string, cls.small_src_file) = (
+ cls.build_test_input_file(cls.small_src_file_size))
+ cls.larger_src_file_size = 500 * 1024 # 500 KB.
+ (cls.larger_src_file_as_string, cls.larger_src_file) = (
+ cls.build_test_input_file(cls.larger_src_file_size))
+ cls.largest_src_file_size = 1024 * 1024 # 1 MB.
+ (cls.largest_src_file_as_string, cls.largest_src_file) = (
+ cls.build_test_input_file(cls.largest_src_file_size))
+
+ # Create temp dir.
+ cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+
+ # Create the test bucket.
+ hostname = socket.gethostname().split('.')[0]
+ cls.uri_base_str = 'gs://res_upload_test_%s_%s_%s' % (
+ hostname, os.getpid(), int(time.time()))
+ cls.dst_bucket_uri = boto.storage_uri('%s_dst' %
+ cls.uri_base_str, debug=debug)
+ cls.dst_bucket_uri.create_bucket()
+ cls.dst_key_uri = cls.dst_bucket_uri.clone_replace_name('obj')
+
+ cls.tracker_file_name = '%s%suri_tracker' % (cls.tmp_dir, os.sep)
+
+ cls.syntactically_invalid_tracker_file_name = (
+ '%s%ssynt_invalid_uri_tracker' % (cls.tmp_dir, os.sep))
+ f = open(cls.syntactically_invalid_tracker_file_name, 'w')
+ f.write('ftp://example.com')
+ f.close()
+
+ cls.invalid_upload_id = (
+ 'http://pub.commondatastorage.googleapis.com/?upload_id='
+ 'AyzB2Uo74W4EYxyi5dp_-r68jz8rtbvshsv4TX7srJVkJ57CxTY5Dw2')
+ cls.invalid_upload_id_tracker_file_name = (
+ '%s%sinvalid_upload_id_tracker' % (cls.tmp_dir, os.sep))
+ f = open(cls.invalid_upload_id_tracker_file_name, 'w')
+ f.write(cls.invalid_upload_id)
+ f.close()
+
+ cls.created_test_data = True
+
+ @classmethod
+ def tear_down_class(cls):
+ """
+ Deletes bucket and tmp dir created by set_up_class.
+ """
+ if not hasattr(cls, 'created_test_data'):
+ return
+ # Call cls.tearDown() in case the tests got interrupted, to ensure
+ # dst objects get deleted.
+ cls.tearDown()
+
+ # Retry (for up to 2 minutes) until the bucket is deleted (it may not
+ # succeed the first time round, due to eventual consistency of bucket
+ # delete operations).
+ for i in range(60):
+ try:
+ cls.dst_bucket_uri.delete_bucket()
+ break
+ except StorageResponseError:
+ print 'Test bucket (%s) not yet deleted, still trying' % (
+ cls.dst_bucket_uri.uri)
+ time.sleep(2)
+ shutil.rmtree(cls.tmp_dir)
+ cls.tmp_dir = tempfile.mkdtemp(prefix=cls.tmpdir_prefix)
+
+ def test_non_resumable_upload(self):
+ """
+ Tests that non-resumable uploads work
+ """
+ self.dst_key.set_contents_from_file(self.small_src_file)
+ self.assertEqual(self.small_src_file_size, self.dst_key.size)
+ self.assertEqual(self.small_src_file_as_string,
+ self.dst_key.get_contents_as_string())
+
+ def test_upload_without_persistent_tracker(self):
+ """
+ Tests a single resumable upload, with no tracker URI persistence
+ """
+ res_upload_handler = ResumableUploadHandler()
+ self.dst_key.set_contents_from_file(
+ self.small_src_file, res_upload_handler=res_upload_handler)
+ self.assertEqual(self.small_src_file_size, self.dst_key.size)
+ self.assertEqual(self.small_src_file_as_string,
+ self.dst_key.get_contents_as_string())
+
+    def test_failed_upload_with_persistent_tracker(self):
+        """
+        Tests that a failed resumable upload leaves a correct tracker URI file.
+        """
+        # Default CallbackTestHarnass injects a failure mid-upload;
+        # num_retries=0 ensures the first failure aborts the transfer.
+        harnass = CallbackTestHarnass()
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.tracker_file_name, num_retries=0)
+        try:
+            self.dst_key.set_contents_from_file(
+                self.small_src_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            # We'll get a ResumableUploadException at this point because
+            # of CallbackTestHarnass (above). Check that the tracker file was
+            # created correctly.
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertTrue(os.path.exists(self.tracker_file_name))
+            f = open(self.tracker_file_name)
+            uri_from_file = f.readline().strip()
+            f.close()
+            self.assertEqual(uri_from_file,
+                             res_upload_handler.get_tracker_uri())
+
+    def test_retryable_exception_recovery(self):
+        """
+        Tests handling of a retryable exception.
+        """
+        # Test one of the RETRYABLE_EXCEPTIONS; num_retries=1 lets the
+        # handler recover from the single injected failure.
+        exception = ResumableUploadHandler.RETRYABLE_EXCEPTIONS[0]
+        harnass = CallbackTestHarnass(exception=exception)
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, cb=harnass.call,
+            res_upload_handler=res_upload_handler)
+        # Ensure uploaded object has correct content.
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+
+    def test_non_retryable_exception_handling(self):
+        """
+        Tests a resumable upload that fails with a non-retryable exception.
+        """
+        # OSError is not in RETRYABLE_EXCEPTIONS, so the handler should
+        # re-raise it rather than retrying.
+        harnass = CallbackTestHarnass(
+            exception=OSError(errno.EACCES, 'Permission denied'))
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        try:
+            self.dst_key.set_contents_from_file(
+                self.small_src_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected OSError')
+        except OSError, e:
+            # Ensure the error was re-raised (13 == errno.EACCES).
+            self.assertEqual(e.errno, 13)
+
+    def test_failed_and_restarted_upload_with_persistent_tracker(self):
+        """
+        Tests resumable upload that fails once and then completes, with tracker
+        file.
+        """
+        # Harness injects one failure; num_retries=1 allows recovery, and the
+        # tracker file lets progress persist across the retry.
+        harnass = CallbackTestHarnass()
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.tracker_file_name, num_retries=1)
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, cb=harnass.call,
+            res_upload_handler=res_upload_handler)
+        # Ensure uploaded object has correct content.
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+        # Ensure tracker file deleted after successful completion.
+        self.assertFalse(os.path.exists(self.tracker_file_name))
+
+    def test_multiple_in_process_failures_then_succeed(self):
+        """
+        Tests resumable upload that fails twice in one process, then completes.
+        """
+        # NOTE(review): no CallbackTestHarnass is wired in here, so no
+        # failures appear to be injected despite the docstring -- confirm
+        # whether this test was meant to pass a fail-inducing cb like its
+        # sibling tests do.
+        res_upload_handler = ResumableUploadHandler(num_retries=3)
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, res_upload_handler=res_upload_handler)
+        # Ensure uploaded object has correct content.
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+
+    def test_multiple_in_process_failures_then_succeed_with_tracker_file(self):
+        """
+        Tests resumable upload that fails completely in one process,
+        then when restarted completes, using a tracker file.
+        """
+        # Set up test harnass that causes more failures than a single
+        # ResumableUploadHandler instance will handle, writing enough data
+        # before the first failure that some of it survives that process run.
+        harnass = CallbackTestHarnass(
+            fail_after_n_bytes=self.larger_src_file_size/2, num_times_to_fail=2)
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.tracker_file_name, num_retries=1)
+        try:
+            self.dst_key.set_contents_from_file(
+                self.larger_src_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            # Ensure a tracker file survived.
+            self.assertTrue(os.path.exists(self.tracker_file_name))
+        # Try it one more time; this time should succeed (harness has
+        # already used up its configured failures).
+        self.dst_key.set_contents_from_file(
+            self.larger_src_file, cb=harnass.call,
+            res_upload_handler=res_upload_handler)
+        self.assertEqual(self.larger_src_file_size, self.dst_key.size)
+        self.assertEqual(self.larger_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+        self.assertFalse(os.path.exists(self.tracker_file_name))
+        # Ensure some of the file was uploaded both before and after failure.
+        self.assertTrue(len(harnass.transferred_seq_before_first_failure) > 1
+                        and
+                        len(harnass.transferred_seq_after_first_failure) > 1)
+
+    def test_upload_with_inital_partial_upload_before_failure(self):
+        """
+        Tests resumable upload that successfully uploads some content
+        before it fails, then restarts and completes.
+
+        (Method name preserves the historical 'inital' typo; renaming it
+        would change the externally visible test ID.)
+        """
+        # Set up harnass to fail upload after several hundred KB so upload
+        # server will have saved something before we retry.
+        harnass = CallbackTestHarnass(
+            fail_after_n_bytes=self.larger_src_file_size/2)
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        self.dst_key.set_contents_from_file(
+            self.larger_src_file, cb=harnass.call,
+            res_upload_handler=res_upload_handler)
+        # Ensure uploaded object has correct content.
+        self.assertEqual(self.larger_src_file_size, self.dst_key.size)
+        self.assertEqual(self.larger_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+        # Ensure some of the file was uploaded both before and after failure.
+        self.assertTrue(len(harnass.transferred_seq_before_first_failure) > 1
+                        and
+                        len(harnass.transferred_seq_after_first_failure) > 1)
+
+    def test_empty_file_upload(self):
+        """
+        Tests uploading an empty file (exercises boundary conditions).
+        """
+        res_upload_handler = ResumableUploadHandler()
+        self.dst_key.set_contents_from_file(
+            self.empty_src_file, res_upload_handler=res_upload_handler)
+        # A zero-byte upload should still create the object.
+        self.assertEqual(0, self.dst_key.size)
+
+    def test_upload_retains_metadata(self):
+        """
+        Tests that resumable upload correctly sets passed metadata.
+        """
+        res_upload_handler = ResumableUploadHandler()
+        headers = {'Content-Type' : 'text/plain', 'Content-Encoding' : 'gzip',
+                   'x-goog-meta-abc' : 'my meta', 'x-goog-acl' : 'public-read'}
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, headers=headers,
+            res_upload_handler=res_upload_handler)
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+        # Re-open the key to refresh its server-side attributes.
+        self.dst_key.open_read()
+        self.assertEqual('text/plain', self.dst_key.content_type)
+        self.assertEqual('gzip', self.dst_key.content_encoding)
+        self.assertTrue('abc' in self.dst_key.metadata)
+        self.assertEqual('my meta', str(self.dst_key.metadata['abc']))
+        acl = self.dst_key.get_acl()
+        for entry in acl.entries.entry_list:
+            if str(entry.scope) == '<AllUsers>':
+                # NOTE(review): asserts on entry_list[1] rather than the
+                # matched 'entry'; this is only correct if <AllUsers> is
+                # always at index 1 -- confirm, else use entry.permission.
+                self.assertEqual('READ', str(acl.entries.entry_list[1].permission))
+                return
+        self.fail('No <AllUsers> scope found')
+
+    def test_upload_with_file_size_change_between_starts(self):
+        """
+        Tests resumable upload on a file that changes sizes between inital
+        upload start and restart.
+        """
+        harnass = CallbackTestHarnass(
+            fail_after_n_bytes=self.larger_src_file_size/2)
+        # Set up first process' ResumableUploadHandler not to do any
+        # retries (initial upload request will establish expected size to
+        # upload server).
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.tracker_file_name, num_retries=0)
+        try:
+            self.dst_key.set_contents_from_file(
+                self.larger_src_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            # Ensure a tracker file survived.
+            self.assertTrue(os.path.exists(self.tracker_file_name))
+        # Try it again, this time with different size source file.
+        # Wait 1 second between retry attempts, to give upload server a
+        # chance to save state so it can respond to changed file size with
+        # 500 response in the next attempt.
+        time.sleep(1)
+        try:
+            self.dst_key.set_contents_from_file(
+                self.largest_src_file, res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertNotEqual(
+                e.message.find('attempt to upload a different size file'), -1)
+
+    def test_upload_with_file_size_change_during_upload(self):
+        """
+        Tests resumable upload on a file that changes sizes while upload
+        in progress.
+        """
+        # Create a file we can change during the upload.
+        test_file_size = 500 * 1024  # 500 KB.
+        test_file = self.build_test_input_file(test_file_size)[1]
+        # fp_change_pos == test_file_size means the harness appends at EOF,
+        # growing the file mid-transfer.
+        harnass = CallbackTestHarnass(fp_to_change=test_file,
+                                      fp_change_pos=test_file_size)
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        try:
+            self.dst_key.set_contents_from_file(
+                test_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertNotEqual(
+                e.message.find('File changed during upload'), -1)
+
+    def test_upload_with_file_content_change_during_upload(self):
+        """
+        Tests resumable upload on a file that changes one byte of content
+        (so, size stays the same) while upload in progress.
+        """
+        test_file_size = 500 * 1024  # 500 KB.
+        test_file = self.build_test_input_file(test_file_size)[1]
+        harnass = CallbackTestHarnass(fail_after_n_bytes=test_file_size/2,
+                                      fp_to_change=test_file,
+                                      # Writing at file_size-5 won't change file
+                                      # size because CallbackTestHarnass only
+                                      # writes 3 bytes.
+                                      fp_change_pos=test_file_size-5)
+        res_upload_handler = ResumableUploadHandler(num_retries=1)
+        try:
+            self.dst_key.set_contents_from_file(
+                test_file, cb=harnass.call,
+                res_upload_handler=res_upload_handler)
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            # Ensure the file size didn't change.
+            test_file.seek(0, os.SEEK_END)
+            self.assertEqual(test_file_size, test_file.tell())
+            # Corruption should be detected by the md5/etag comparison.
+            self.assertNotEqual(
+                e.message.find('md5 signature doesn\'t match etag'), -1)
+            # Ensure the bad data wasn't left around.
+            all_keys = self.dst_key_uri.get_all_keys()
+            self.assertEqual(0, len(all_keys))
+
+    def test_upload_with_content_length_header_set(self):
+        """
+        Tests resumable upload on a file when the user supplies a
+        Content-Length header. This is used by gsutil, for example,
+        to set the content length when gzipping a file.
+
+        Resumable uploads manage Content-Length themselves, so a
+        user-supplied value must be rejected.
+        """
+        res_upload_handler = ResumableUploadHandler()
+        try:
+            self.dst_key.set_contents_from_file(
+                self.small_src_file, res_upload_handler=res_upload_handler,
+                headers={'Content-Length' : self.small_src_file_size})
+            self.fail('Did not get expected ResumableUploadException')
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertNotEqual(
+                e.message.find('Attempt to specify Content-Length header'), -1)
+
+    def test_upload_with_syntactically_invalid_tracker_uri(self):
+        """
+        Tests resumable upload with a syntactically invalid tracker URI.
+        """
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.syntactically_invalid_tracker_file_name)
+        # An error should be printed about the invalid URI, but then it
+        # should run the update successfully.
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, res_upload_handler=res_upload_handler)
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+
+    def test_upload_with_invalid_upload_id_in_tracker_file(self):
+        """
+        Tests resumable upload with invalid upload ID.
+        """
+        res_upload_handler = ResumableUploadHandler(
+            tracker_file_name=self.invalid_upload_id_tracker_file_name)
+        # An error should occur, but then the tracker URI should be
+        # regenerated and the update should succeed.
+        self.dst_key.set_contents_from_file(
+            self.small_src_file, res_upload_handler=res_upload_handler)
+        self.assertEqual(self.small_src_file_size, self.dst_key.size)
+        self.assertEqual(self.small_src_file_as_string,
+                         self.dst_key.get_contents_as_string())
+        # The handler must have replaced the stale upload ID.
+        self.assertNotEqual(self.invalid_upload_id,
+                            res_upload_handler.get_tracker_uri())
+
+    def test_upload_with_unwritable_tracker_file(self):
+        """
+        Tests resumable upload with an unwritable tracker file.
+        """
+        # Make dir where tracker_file lives temporarily unwritable.
+        save_mod = os.stat(self.tmp_dir).st_mode
+        try:
+            os.chmod(self.tmp_dir, 0)
+            res_upload_handler = ResumableUploadHandler(
+                tracker_file_name=self.tracker_file_name)
+            # NOTE(review): no self.fail() here, so if the constructor does
+            # NOT raise, the test silently passes -- confirm whether a
+            # fail() call was intended after the constructor.
+        except ResumableUploadException, e:
+            self.assertEqual(e.disposition, ResumableTransferDisposition.ABORT)
+            self.assertNotEqual(
+                e.message.find('Couldn\'t write URI tracker file'), -1)
+        finally:
+            # Restore original protection of dir where tracker_file lives.
+            os.chmod(self.tmp_dir, save_mod)
+
+if __name__ == '__main__':
+    # Manual test driver: parses -d/--debug, then runs the suite with
+    # explicit class-level setup/teardown (pre-Python-2.7 compatible).
+    if sys.version_info[:3] < (2, 5, 1):
+        sys.exit('These tests must be run on at least Python 2.5.1\n')
+
+    # Use -d to see more HTTP protocol detail during tests.
+    debug = 0
+    opts, args = getopt.getopt(sys.argv[1:], 'd', ['debug'])
+    for o, a in opts:
+        if o in ('-d', '--debug'):
+            debug = 2
+
+    test_loader = unittest.TestLoader()
+    test_loader.testMethodPrefix = 'test_'
+    suite = test_loader.loadTestsFromTestCase(ResumableUploadTests)
+    # Seems like there should be a cleaner way to find the test_class.
+    test_class = suite.__getattribute__('_tests')[0]
+    # We call set_up_class() and tear_down_class() ourselves because we
+    # don't assume the user has Python 2.7 (which supports classmethods
+    # that do it, with camelCase versions of these names).
+    try:
+        print 'Setting up %s...' % test_class.get_suite_description()
+        test_class.set_up_class(debug)
+        print 'Running %s...' % test_class.get_suite_description()
+        unittest.TextTestRunner(verbosity=2).run(suite)
+    finally:
+        # Always clean up, even if setup or the run was interrupted.
+        print 'Cleaning up after %s...' % test_class.get_suite_description()
+        test_class.tear_down_class()
+        print ''
diff --git a/boto/tests/test_s3connection.py b/boto/tests/test_s3connection.py
new file mode 100644
index 0000000..3dd936f
--- /dev/null
+++ b/boto/tests/test_s3connection.py
@@ -0,0 +1,187 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3Connection
+"""
+
+import unittest
+import time
+import os
+import urllib
+from boto.s3.connection import S3Connection
+from boto.exception import S3PermissionsError
+
+class S3ConnectionTest (unittest.TestCase):
+    """Integration tests for S3Connection against the live S3 service.
+
+    Requires valid AWS credentials in the environment/boto config;
+    creates (and deletes) real buckets and keys.
+    """
+
+    def test_1_basic(self):
+        """End-to-end exercise of buckets, keys, metadata, ACLs and RRS."""
+        print '--- running S3Connection tests ---'
+        c = S3Connection()
+        # create a new, empty bucket
+        bucket_name = 'test-%d' % int(time.time())
+        bucket = c.create_bucket(bucket_name)
+        # now try a get_bucket call and see if it's really there
+        bucket = c.get_bucket(bucket_name)
+        # test logging
+        logging_bucket = c.create_bucket(bucket_name + '-log')
+        logging_bucket.set_as_logging_target()
+        bucket.enable_logging(target_bucket=logging_bucket, target_prefix=bucket.name)
+        bucket.disable_logging()
+        c.delete_bucket(logging_bucket)
+        k = bucket.new_key()
+        k.name = 'foobar'
+        s1 = 'This is a test of file upload and download'
+        s2 = 'This is a second string to test file upload and download'
+        k.set_contents_from_string(s1)
+        fp = open('foobar', 'wb')
+        # now get the contents from s3 to a local file
+        k.get_contents_to_file(fp)
+        fp.close()
+        fp = open('foobar')
+        # check to make sure content read from s3 is identical to original
+        assert s1 == fp.read(), 'corrupted file'
+        fp.close()
+        # test generated URLs (default scheme, then forced HTTP)
+        url = k.generate_url(3600)
+        file = urllib.urlopen(url)
+        assert s1 == file.read(), 'invalid URL %s' % url
+        url = k.generate_url(3600, force_http=True)
+        file = urllib.urlopen(url)
+        assert s1 == file.read(), 'invalid URL %s' % url
+        bucket.delete_key(k)
+        # test a few variations on get_all_keys - first load some data
+        # for the first one, let's override the content type
+        phony_mimetype = 'application/x-boto-test'
+        headers = {'Content-Type': phony_mimetype}
+        k.name = 'foo/bar'
+        k.set_contents_from_string(s1, headers)
+        k.name = 'foo/bas'
+        k.set_contents_from_filename('foobar')
+        k.name = 'foo/bat'
+        k.set_contents_from_string(s1)
+        k.name = 'fie/bar'
+        k.set_contents_from_string(s1)
+        k.name = 'fie/bas'
+        k.set_contents_from_string(s1)
+        k.name = 'fie/bat'
+        k.set_contents_from_string(s1)
+        # try resetting the contents to another value
+        md5 = k.md5
+        k.set_contents_from_string(s2)
+        assert k.md5 != md5
+        os.unlink('foobar')
+        all = bucket.get_all_keys()
+        assert len(all) == 6
+        rs = bucket.get_all_keys(prefix='foo')
+        assert len(rs) == 3
+        # delimiter='/' should collapse keys into the two common prefixes
+        rs = bucket.get_all_keys(prefix='', delimiter='/')
+        assert len(rs) == 2
+        rs = bucket.get_all_keys(maxkeys=5)
+        assert len(rs) == 5
+        # test the lookup method
+        k = bucket.lookup('foo/bar')
+        assert isinstance(k, bucket.key_class)
+        assert k.content_type == phony_mimetype
+        k = bucket.lookup('notthere')
+        assert k == None
+        # try some metadata stuff
+        k = bucket.new_key()
+        k.name = 'has_metadata'
+        mdkey1 = 'meta1'
+        mdval1 = 'This is the first metadata value'
+        k.set_metadata(mdkey1, mdval1)
+        mdkey2 = 'meta2'
+        mdval2 = 'This is the second metadata value'
+        k.set_metadata(mdkey2, mdval2)
+        # try a unicode metadata value
+        mdval3 = u'föö'
+        mdkey3 = 'meta3'
+        k.set_metadata(mdkey3, mdval3)
+        k.set_contents_from_string(s1)
+        k = bucket.lookup('has_metadata')
+        assert k.get_metadata(mdkey1) == mdval1
+        assert k.get_metadata(mdkey2) == mdval2
+        assert k.get_metadata(mdkey3) == mdval3
+        # metadata should also come back on a GET (not just HEAD/lookup)
+        k = bucket.new_key()
+        k.name = 'has_metadata'
+        k.get_contents_as_string()
+        assert k.get_metadata(mdkey1) == mdval1
+        assert k.get_metadata(mdkey2) == mdval2
+        assert k.get_metadata(mdkey3) == mdval3
+        bucket.delete_key(k)
+        # test list and iterator
+        rs1 = bucket.list()
+        num_iter = 0
+        for r in rs1:
+            num_iter = num_iter + 1
+        rs = bucket.get_all_keys()
+        num_keys = len(rs)
+        assert num_iter == num_keys
+        # try a key with a funny character
+        k = bucket.new_key()
+        k.name = 'testnewline\n'
+        k.set_contents_from_string('This is a test')
+        rs = bucket.get_all_keys()
+        assert len(rs) == num_keys + 1
+        bucket.delete_key(k)
+        rs = bucket.get_all_keys()
+        assert len(rs) == num_keys
+        # try some acl stuff
+        bucket.set_acl('public-read')
+        policy = bucket.get_acl()
+        assert len(policy.acl.grants) == 2
+        bucket.set_acl('private')
+        policy = bucket.get_acl()
+        assert len(policy.acl.grants) == 1
+        k = bucket.lookup('foo/bar')
+        k.set_acl('public-read')
+        policy = k.get_acl()
+        assert len(policy.acl.grants) == 2
+        k.set_acl('private')
+        policy = k.get_acl()
+        assert len(policy.acl.grants) == 1
+        # try the convenience methods for grants
+        bucket.add_user_grant('FULL_CONTROL',
+                              'c1e724fbfa0979a4448393c59a8c055011f739b6d102fb37a65f26414653cd67')
+        try:
+            # expected to fail: foo@bar.com does not map to an AWS account
+            bucket.add_email_grant('foobar', 'foo@bar.com')
+        except S3PermissionsError:
+            pass
+        # now try to create an RRS key
+        k = bucket.new_key('reduced_redundancy')
+        k.set_contents_from_string('This key has reduced redundancy',
+                                   reduced_redundancy=True)
+
+        # now try to inject a response header
+        data = k.get_contents_as_string(response_headers={'response-content-type' : 'foo/bar'})
+        assert k.content_type == 'foo/bar'
+
+        # now delete all keys in bucket
+        for k in bucket:
+            if k.name == 'reduced_redundancy':
+                assert k.storage_class == 'REDUCED_REDUNDANCY'
+            bucket.delete_key(k)
+        # now delete bucket
+        time.sleep(5)
+        c.delete_bucket(bucket)
+        print '--- tests completed ---'
diff --git a/boto/tests/test_s3versioning.py b/boto/tests/test_s3versioning.py
new file mode 100644
index 0000000..b778db0
--- /dev/null
+++ b/boto/tests/test_s3versioning.py
@@ -0,0 +1,147 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the S3 Versioning and MfaDelete
+"""
+
+import unittest
+import time
+from boto.s3.connection import S3Connection
+from boto.exception import S3ResponseError
+from boto.s3.deletemarker import DeleteMarker
+
+class S3VersionTest (unittest.TestCase):
+    """Integration tests for S3 bucket versioning and MFA Delete.
+
+    The MFA Delete portion is interactive: it prompts for an MFA device
+    serial number and codes via raw_input.
+    """
+
+    def test_1_versions(self):
+        """Exercises versioned put/get/delete, delete markers and MFA Delete."""
+        print '--- running S3Version tests ---'
+        c = S3Connection()
+        # create a new, empty bucket
+        bucket_name = 'version-%d' % int(time.time())
+        bucket = c.create_bucket(bucket_name)
+
+        # now try a get_bucket call and see if it's really there
+        bucket = c.get_bucket(bucket_name)
+
+        # enable versions
+        d = bucket.get_versioning_status()
+        assert not d.has_key('Versioning')
+        bucket.configure_versioning(versioning=True)
+        # sleeps throughout give S3's eventually-consistent metadata time
+        # to settle before we assert on it
+        time.sleep(5)
+        d = bucket.get_versioning_status()
+        assert d['Versioning'] == 'Enabled'
+
+        # create a new key in the versioned bucket
+        k = bucket.new_key()
+        k.name = 'foobar'
+        s1 = 'This is a test of s3 versioning'
+        s2 = 'This is the second test of s3 versioning'
+        k.set_contents_from_string(s1)
+        time.sleep(5)
+
+        # remember the version id of this object
+        v1 = k.version_id
+
+        # now get the contents from s3
+        o1 = k.get_contents_as_string()
+
+        # check to make sure content read from s3 is identical to original
+        assert o1 == s1
+
+        # now overwrite that same key with new data
+        k.set_contents_from_string(s2)
+        v2 = k.version_id
+        time.sleep(5)
+
+        # now retrieve the contents as a string and compare
+        s3 = k.get_contents_as_string(version_id=v2)
+        assert s3 == s2
+
+        # Now list all versions and compare to what we have
+        # (versions are listed newest first)
+        rs = bucket.get_all_versions()
+        assert rs[0].version_id == v2
+        assert rs[1].version_id == v1
+
+        # Now do a regular list command and make sure only the new key shows up
+        rs = bucket.get_all_keys()
+        assert len(rs) == 1
+
+        # Now do regular delete
+        bucket.delete_key('foobar')
+        time.sleep(5)
+
+        # Now list versions and make sure old versions are there
+        # plus the DeleteMarker
+        rs = bucket.get_all_versions()
+        assert len(rs) == 3
+        assert isinstance(rs[0], DeleteMarker)
+
+        # Now delete v1 of the key
+        bucket.delete_key('foobar', version_id=v1)
+        time.sleep(5)
+
+        # Now list versions again and make sure v1 is not there
+        rs = bucket.get_all_versions()
+        versions = [k.version_id for k in rs]
+        assert v1 not in versions
+        assert v2 in versions
+
+        # Now try to enable MfaDelete (interactive: prompts for MFA device)
+        mfa_sn = raw_input('MFA S/N: ')
+        mfa_code = raw_input('MFA Code: ')
+        bucket.configure_versioning(True, mfa_delete=True, mfa_token=(mfa_sn, mfa_code))
+        # Poll with exponential backoff until the new status is visible.
+        i = 0
+        for i in range(1,8):
+            time.sleep(2**i)
+            d = bucket.get_versioning_status()
+            if d['Versioning'] == 'Enabled' and d['MfaDelete'] == 'Enabled':
+                break
+        assert d['Versioning'] == 'Enabled'
+        assert d['MfaDelete'] == 'Enabled'
+
+        # Now try to delete v2 without the MFA token (should be rejected)
+        try:
+            bucket.delete_key('foobar', version_id=v2)
+        except S3ResponseError:
+            pass
+
+        # Now try to delete v2 with the MFA token
+        mfa_code = raw_input('MFA Code: ')
+        bucket.delete_key('foobar', version_id=v2, mfa_token=(mfa_sn, mfa_code))
+
+        # Now disable MfaDelete on the bucket
+        mfa_code = raw_input('MFA Code: ')
+        bucket.configure_versioning(True, mfa_delete=False, mfa_token=(mfa_sn, mfa_code))
+
+        # Now suspend Versioning on the bucket
+        bucket.configure_versioning(False)
+
+        # now delete all keys and deletemarkers in bucket
+        for k in bucket.list_versions():
+            bucket.delete_key(k.name, version_id=k.version_id)
+
+        # now delete bucket
+        c.delete_bucket(bucket)
+        print '--- tests completed ---'
diff --git a/boto/tests/test_sdbconnection.py b/boto/tests/test_sdbconnection.py
new file mode 100644
index 0000000..eac57f7
--- /dev/null
+++ b/boto/tests/test_sdbconnection.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the SDBConnection
+"""
+
+import unittest
+import time
+from boto.sdb.connection import SDBConnection
+from boto.exception import SDBResponseError
+
+class SDBConnectionTest (unittest.TestCase):
+    """Integration tests for SDBConnection (creates a real SimpleDB domain)."""
+
+    def test_1_basic(self):
+        """Exercises domain CRUD, attribute put/get/select and batch ops."""
+        print '--- running SDBConnection tests ---'
+        c = SDBConnection()
+        rs = c.get_all_domains()
+        num_domains = len(rs)
+
+        # try illegal name (colons are not allowed in domain names)
+        try:
+            domain = c.create_domain('bad:domain:name')
+        except SDBResponseError:
+            pass
+
+        # now create one that should work and should be unique (i.e. a new one)
+        domain_name = 'test%d' % int(time.time())
+        domain = c.create_domain(domain_name)
+        rs = c.get_all_domains()
+        assert len(rs) == num_domains + 1
+
+        # now let's add a couple of items and attributes
+        item_1 = 'item1'
+        same_value = 'same_value'
+        attrs_1 = {'name1' : same_value, 'name2' : 'diff_value_1'}
+        domain.put_attributes(item_1, attrs_1)
+        item_2 = 'item2'
+        attrs_2 = {'name1' : same_value, 'name2' : 'diff_value_2'}
+        domain.put_attributes(item_2, attrs_2)
+
+        # try to get the attributes and see if they match
+        # (consistent_read avoids eventual-consistency flakiness)
+        item = domain.get_attributes(item_1, consistent_read=True)
+        assert len(item.keys()) == len(attrs_1.keys())
+        assert item['name1'] == attrs_1['name1']
+        assert item['name2'] == attrs_1['name2']
+
+        # try a search or two
+        query = 'select * from %s where name1="%s"' % (domain_name, same_value)
+        rs = domain.select(query, consistent_read=True)
+        n = 0
+        for item in rs:
+            n += 1
+        assert n == 2
+        query = 'select * from %s where name2="diff_value_2"' % domain_name
+        rs = domain.select(query, consistent_read=True)
+        n = 0
+        for item in rs:
+            n += 1
+        assert n == 1
+
+        # delete all attributes associated with item_1
+        stat = domain.delete_attributes(item_1)
+        assert stat
+
+        # now try a batch put operation on the domain
+        item3 = {'name3_1' : 'value3_1',
+                 'name3_2' : 'value3_2',
+                 'name3_3' : ['value3_3_1', 'value3_3_2']}
+
+        item4 = {'name4_1' : 'value4_1',
+                 'name4_2' : ['value4_2_1', 'value4_2_2'],
+                 'name4_3' : 'value4_3'}
+        items = {'item3' : item3, 'item4' : item4}
+        domain.batch_put_attributes(items)
+
+        item = domain.get_attributes('item3', consistent_read=True)
+        assert item['name3_2'] == 'value3_2'
+
+        # now try a batch delete operation (variation #1: explicit attrs)
+        items = {'item3' : item3}
+        stat = domain.batch_delete_attributes(items)
+
+        item = domain.get_attributes('item3', consistent_read=True)
+        assert not item
+
+        # now try a batch delete operation (variation #2: None deletes all)
+        stat = domain.batch_delete_attributes({'item4' : None})
+
+        item = domain.get_attributes('item4', consistent_read=True)
+        assert not item
+
+        # now delete the domain
+        stat = c.delete_domain(domain)
+        assert stat
+
+        print '--- tests completed ---'
+
diff --git a/boto/tests/test_sqsconnection.py b/boto/tests/test_sqsconnection.py
new file mode 100644
index 0000000..dd0cfcc
--- /dev/null
+++ b/boto/tests/test_sqsconnection.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Some unit tests for the SQSConnection
+"""
+
+import unittest
+import time
+from boto.sqs.connection import SQSConnection
+from boto.sqs.message import MHMessage
+from boto.exception import SQSError
+
+class SQSConnectionTest (unittest.TestCase):
+
+ def test_1_basic(self):
+ print '--- running SQSConnection tests ---'
+ c = SQSConnection()
+ rs = c.get_all_queues()
+ num_queues = 0
+ for q in rs:
+ num_queues += 1
+
+ # try illegal name
+ try:
+ queue = c.create_queue('bad_queue_name')
+ except SQSError:
+ pass
+
+ # now create one that should work and should be unique (i.e. a new one)
+ queue_name = 'test%d' % int(time.time())
+ timeout = 60
+ queue = c.create_queue(queue_name, timeout)
+ time.sleep(60)
+ rs = c.get_all_queues()
+ i = 0
+ for q in rs:
+ i += 1
+ assert i == num_queues+1
+ assert queue.count_slow() == 0
+
+ # check the visibility timeout
+ t = queue.get_timeout()
+ assert t == timeout, '%d != %d' % (t, timeout)
+
+ # now try to get queue attributes
+ a = q.get_attributes()
+ assert a.has_key('ApproximateNumberOfMessages')
+ assert a.has_key('VisibilityTimeout')
+ a = q.get_attributes('ApproximateNumberOfMessages')
+ assert a.has_key('ApproximateNumberOfMessages')
+ assert not a.has_key('VisibilityTimeout')
+ a = q.get_attributes('VisibilityTimeout')
+ assert not a.has_key('ApproximateNumberOfMessages')
+ assert a.has_key('VisibilityTimeout')
+
+ # now change the visibility timeout
+ timeout = 45
+ queue.set_timeout(timeout)
+ time.sleep(60)
+ t = queue.get_timeout()
+ assert t == timeout, '%d != %d' % (t, timeout)
+
+ # now add a message
+ message_body = 'This is a test\n'
+ message = queue.new_message(message_body)
+ queue.write(message)
+ time.sleep(60)
+ assert queue.count_slow() == 1
+ time.sleep(90)
+
+ # now read the message from the queue with a 10 second timeout
+ message = queue.read(visibility_timeout=10)
+ assert message
+ assert message.get_body() == message_body
+
+ # now immediately try another read, shouldn't find anything
+ message = queue.read()
+ assert message == None
+
+ # now wait 30 seconds and try again
+ time.sleep(30)
+ message = queue.read()
+ assert message
+
+ # now delete the message
+ queue.delete_message(message)
+ time.sleep(30)
+ assert queue.count_slow() == 0
+
+ # create another queue so we can test force deletion
+ # we will also test MHMessage with this queue
+ queue_name = 'test%d' % int(time.time())
+ timeout = 60
+ queue = c.create_queue(queue_name, timeout)
+ queue.set_message_class(MHMessage)
+ time.sleep(30)
+
+ # now add a couple of messages
+ message = queue.new_message()
+ message['foo'] = 'bar'
+ queue.write(message)
+ message_body = {'fie' : 'baz', 'foo' : 'bar'}
+ message = queue.new_message(body=message_body)
+ queue.write(message)
+ time.sleep(30)
+
+ m = queue.read()
+ assert m['foo'] == 'bar'
+
+ # now delete that queue and messages
+ c.delete_queue(queue, True)
+
+ print '--- tests completed ---'
+
diff --git a/boto/utils.py b/boto/utils.py
new file mode 100644
index 0000000..6bad25d
--- /dev/null
+++ b/boto/utils.py
@@ -0,0 +1,607 @@
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+#
+# Parts of this code were copied or derived from sample code supplied by AWS.
+# The following notice applies to that code.
+#
+# This software code is made available "AS IS" without warranties of any
+# kind. You may copy, display, modify and redistribute the software
+# code either by itself or as incorporated into your code; provided that
+# you do not remove any proprietary notices. Your use of this software
+# code is at your own risk and you waive any claim against Amazon
+# Digital Services, Inc. or its affiliates with respect to your use of
+# this software code. (c) 2006 Amazon Digital Services, Inc. or its
+# affiliates.
+
+"""
+Some handy utility functions used by several classes.
+"""
+
+import urllib
+import urllib2
+import imp
+import subprocess
+import StringIO
+import time
+import logging.handlers
+import boto
+import tempfile
+import smtplib
+import datetime
+from email.MIMEMultipart import MIMEMultipart
+from email.MIMEBase import MIMEBase
+from email.MIMEText import MIMEText
+from email.Utils import formatdate
+from email import Encoders
+
+try:
+ import hashlib
+ _hashfn = hashlib.sha512
+except ImportError:
+ import md5
+ _hashfn = md5.md5
+
+# List of Query String Arguments of Interest
+qsa_of_interest = ['acl', 'location', 'logging', 'partNumber', 'policy',
+ 'requestPayment', 'torrent', 'versioning', 'versionId',
+ 'versions', 'website', 'uploads', 'uploadId',
+ 'response-content-type', 'response-content-language',
+                   'response-expires', 'response-cache-control',
+ 'response-content-disposition',
+ 'response-content-encoding']
+
+# generates the aws canonical string for the given parameters
+def canonical_string(method, path, headers, expires=None,
+ provider=None):
+ if not provider:
+ provider = boto.provider.get_default()
+ interesting_headers = {}
+ for key in headers:
+ lk = key.lower()
+ if headers[key] != None and (lk in ['content-md5', 'content-type', 'date'] or
+ lk.startswith(provider.header_prefix)):
+ interesting_headers[lk] = headers[key].strip()
+
+ # these keys get empty strings if they don't exist
+ if not interesting_headers.has_key('content-type'):
+ interesting_headers['content-type'] = ''
+ if not interesting_headers.has_key('content-md5'):
+ interesting_headers['content-md5'] = ''
+
+ # just in case someone used this. it's not necessary in this lib.
+ if interesting_headers.has_key(provider.date_header):
+ interesting_headers['date'] = ''
+
+ # if you're using expires for query string auth, then it trumps date
+ # (and provider.date_header)
+ if expires:
+ interesting_headers['date'] = str(expires)
+
+ sorted_header_keys = interesting_headers.keys()
+ sorted_header_keys.sort()
+
+ buf = "%s\n" % method
+ for key in sorted_header_keys:
+ val = interesting_headers[key]
+ if key.startswith(provider.header_prefix):
+ buf += "%s:%s\n" % (key, val)
+ else:
+ buf += "%s\n" % val
+
+ # don't include anything after the first ? in the resource...
+ # unless it is one of the QSA of interest, defined above
+ t = path.split('?')
+ buf += t[0]
+
+ if len(t) > 1:
+ qsa = t[1].split('&')
+ qsa = [ a.split('=') for a in qsa]
+ qsa = [ a for a in qsa if a[0] in qsa_of_interest ]
+ if len(qsa) > 0:
+ qsa.sort(cmp=lambda x,y:cmp(x[0], y[0]))
+ qsa = [ '='.join(a) for a in qsa ]
+ buf += '?'
+ buf += '&'.join(qsa)
+
+ return buf
+
+def merge_meta(headers, metadata, provider=None):
+ if not provider:
+ provider = boto.provider.get_default()
+ metadata_prefix = provider.metadata_prefix
+ final_headers = headers.copy()
+ for k in metadata.keys():
+ if k.lower() in ['cache-control', 'content-md5', 'content-type',
+ 'content-encoding', 'content-disposition',
+ 'date', 'expires']:
+ final_headers[k] = metadata[k]
+ else:
+ final_headers[metadata_prefix + k] = metadata[k]
+
+ return final_headers
+
+def get_aws_metadata(headers, provider=None):
+ if not provider:
+ provider = boto.provider.get_default()
+ metadata_prefix = provider.metadata_prefix
+ metadata = {}
+ for hkey in headers.keys():
+ if hkey.lower().startswith(metadata_prefix):
+ val = urllib.unquote_plus(headers[hkey])
+ metadata[hkey[len(metadata_prefix):]] = unicode(val, 'utf-8')
+ del headers[hkey]
+ return metadata
+
+def retry_url(url, retry_on_404=True):
+ for i in range(0, 10):
+ try:
+ req = urllib2.Request(url)
+ resp = urllib2.urlopen(req)
+ return resp.read()
+ except urllib2.HTTPError, e:
+ # in 2.6 you use getcode(), in 2.5 and earlier you use code
+ if hasattr(e, 'getcode'):
+ code = e.getcode()
+ else:
+ code = e.code
+ if code == 404 and not retry_on_404:
+ return ''
+ except:
+ pass
+ boto.log.exception('Caught exception reading instance data')
+ time.sleep(2**i)
+ boto.log.error('Unable to read instance data, giving up')
+ return ''
+
+def _get_instance_metadata(url):
+ d = {}
+ data = retry_url(url)
+ if data:
+ fields = data.split('\n')
+ for field in fields:
+ if field.endswith('/'):
+ d[field[0:-1]] = _get_instance_metadata(url + field)
+ else:
+ p = field.find('=')
+ if p > 0:
+ key = field[p+1:]
+ resource = field[0:p] + '/openssh-key'
+ else:
+ key = resource = field
+ val = retry_url(url + resource)
+ p = val.find('\n')
+ if p > 0:
+ val = val.split('\n')
+ d[key] = val
+ return d
+
+def get_instance_metadata(version='latest'):
+ """
+ Returns the instance metadata as a nested Python dictionary.
+ Simple values (e.g. local_hostname, hostname, etc.) will be
+ stored as string values. Values such as ancestor-ami-ids will
+ be stored in the dict as a list of string values. More complex
+ fields such as public-keys and will be stored as nested dicts.
+ """
+ url = 'http://169.254.169.254/%s/meta-data/' % version
+ return _get_instance_metadata(url)
+
+def get_instance_userdata(version='latest', sep=None):
+ url = 'http://169.254.169.254/%s/user-data' % version
+ user_data = retry_url(url, retry_on_404=False)
+ if user_data:
+ if sep:
+ l = user_data.split(sep)
+ user_data = {}
+ for nvpair in l:
+ t = nvpair.split('=')
+ user_data[t[0].strip()] = t[1].strip()
+ return user_data
+
+ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
+
+def get_ts(ts=None):
+ if not ts:
+ ts = time.gmtime()
+ return time.strftime(ISO8601, ts)
+
+def parse_ts(ts):
+ return datetime.datetime.strptime(ts, ISO8601)
+
+def find_class(module_name, class_name=None):
+ if class_name:
+ module_name = "%s.%s" % (module_name, class_name)
+ modules = module_name.split('.')
+ c = None
+
+ try:
+ for m in modules[1:]:
+ if c:
+ c = getattr(c, m)
+ else:
+ c = getattr(__import__(".".join(modules[0:-1])), m)
+ return c
+ except:
+ return None
+
+def update_dme(username, password, dme_id, ip_address):
+ """
+ Update your Dynamic DNS record with DNSMadeEasy.com
+ """
+ dme_url = 'https://www.dnsmadeeasy.com/servlet/updateip'
+ dme_url += '?username=%s&password=%s&id=%s&ip=%s'
+ s = urllib2.urlopen(dme_url % (username, password, dme_id, ip_address))
+ return s.read()
+
+def fetch_file(uri, file=None, username=None, password=None):
+ """
+    Fetch a file based on the URI provided. If you do not pass in a file
+    pointer, a tempfile.NamedTemporaryFile is used. Returns the file object,
+    or None if the file could not be retrieved.
+ The URI can be either an HTTP url, or "s3://bucket_name/key_name"
+ """
+ boto.log.info('Fetching %s' % uri)
+ if file == None:
+ file = tempfile.NamedTemporaryFile()
+ try:
+ if uri.startswith('s3://'):
+ bucket_name, key_name = uri[len('s3://'):].split('/', 1)
+ c = boto.connect_s3(aws_access_key_id=username, aws_secret_access_key=password)
+ bucket = c.get_bucket(bucket_name)
+ key = bucket.get_key(key_name)
+ key.get_contents_to_file(file)
+ else:
+ if username and password:
+ passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+ passman.add_password(None, uri, username, password)
+ authhandler = urllib2.HTTPBasicAuthHandler(passman)
+ opener = urllib2.build_opener(authhandler)
+ urllib2.install_opener(opener)
+ s = urllib2.urlopen(uri)
+ file.write(s.read())
+ file.seek(0)
+    except:
+        # log the failure and return None, as the docstring promises
+        boto.log.exception('Problem Retrieving file: %s' % uri)
+        file = None
+ return file
+
+class ShellCommand(object):
+
+ def __init__(self, command, wait=True, fail_fast=False, cwd = None):
+ self.exit_code = 0
+ self.command = command
+ self.log_fp = StringIO.StringIO()
+ self.wait = wait
+ self.fail_fast = fail_fast
+ self.run(cwd = cwd)
+
+ def run(self, cwd=None):
+ boto.log.info('running:%s' % self.command)
+ self.process = subprocess.Popen(self.command, shell=True, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ cwd=cwd)
+ if(self.wait):
+ while self.process.poll() == None:
+ time.sleep(1)
+ t = self.process.communicate()
+ self.log_fp.write(t[0])
+ self.log_fp.write(t[1])
+ boto.log.info(self.log_fp.getvalue())
+ self.exit_code = self.process.returncode
+
+ if self.fail_fast and self.exit_code != 0:
+            raise Exception("Command " + self.command + " failed with status " + str(self.exit_code))
+
+ return self.exit_code
+
+ def setReadOnly(self, value):
+ raise AttributeError
+
+ def getStatus(self):
+ return self.exit_code
+
+ status = property(getStatus, setReadOnly, None, 'The exit code for the command')
+
+ def getOutput(self):
+ return self.log_fp.getvalue()
+
+    output = property(getOutput, setReadOnly, None, 'The STDOUT and STDERR output of the command')
+
+class AuthSMTPHandler(logging.handlers.SMTPHandler):
+ """
+ This class extends the SMTPHandler in the standard Python logging module
+ to accept a username and password on the constructor and to then use those
+ credentials to authenticate with the SMTP server. To use this, you could
+ add something like this in your boto config file:
+
+ [handler_hand07]
+ class=boto.utils.AuthSMTPHandler
+ level=WARN
+ formatter=form07
+ args=('localhost', 'username', 'password', 'from@abc', ['user1@abc', 'user2@xyz'], 'Logger Subject')
+ """
+
+ def __init__(self, mailhost, username, password, fromaddr, toaddrs, subject):
+ """
+ Initialize the handler.
+
+ We have extended the constructor to accept a username/password
+ for SMTP authentication.
+ """
+ logging.handlers.SMTPHandler.__init__(self, mailhost, fromaddr, toaddrs, subject)
+ self.username = username
+ self.password = password
+
+ def emit(self, record):
+ """
+ Emit a record.
+
+ Format the record and send it to the specified addressees.
+ It would be really nice if I could add authorization to this class
+ without having to resort to cut and paste inheritance but, no.
+ """
+ try:
+ port = self.mailport
+ if not port:
+ port = smtplib.SMTP_PORT
+ smtp = smtplib.SMTP(self.mailhost, port)
+ smtp.login(self.username, self.password)
+ msg = self.format(record)
+ msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
+ self.fromaddr,
+ ','.join(self.toaddrs),
+ self.getSubject(record),
+ formatdate(), msg)
+ smtp.sendmail(self.fromaddr, self.toaddrs, msg)
+ smtp.quit()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ self.handleError(record)
+
+class LRUCache(dict):
+ """A dictionary-like object that stores only a certain number of items, and
+ discards its least recently used item when full.
+
+ >>> cache = LRUCache(3)
+ >>> cache['A'] = 0
+ >>> cache['B'] = 1
+ >>> cache['C'] = 2
+ >>> len(cache)
+ 3
+
+ >>> cache['A']
+ 0
+
+ Adding new items to the cache does not increase its size. Instead, the least
+ recently used item is dropped:
+
+ >>> cache['D'] = 3
+ >>> len(cache)
+ 3
+ >>> 'B' in cache
+ False
+
+ Iterating over the cache returns the keys, starting with the most recently
+ used:
+
+ >>> for key in cache:
+ ... print key
+ D
+ A
+ C
+
+ This code is based on the LRUCache class from Genshi which is based on
+ Mighty's LRUCache from ``myghtyutils.util``, written
+ by Mike Bayer and released under the MIT license (Genshi uses the
+ BSD License). See:
+
+ http://svn.myghty.org/myghtyutils/trunk/lib/myghtyutils/util.py
+ """
+
+ class _Item(object):
+ def __init__(self, key, value):
+ self.previous = self.next = None
+ self.key = key
+ self.value = value
+ def __repr__(self):
+ return repr(self.value)
+
+ def __init__(self, capacity):
+ self._dict = dict()
+ self.capacity = capacity
+ self.head = None
+ self.tail = None
+
+ def __contains__(self, key):
+ return key in self._dict
+
+ def __iter__(self):
+ cur = self.head
+ while cur:
+ yield cur.key
+ cur = cur.next
+
+ def __len__(self):
+ return len(self._dict)
+
+ def __getitem__(self, key):
+ item = self._dict[key]
+ self._update_item(item)
+ return item.value
+
+ def __setitem__(self, key, value):
+ item = self._dict.get(key)
+ if item is None:
+ item = self._Item(key, value)
+ self._dict[key] = item
+ self._insert_item(item)
+ else:
+ item.value = value
+ self._update_item(item)
+ self._manage_size()
+
+ def __repr__(self):
+ return repr(self._dict)
+
+ def _insert_item(self, item):
+ item.previous = None
+ item.next = self.head
+ if self.head is not None:
+ self.head.previous = item
+ else:
+ self.tail = item
+ self.head = item
+ self._manage_size()
+
+ def _manage_size(self):
+ while len(self._dict) > self.capacity:
+ del self._dict[self.tail.key]
+ if self.tail != self.head:
+ self.tail = self.tail.previous
+ self.tail.next = None
+ else:
+ self.head = self.tail = None
+
+ def _update_item(self, item):
+ if self.head == item:
+ return
+
+ previous = item.previous
+ previous.next = item.next
+ if item.next is not None:
+ item.next.previous = previous
+ else:
+ self.tail = previous
+
+ item.previous = None
+ item.next = self.head
+ self.head.previous = self.head = item
+
+class Password(object):
+ """
+    Password object that stores itself hashed (SHA512, or MD5 when hashlib is unavailable).
+ """
+ def __init__(self, str=None):
+ """
+ Load the string from an initial value, this should be the raw SHA512 hashed password
+ """
+ self.str = str
+
+ def set(self, value):
+ self.str = _hashfn(value).hexdigest()
+
+ def __str__(self):
+ return str(self.str)
+
+ def __eq__(self, other):
+ if other == None:
+ return False
+ return str(_hashfn(other).hexdigest()) == str(self.str)
+
+ def __len__(self):
+ if self.str:
+ return len(self.str)
+ else:
+ return 0
+
+def notify(subject, body=None, html_body=None, to_string=None, attachments=[], append_instance_id=True):
+ if append_instance_id:
+ subject = "[%s] %s" % (boto.config.get_value("Instance", "instance-id"), subject)
+ if not to_string:
+ to_string = boto.config.get_value('Notification', 'smtp_to', None)
+ if to_string:
+ try:
+ from_string = boto.config.get_value('Notification', 'smtp_from', 'boto')
+ msg = MIMEMultipart()
+ msg['From'] = from_string
+ msg['Reply-To'] = from_string
+ msg['To'] = to_string
+ msg['Date'] = formatdate(localtime=True)
+ msg['Subject'] = subject
+
+ if body:
+ msg.attach(MIMEText(body))
+
+ if html_body:
+ part = MIMEBase('text', 'html')
+ part.set_payload(html_body)
+ Encoders.encode_base64(part)
+ msg.attach(part)
+
+ for part in attachments:
+ msg.attach(part)
+
+ smtp_host = boto.config.get_value('Notification', 'smtp_host', 'localhost')
+
+ # Alternate port support
+ if boto.config.get_value("Notification", "smtp_port"):
+ server = smtplib.SMTP(smtp_host, int(boto.config.get_value("Notification", "smtp_port")))
+ else:
+ server = smtplib.SMTP(smtp_host)
+
+ # TLS support
+ if boto.config.getbool("Notification", "smtp_tls"):
+ server.ehlo()
+ server.starttls()
+ server.ehlo()
+ smtp_user = boto.config.get_value('Notification', 'smtp_user', '')
+ smtp_pass = boto.config.get_value('Notification', 'smtp_pass', '')
+ if smtp_user:
+ server.login(smtp_user, smtp_pass)
+ server.sendmail(from_string, to_string, msg.as_string())
+ server.quit()
+ except:
+ boto.log.exception('notify failed')
+
+def get_utf8_value(value):
+ if not isinstance(value, str) and not isinstance(value, unicode):
+ value = str(value)
+ if isinstance(value, unicode):
+ return value.encode('utf-8')
+ else:
+ return value
+
+def mklist(value):
+ if not isinstance(value, list):
+ if isinstance(value, tuple):
+ value = list(value)
+ else:
+ value = [value]
+ return value
+
+def pythonize_name(name, sep='_'):
+ s = ''
+    # lower() the first char unconditionally; a bare .isupper was always truthy
+    s = name[0].lower()
+ for c in name[1:]:
+ if c.isupper():
+ s += sep + c.lower()
+ else:
+ s += c
+ return s
+
+def awsify_name(name):
+ return name[0:1].upper()+name[1:]
diff --git a/boto/vpc/__init__.py b/boto/vpc/__init__.py
new file mode 100644
index 0000000..76eea82
--- /dev/null
+++ b/boto/vpc/__init__.py
@@ -0,0 +1,473 @@
+# Copyright (c) 2009 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a connection to the EC2 service.
+"""
+
+from boto.ec2.connection import EC2Connection
+from boto.vpc.vpc import VPC
+from boto.vpc.customergateway import CustomerGateway
+from boto.vpc.vpngateway import VpnGateway, Attachment
+from boto.vpc.dhcpoptions import DhcpOptions
+from boto.vpc.subnet import Subnet
+from boto.vpc.vpnconnection import VpnConnection
+
+class VPCConnection(EC2Connection):
+
+ # VPC methods
+
+ def get_all_vpcs(self, vpc_ids=None, filters=None):
+ """
+ Retrieve information about your VPCs. You can filter results to
+ return information only about those VPCs that match your search
+ parameters. Otherwise, all VPCs associated with your account
+ are returned.
+
+ :type vpc_ids: list
+ :param vpc_ids: A list of strings with the desired VPC ID's
+
+ :type filters: list of tuples
+ :param filters: A list of tuples containing filters. Each tuple
+ consists of a filter key and a filter value.
+ Possible filter keys are:
+
+ - *state*, the state of the VPC (pending or available)
+ - *cidrBlock*, CIDR block of the VPC
+ - *dhcpOptionsId*, the ID of a set of DHCP options
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.vpc.VPC`
+ """
+ params = {}
+ if vpc_ids:
+ self.build_list_params(params, vpc_ids, 'VpcId')
+ if filters:
+ i = 1
+ for filter in filters:
+ params[('Filter.%d.Key' % i)] = filter[0]
+                params[('Filter.%d.Value.1' % i)] = filter[1]
+ i += 1
+ return self.get_list('DescribeVpcs', params, [('item', VPC)])
+
+ def create_vpc(self, cidr_block):
+ """
+ Create a new Virtual Private Cloud.
+
+ :type cidr_block: str
+ :param cidr_block: A valid CIDR block
+
+ :rtype: The newly created VPC
+ :return: A :class:`boto.vpc.vpc.VPC` object
+ """
+ params = {'CidrBlock' : cidr_block}
+ return self.get_object('CreateVpc', params, VPC)
+
+ def delete_vpc(self, vpc_id):
+ """
+ Delete a Virtual Private Cloud.
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the vpc to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpcId': vpc_id}
+ return self.get_status('DeleteVpc', params)
+
+ # Customer Gateways
+
+ def get_all_customer_gateways(self, customer_gateway_ids=None, filters=None):
+ """
+ Retrieve information about your CustomerGateways. You can filter results to
+ return information only about those CustomerGateways that match your search
+ parameters. Otherwise, all CustomerGateways associated with your account
+ are returned.
+
+ :type customer_gateway_ids: list
+ :param customer_gateway_ids: A list of strings with the desired CustomerGateway ID's
+
+ :type filters: list of tuples
+ :param filters: A list of tuples containing filters. Each tuple
+ consists of a filter key and a filter value.
+ Possible filter keys are:
+
+ - *state*, the state of the CustomerGateway
+ (pending,available,deleting,deleted)
+ - *type*, the type of customer gateway (ipsec.1)
+ - *ipAddress* the IP address of customer gateway's
+                          internet-routable external interface
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.customergateway.CustomerGateway`
+ """
+ params = {}
+ if customer_gateway_ids:
+ self.build_list_params(params, customer_gateway_ids, 'CustomerGatewayId')
+ if filters:
+ i = 1
+ for filter in filters:
+ params[('Filter.%d.Key' % i)] = filter[0]
+                params[('Filter.%d.Value.1' % i)] = filter[1]
+ i += 1
+ return self.get_list('DescribeCustomerGateways', params, [('item', CustomerGateway)])
+
+ def create_customer_gateway(self, type, ip_address, bgp_asn):
+ """
+ Create a new Customer Gateway
+
+ :type type: str
+        :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
+
+ :type ip_address: str
+ :param ip_address: Internet-routable IP address for customer's gateway.
+ Must be a static address.
+
+ :type bgp_asn: str
+ :param bgp_asn: Customer gateway's Border Gateway Protocol (BGP)
+ Autonomous System Number (ASN)
+
+ :rtype: The newly created CustomerGateway
+ :return: A :class:`boto.vpc.customergateway.CustomerGateway` object
+ """
+ params = {'Type' : type,
+ 'IpAddress' : ip_address,
+ 'BgpAsn' : bgp_asn}
+ return self.get_object('CreateCustomerGateway', params, CustomerGateway)
+
+ def delete_customer_gateway(self, customer_gateway_id):
+ """
+ Delete a Customer Gateway.
+
+ :type customer_gateway_id: str
+ :param customer_gateway_id: The ID of the customer_gateway to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'CustomerGatewayId': customer_gateway_id}
+ return self.get_status('DeleteCustomerGateway', params)
+
+ # VPN Gateways
+
+ def get_all_vpn_gateways(self, vpn_gateway_ids=None, filters=None):
+ """
+ Retrieve information about your VpnGateways. You can filter results to
+ return information only about those VpnGateways that match your search
+ parameters. Otherwise, all VpnGateways associated with your account
+ are returned.
+
+ :type vpn_gateway_ids: list
+ :param vpn_gateway_ids: A list of strings with the desired VpnGateway ID's
+
+ :type filters: list of tuples
+ :param filters: A list of tuples containing filters. Each tuple
+ consists of a filter key and a filter value.
+ Possible filter keys are:
+
+ - *state*, the state of the VpnGateway
+ (pending,available,deleting,deleted)
+ - *type*, the type of customer gateway (ipsec.1)
+ - *availabilityZone*, the Availability zone the
+ VPN gateway is in.
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.customergateway.VpnGateway`
+ """
+ params = {}
+ if vpn_gateway_ids:
+ self.build_list_params(params, vpn_gateway_ids, 'VpnGatewayId')
+ if filters:
+ i = 1
+ for filter in filters:
+ params[('Filter.%d.Key' % i)] = filter[0]
+                params[('Filter.%d.Value.1' % i)] = filter[1]
+ i += 1
+ return self.get_list('DescribeVpnGateways', params, [('item', VpnGateway)])
+
+ def create_vpn_gateway(self, type, availability_zone=None):
+ """
+ Create a new Vpn Gateway
+
+ :type type: str
+        :param type: Type of VPN Connection. Only valid value currently is 'ipsec.1'
+
+ :type availability_zone: str
+ :param availability_zone: The Availability Zone where you want the VPN gateway.
+
+ :rtype: The newly created VpnGateway
+ :return: A :class:`boto.vpc.vpngateway.VpnGateway` object
+ """
+ params = {'Type' : type}
+ if availability_zone:
+ params['AvailabilityZone'] = availability_zone
+ return self.get_object('CreateVpnGateway', params, VpnGateway)
+
+ def delete_vpn_gateway(self, vpn_gateway_id):
+ """
+ Delete a Vpn Gateway.
+
+ :type vpn_gateway_id: str
+ :param vpn_gateway_id: The ID of the vpn_gateway to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpnGatewayId': vpn_gateway_id}
+ return self.get_status('DeleteVpnGateway', params)
+
+ def attach_vpn_gateway(self, vpn_gateway_id, vpc_id):
+ """
+ Attaches a VPN gateway to a VPC.
+
+ :type vpn_gateway_id: str
+ :param vpn_gateway_id: The ID of the vpn_gateway to attach
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC you want to attach the gateway to.
+
+ :rtype: An attachment
+ :return: a :class:`boto.vpc.vpngateway.Attachment`
+ """
+ params = {'VpnGatewayId': vpn_gateway_id,
+ 'VpcId' : vpc_id}
+ return self.get_object('AttachVpnGateway', params, Attachment)
+
+ # Subnets
+
+ def get_all_subnets(self, subnet_ids=None, filters=None):
+ """
+ Retrieve information about your Subnets. You can filter results to
+ return information only about those Subnets that match your search
+ parameters. Otherwise, all Subnets associated with your account
+ are returned.
+
+ :type subnet_ids: list
+ :param subnet_ids: A list of strings with the desired Subnet ID's
+
+ :type filters: list of tuples
+ :param filters: A list of tuples containing filters. Each tuple
+ consists of a filter key and a filter value.
+ Possible filter keys are:
+
+ - *state*, the state of the Subnet
+ (pending,available)
+                          - *vpcId*, the ID of the VPC the subnet is in.
+ - *cidrBlock*, CIDR block of the subnet
+ - *availabilityZone*, the Availability Zone
+ the subnet is in.
+
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.subnet.Subnet`
+ """
+ params = {}
+ if subnet_ids:
+ self.build_list_params(params, subnet_ids, 'SubnetId')
+ if filters:
+ i = 1
+ for filter in filters:
+ params[('Filter.%d.Key' % i)] = filter[0]
+ params[('Filter.%d.Value.1' % i)] = filter[1]
+ i += 1
+ return self.get_list('DescribeSubnets', params, [('item', Subnet)])
+
+ def create_subnet(self, vpc_id, cidr_block, availability_zone=None):
+ """
+ Create a new Subnet
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC where you want to create the subnet.
+
+ :type cidr_block: str
+ :param cidr_block: The CIDR block you want the subnet to cover.
+
+ :type availability_zone: str
+ :param availability_zone: The AZ you want the subnet in
+
+ :rtype: The newly created Subnet
+ :return: A :class:`boto.vpc.customergateway.Subnet` object
+ """
+ params = {'VpcId' : vpc_id,
+ 'CidrBlock' : cidr_block}
+ if availability_zone:
+ params['AvailabilityZone'] = availability_zone
+ return self.get_object('CreateSubnet', params, Subnet)
+
+ def delete_subnet(self, subnet_id):
+ """
+ Delete a subnet.
+
+ :type subnet_id: str
+ :param subnet_id: The ID of the subnet to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'SubnetId': subnet_id}
+ return self.get_status('DeleteSubnet', params)
+
+
+ # DHCP Options
+
+ def get_all_dhcp_options(self, dhcp_options_ids=None):
+ """
+ Retrieve information about your DhcpOptions.
+
+ :type dhcp_options_ids: list
+ :param dhcp_options_ids: A list of strings with the desired DhcpOption ID's
+
+ :rtype: list
+ :return: A list of :class:`boto.vpc.dhcpoptions.DhcpOptions`
+ """
+ params = {}
+ if dhcp_options_ids:
+ self.build_list_params(params, dhcp_options_ids, 'DhcpOptionsId')
+ return self.get_list('DescribeDhcpOptions', params, [('item', DhcpOptions)])
+
+ def create_dhcp_options(self, vpc_id, cidr_block, availability_zone=None):
+ """
+ Create a new DhcpOption
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC where you want to create the subnet.
+
+ :type cidr_block: str
+ :param cidr_block: The CIDR block you want the subnet to cover.
+
+ :type availability_zone: str
+ :param availability_zone: The AZ you want the subnet in
+
+ :rtype: The newly created DhcpOption
+ :return: A :class:`boto.vpc.customergateway.DhcpOption` object
+ """
+ params = {'VpcId' : vpc_id,
+ 'CidrBlock' : cidr_block}
+ if availability_zone:
+ params['AvailabilityZone'] = availability_zone
+ return self.get_object('CreateDhcpOption', params, DhcpOptions)
+
+ def delete_dhcp_options(self, dhcp_options_id):
+ """
+ Delete a DHCP Options
+
+ :type dhcp_options_id: str
+ :param dhcp_options_id: The ID of the DHCP Options to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'DhcpOptionsId': dhcp_options_id}
+ return self.get_status('DeleteDhcpOptions', params)
+
+ def associate_dhcp_options(self, dhcp_options_id, vpc_id):
+ """
+ Associate a set of Dhcp Options with a VPC.
+
+ :type dhcp_options_id: str
+ :param dhcp_options_id: The ID of the Dhcp Options
+
+ :type vpc_id: str
+ :param vpc_id: The ID of the VPC.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'DhcpOptionsId': dhcp_options_id,
+ 'VpcId' : vpc_id}
+ return self.get_status('AssociateDhcpOptions', params)
+
+ # VPN Connection
+
+ def get_all_vpn_connections(self, vpn_connection_ids=None, filters=None):
+ """
+ Retrieve information about your VPN_CONNECTIONs. You can filter results to
+ return information only about those VPN_CONNECTIONs that match your search
+ parameters. Otherwise, all VPN_CONNECTIONs associated with your account
+ are returned.
+
+ :type vpn_connection_ids: list
+ :param vpn_connection_ids: A list of strings with the desired VPN_CONNECTION ID's
+
+ :type filters: list of tuples
+ :param filters: A list of tuples containing filters. Each tuple
+ consists of a filter key and a filter value.
+ Possible filter keys are:
+
+ - *state*, the state of the VPN_CONNECTION
+ pending,available,deleting,deleted
+ - *type*, the type of connection, currently 'ipsec.1'
+ - *customerGatewayId*, the ID of the customer gateway
+ associated with the VPN
+ - *vpnGatewayId*, the ID of the VPN gateway associated
+ with the VPN connection
+
+ :rtype: list
+ :return: A list of :class:`boto.vpn_connection.vpnconnection.VpnConnection`
+ """
+ params = {}
+ if vpn_connection_ids:
+ self.build_list_params(params, vpn_connection_ids, 'Vpn_ConnectionId')
+ if filters:
+ i = 1
+ for filter in filters:
+ params[('Filter.%d.Key' % i)] = filter[0]
+ params[('Filter.%d.Value.1')] = filter[1]
+ i += 1
+ return self.get_list('DescribeVpnConnections', params, [('item', VpnConnection)])
+
+ def create_vpn_connection(self, type, customer_gateway_id, vpn_gateway_id):
+ """
+ Create a new VPN Connection.
+
+ :type type: str
+ :param type: The type of VPN Connection. Currently only 'ipsec.1'
+ is supported
+
+ :type customer_gateway_id: str
+ :param customer_gateway_id: The ID of the customer gateway.
+
+ :type vpn_gateway_id: str
+ :param vpn_gateway_id: The ID of the VPN gateway.
+
+ :rtype: The newly created VpnConnection
+ :return: A :class:`boto.vpc.vpnconnection.VpnConnection` object
+ """
+ params = {'Type' : type,
+ 'CustomerGatewayId' : customer_gateway_id,
+ 'VpnGatewayId' : vpn_gateway_id}
+ return self.get_object('CreateVpnConnection', params, VpnConnection)
+
+ def delete_vpn_connection(self, vpn_connection_id):
+ """
+ Delete a VPN Connection.
+
+ :type vpn_connection_id: str
+ :param vpn_connection_id: The ID of the vpn_connection to be deleted.
+
+ :rtype: bool
+ :return: True if successful
+ """
+ params = {'VpnConnectionId': vpn_connection_id}
+ return self.get_status('DeleteVpnConnection', params)
+
+
diff --git a/boto/vpc/customergateway.py b/boto/vpc/customergateway.py
new file mode 100644
index 0000000..959d01f
--- /dev/null
+++ b/boto/vpc/customergateway.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Customer Gateway
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
class CustomerGateway(TaggedEC2Object):
    """
    Represents a VPC Customer Gateway.

    Attributes are populated while SAX-parsing an EC2 response:
    ``id`` (customerGatewayId), ``type``, ``state``, ``ip_address``
    and ``bgp_asn``.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.type = None
        self.state = None
        self.ip_address = None
        self.bgp_asn = None

    def __repr__(self):
        return 'CustomerGateway:%s' % self.id

    def endElement(self, name, value, connection):
        # Map known response element names onto snake_case attributes;
        # any unrecognized element is mirrored verbatim onto the object.
        attr_map = {'customerGatewayId': 'id',
                    'ipAddress': 'ip_address',
                    'type': 'type',
                    'state': 'state',
                    'bgpAsn': 'bgp_asn'}
        setattr(self, attr_map.get(name, name), value)

diff --git a/boto/vpc/dhcpoptions.py b/boto/vpc/dhcpoptions.py
new file mode 100644
index 0000000..810d9cf
--- /dev/null
+++ b/boto/vpc/dhcpoptions.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a DHCP Options set
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
class DhcpValueSet(list):
    """SAX helper that accumulates <value> elements into a plain list."""

    def startElement(self, name, attrs, connection):
        # No nested structure to hand off to a sub-handler.
        pass

    def endElement(self, name, value, connection):
        # Only <value> elements contribute entries; ignore anything else.
        if name != 'value':
            return
        self.append(value)
+
class DhcpConfigSet(dict):
    """
    SAX helper mapping each DHCP option key (e.g. 'domain-name') to a
    :class:`DhcpValueSet` holding that option's values.
    """

    def startElement(self, name, attrs, connection):
        if name == 'valueSet':
            # self._name was recorded by the preceding <key> element;
            # assumes EC2 always emits <key> before <valueSet> -- an
            # AttributeError here would mean that ordering was violated.
            # Use 'not in' rather than the deprecated dict.has_key()
            # (removed in Python 3).
            if self._name not in self:
                self[self._name] = DhcpValueSet()
            return self[self._name]

    def endElement(self, name, value, connection):
        if name == 'key':
            # Remember the option key so the following valueSet can be
            # stored under it.
            self._name = value
+
class DhcpOptions(TaggedEC2Object):
    """
    Represents a DHCP Options set.

    After parsing, ``id`` holds the dhcpOptionsId and ``options`` holds a
    :class:`DhcpConfigSet` built from the dhcpConfigurationSet element.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        # dhcpOptionsId of this set
        self.id = None
        # DhcpConfigSet of option key -> value list, filled during parsing
        self.options = None

    def __repr__(self):
        return 'DhcpOptions:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the base class (tag handling) first refusal.
        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
        if retval is not None:
            return retval
        if name != 'dhcpConfigurationSet':
            return None
        self.options = DhcpConfigSet()
        return self.options

    def endElement(self, name, value, connection):
        if name == 'dhcpOptionsId':
            self.id = value
            return
        # Mirror any other element verbatim onto the object.
        setattr(self, name, value)

+
diff --git a/boto/vpc/subnet.py b/boto/vpc/subnet.py
new file mode 100644
index 0000000..135e1a2
--- /dev/null
+++ b/boto/vpc/subnet.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Subnet
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
class Subnet(TaggedEC2Object):
    """
    Represents a VPC Subnet.

    Attributes populated while parsing an EC2 response: ``id``
    (subnetId), ``state``, ``cidr_block``,
    ``available_ip_address_count`` (int) and ``availability_zone``.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.state = None
        self.cidr_block = None
        self.available_ip_address_count = 0
        self.availability_zone = None

    def __repr__(self):
        return 'Subnet:%s' % self.id

    def endElement(self, name, value, connection):
        # The count is the only numeric field; convert it explicitly.
        if name == 'availableIpAddressCount':
            self.available_ip_address_count = int(value)
            return
        attr_map = {'subnetId': 'id',
                    'state': 'state',
                    'cidrBlock': 'cidr_block',
                    'availabilityZone': 'availability_zone'}
        # Unknown elements are mirrored verbatim onto the object.
        setattr(self, attr_map.get(name, name), value)

+
diff --git a/boto/vpc/vpc.py b/boto/vpc/vpc.py
new file mode 100644
index 0000000..0539acd
--- /dev/null
+++ b/boto/vpc/vpc.py
@@ -0,0 +1,54 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Virtual Private Cloud.
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
class VPC(TaggedEC2Object):
    """
    Represents a Virtual Private Cloud.

    Attributes populated while parsing an EC2 response: ``id`` (vpcId),
    ``dhcp_options_id``, ``state`` and ``cidr_block``.
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.dhcp_options_id = None
        self.state = None
        self.cidr_block = None

    def __repr__(self):
        return 'VPC:%s' % self.id

    def endElement(self, name, value, connection):
        # Map known element names to snake_case attributes; mirror any
        # unrecognized element verbatim onto the object.
        attr_map = {'vpcId': 'id',
                    'dhcpOptionsId': 'dhcp_options_id',
                    'state': 'state',
                    'cidrBlock': 'cidr_block'}
        setattr(self, attr_map.get(name, name), value)

    def delete(self):
        """Delete this VPC via the owning connection object."""
        return self.connection.delete_vpc(self.id)

+
diff --git a/boto/vpc/vpnconnection.py b/boto/vpc/vpnconnection.py
new file mode 100644
index 0000000..2e089e7
--- /dev/null
+++ b/boto/vpc/vpnconnection.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a VPN Connectionn
+"""
+
+from boto.ec2.ec2object import EC2Object
+
class VpnConnection(EC2Object):
    """
    Represents a VPN Connection between a customer gateway and a VPN
    gateway.

    Attributes populated while parsing an EC2 response: ``id``
    (vpnConnectionId), ``state``, ``customer_gateway_configuration``,
    ``type``, ``customer_gateway_id`` and ``vpn_gateway_id``.
    """

    def __init__(self, connection=None):
        EC2Object.__init__(self, connection)
        self.id = None
        self.state = None
        self.customer_gateway_configuration = None
        self.type = None
        self.customer_gateway_id = None
        self.vpn_gateway_id = None

    def __repr__(self):
        return 'VpnConnection:%s' % self.id

    def endElement(self, name, value, connection):
        if name == 'vpnConnectionId':
            self.id = value
        elif name == 'state':
            self.state = value
        elif name == 'customerGatewayConfiguration':
            # Bug fix: EC2 response element names are lowerCamelCase
            # (as every other branch here shows); the previous
            # 'CustomerGatewayConfiguration' spelling could never match,
            # leaving this attribute None.
            self.customer_gateway_configuration = value
        elif name == 'type':
            self.type = value
        elif name == 'customerGatewayId':
            self.customer_gateway_id = value
        elif name == 'vpnGatewayId':
            self.vpn_gateway_id = value
        else:
            # Mirror any unrecognized element verbatim onto the object.
            setattr(self, name, value)

    def delete(self):
        """Delete this VPN connection via the owning connection object."""
        return self.connection.delete_vpn_connection(self.id)

+
diff --git a/boto/vpc/vpngateway.py b/boto/vpc/vpngateway.py
new file mode 100644
index 0000000..83b912e
--- /dev/null
+++ b/boto/vpc/vpngateway.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2009-2010 Mitch Garnaat http://garnaat.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+"""
+Represents a Vpn Gateway
+"""
+
+from boto.ec2.ec2object import TaggedEC2Object
+
class Attachment(object):
    """
    The attachment state of a VPN gateway with respect to one VPC:
    ``vpc_id`` and ``state`` are filled in during SAX parsing.
    """

    def __init__(self, connection=None):
        # `connection` is accepted for signature symmetry with the other
        # EC2 parse objects; it is not stored.
        self.vpc_id = None
        self.state = None

    def startElement(self, name, attrs, connection):
        # No nested structure to hand off.
        pass

    def endElement(self, name, value, connection):
        # Map known element names; mirror anything else verbatim.
        attr_map = {'vpcId': 'vpc_id', 'state': 'state'}
        setattr(self, attr_map.get(name, name), value)

+
class VpnGateway(TaggedEC2Object):
    """
    Represents a VPN Gateway.

    Attributes populated while parsing an EC2 response: ``id``
    (vpnGatewayId), ``type``, ``state``, ``availability_zone`` and
    ``attachments`` (a list of :class:`Attachment`, one per VPC).
    """

    def __init__(self, connection=None):
        TaggedEC2Object.__init__(self, connection)
        self.id = None
        self.type = None
        self.state = None
        self.availability_zone = None
        # One Attachment per VPC this gateway is attached to.
        self.attachments = []

    def __repr__(self):
        return 'VpnGateway:%s' % self.id

    def startElement(self, name, attrs, connection):
        # Give the base class (tag handling) first refusal.
        retval = TaggedEC2Object.startElement(self, name, attrs, connection)
        if retval is not None:
            return retval
        if name != 'item':
            return None
        # Each <item> under <attachments> describes one VPC attachment.
        attachment = Attachment()
        self.attachments.append(attachment)
        return attachment

    def endElement(self, name, value, connection):
        if name == 'attachments':
            # Container element only; the list was populated item by item
            # in startElement, so don't clobber it here.
            return
        attr_map = {'vpnGatewayId': 'id',
                    'type': 'type',
                    'state': 'state',
                    'availabilityZone': 'availability_zone'}
        # Unknown elements are mirrored verbatim onto the object.
        setattr(self, attr_map.get(name, name), value)

    def attach(self, vpc_id):
        """Attach this VPN gateway to the VPC identified by vpc_id."""
        return self.connection.attach_vpn_gateway(self.id, vpc_id)

+
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..5fd1f92
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,89 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = build
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help clean html dirhtml pickle json htmlhelp qthelp latex changes linkcheck doctest
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+ -rm -rf $(BUILDDIR)/*
+
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/boto.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/boto.qhc"
+
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
+ "run these through (pdf)latex."
+
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 0000000..d6b0b7b
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,113 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+set SPHINXBUILD=sphinx-build
+set BUILDDIR=build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\boto.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\boto.qhc
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+:end
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 0000000..cdf85bb
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,3 @@
+{% extends '!layout.html' %}
+
+{% block sidebarsearch %}{{ super() }}<div><a href="boto.pdf">PDF Version</a></div>{% endblock %}
diff --git a/docs/source/autoscale_tut.rst b/docs/source/autoscale_tut.rst
new file mode 100644
index 0000000..9f9d399
--- /dev/null
+++ b/docs/source/autoscale_tut.rst
@@ -0,0 +1,140 @@
+.. _autoscale_tut:
+
+=============================================
+An Introduction to boto's Autoscale interface
+=============================================
+
+This tutorial focuses on the boto interface to the Autoscale service. This
+assumes you are familiar with boto's EC2 interface and concepts.
+
+Autoscale Concepts
+------------------
+
+The AWS Autoscale service is comprised of three core concepts:
+
+ #. *Autoscale Group (AG):* An AG can be viewed as a collection of criteria for
+ maintaining or scaling a set of EC2 instances over one or more availability
+ zones. An AG is limited to a single region.
+ #. *Launch Configuration (LC):* An LC is the set of information needed by the
+ AG to launch new instances - this can encompass image ids, startup data,
+ security groups and keys. Only one LC is attached to an AG.
+ #. *Triggers*: A trigger is essentially a set of rules for determining when to
+ scale an AG up or down. These rules can encompass a set of metrics such as
+ average CPU usage across instances, or incoming requests, a threshold for
+ when an action will take place, as well as parameters to control how long
+ to wait after a threshold is crossed.
+
+Creating a Connection
+---------------------
+The first step in accessing autoscaling is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.ec2.autoscale import AutoScaleConnection
+>>> conn = AutoScaleConnection('<aws access key>', '<aws secret key>')
+
+Alternatively, you can use the shortcut:
+
+>>> conn = boto.connect_autoscale()
+
+A Note About Regions and Endpoints
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Like EC2 the Autoscale service has a different endpoint for each region. By
+default the US endpoint is used. To choose a specific region, instantiate the
+AutoScaleConnection object with that region's endpoint.
+
+>>> ec2 = boto.connect_autoscale(host='eu-west-1.autoscaling.amazonaws.com')
+
+Alternatively, edit your boto.cfg with the default Autoscale endpoint to use::
+
+ [Boto]
+ autoscale_endpoint = eu-west-1.autoscaling.amazonaws.com
+
+Getting Existing AutoScale Groups
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To retrieve existing autoscale groups:
+
+>>> conn.get_all_groups()
+
+You will get back a list of AutoScale group objects, one for each AG you have.
+
+Creating Autoscaling Groups
+---------------------------
+An Autoscaling group has a number of parameters associated with it.
+
+ #. *Name*: The name of the AG.
+ #. *Availability Zones*: The list of availability zones it is defined over.
+ #. *Minimum Size*: Minimum number of instances running at one time.
+ #. *Maximum Size*: Maximum number of instances running at one time.
+ #. *Launch Configuration (LC)*: A set of instructions on how to launch an instance.
+ #. *Load Balancer*: An optional ELB load balancer to use. See the ELB tutorial
+ for information on how to create a load balancer.
+
+For the purposes of this tutorial, let's assume we want to create one autoscale
+group over the us-east-1a and us-east-1b availability zones. We want to have
+two instances in each availability zone, thus a minimum size of 4. For now we
+won't worry about scaling up or down - we'll introduce that later when we talk
+about triggers. Thus we'll set a maximum size of 4 as well. We'll also associate
+the AG with a load balancer which we assume we've already created, called 'my_lb'.
+
+Our LC tells us how to start an instance. This will at least include the image
+id to use, security_group, and key information. We assume the image id, key
+name and security groups have already been defined elsewhere - see the EC2
+tutorial for information on how to create these.
+
+>>> from boto.ec2.autoscale import LaunchConfiguration
+>>> from boto.ec2.autoscale import AutoScalingGroup
+>>> lc = LaunchConfiguration(name='my-launch_config', image_id='my-ami',
+ key_name='my_key_name',
+ security_groups=['my_security_groups'])
+>>> conn.create_launch_configuration(lc)
+
+We now have created a launch configuration called 'my-launch_config'. We are now
+ready to associate it with our new autoscale group.
+
+>>> ag = AutoScalingGroup(group_name='my_group', load_balancers=['my-lb'],
+ availability_zones=['us-east-1a', 'us-east-1b'],
+ launch_config=lc, min_size=4, max_size=4)
+>>> conn.create_auto_scaling_group(ag)
+
+We now have a new autoscaling group defined! At this point instances should be
+starting to launch. To view activity on an autoscale group:
+
+>>> ag.get_activities()
+ [Activity:Launching a new EC2 instance status:Successful progress:100,
+ ...]
+
+or alternatively:
+
+>>> conn.get_all_activities(ag)
+
+This autoscale group is fairly useful in that it will maintain the minimum size without
+breaching the maximum size defined. That means if one instance crashes, the autoscale
+group will use the launch configuration to start a new one in an attempt to maintain
+its minimum defined size. It knows instance health using the health check defined on
+its associated load balancer.
+
+Scaling a Group Up or Down
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+It might be more useful to also define means to scale a group up or down
+depending on certain criteria. For example, if the average CPU utilization of
+all your instances goes above 60%, you may want to scale up a number of
+instances to deal with demand - likewise you might want to scale down if usage
+drops. These criteria are defined in *triggers*.
+
+For example, let's modify our above group to have a maxsize of 8 and define means
+of scaling up based on CPU utilization. We'll say we should scale up if the average
+CPU usage goes above 80% and scale down if it goes below 40%.
+
+>>> from boto.ec2.autoscale import Trigger
+>>> tr = Trigger(name='my_trigger', autoscale_group=ag,
+ measure_name='CPUUtilization', statistic='Average',
+ unit='Percent',
+ dimensions=[('AutoScalingGroupName', ag.name)],
+ period=60, lower_threshold=40,
+ lower_breach_scale_increment='-5',
+ upper_threshold=80,
+ upper_breach_scale_increment='10',
+ breach_duration=360)
+>>> conn.create_trigger(tr)
+
diff --git a/docs/source/boto_theme/static/boto.css_t b/docs/source/boto_theme/static/boto.css_t
new file mode 100644
index 0000000..932e518
--- /dev/null
+++ b/docs/source/boto_theme/static/boto.css_t
@@ -0,0 +1,239 @@
+/**
+ * Sphinx stylesheet -- default theme
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+
+@import url("basic.css");
+
+/* -- page layout ----------------------------------------------------------- */
+
+body {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif;
+ font-size: 100%;
+ background-color: #111111;
+ color: #555555;
+ margin: 0;
+ padding: 0;
+}
+
+div.documentwrapper {
+ float: left;
+ width: 100%;
+}
+
+div.bodywrapper {
+ margin: 0 0 0 300px;
+}
+
+hr{
+ border: 1px solid #B1B4B6;
+}
+
+div.document {
+ background-color: #fafafa;
+}
+
+div.body {
+ background-color: #ffffff;
+ color: #3E4349;
+ padding: 1em 30px 30px 30px;
+ font-size: 0.9em;
+}
+
+div.footer {
+ color: #555;
+ width: 100%;
+ padding: 13px 0;
+ text-align: center;
+ font-size: 75%;
+}
+
+div.footer a {
+ color: #444444;
+}
+
+div.related {
+ background-color: #6F6555; /*#6BA81E;*/
+ line-height: 36px;
+ color: #CCCCCC;
+ text-shadow: 0px 1px 0 #444444;
+ font-size: 1.1em;
+}
+
+div.related a {
+ color: #D9C5A7;
+}
+
+div.related .right {
+ font-size: 0.9em;
+}
+
+div.sphinxsidebar {
+ font-size: 0.9em;
+ line-height: 1.5em;
+ width: 300px
+}
+
+div.sphinxsidebarwrapper{
+ padding: 20px 0;
+}
+
+div.sphinxsidebar h3,
+div.sphinxsidebar h4 {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif;
+ color: #222222;
+ font-size: 1.2em;
+ font-weight: bold;
+ margin: 0;
+ padding: 5px 10px;
+ text-shadow: 1px 1px 0 white
+}
+
+div.sphinxsidebar h3 a {
+ color: #444444;
+}
+
+div.sphinxsidebar p {
+ color: #888888;
+ padding: 5px 20px;
+ margin: 0.5em 0px;
+}
+
+div.sphinxsidebar p.topless {
+}
+
+div.sphinxsidebar ul {
+ margin: 10px 10px 10px 20px;
+ padding: 0;
+ color: #000000;
+}
+
+div.sphinxsidebar a {
+ color: #444444;
+}
+
+div.sphinxsidebar a:hover {
+ color: #E32E00;
+}
+
+div.sphinxsidebar input {
+ border: 1px solid #cccccc;
+ font-family: sans-serif;
+ font-size: 1.1em;
+ padding: 0.15em 0.3em;
+}
+
+div.sphinxsidebar input[type=text]{
+ margin-left: 20px;
+}
+
+/* -- body styles ----------------------------------------------------------- */
+
+a {
+ color: #005B81;
+ text-decoration: none;
+}
+
+a:hover {
+ color: #E32E00;
+}
+
+div.body h1,
+div.body h2,
+div.body h3,
+div.body h4,
+div.body h5,
+div.body h6 {
+ font-family: 'Lucida Grande', 'Lucida Sans Unicode', Geneva, Verdana, Arial, sans-serif;
+ font-weight: bold;
+ color: #069;
+ margin: 30px 0px 10px 0px;
+ padding: 5px 0 5px 0px;
+ text-shadow: 0px 1px 0 white;
+ border-bottom: 1px solid #C8D5E3;
+}
+
+div.body h1 { margin-top: 0; font-size: 165%; }
+div.body h2 { font-size: 135%; }
+div.body h3 { font-size: 120%; }
+div.body h4 { font-size: 110%; }
+div.body h5 { font-size: 100%; }
+div.body h6 { font-size: 100%; }
+
+a.headerlink {
+ color: #c60f0f;
+ font-size: 0.8em;
+ padding: 0 4px 0 4px;
+ text-decoration: none;
+}
+
+a.headerlink:hover {
+ background-color: #c60f0f;
+ color: white;
+}
+
+div.body p, div.body dd, div.body li {
+ line-height: 1.5em;
+}
+
+div.admonition p.admonition-title + p {
+ display: inline;
+}
+
+div.highlight{
+ background-color: white;
+}
+
+div.note {
+ background-color: #eeeeee;
+ border: 1px solid #cccccc;
+}
+
+div.seealso {
+ background-color: #ffffcc;
+ border: 1px solid #ffff66;
+}
+
+div.topic {
+ background-color: #fafafa;
+ border-width: 0;
+}
+
+div.warning {
+ background-color: #ffe4e4;
+ border: 1px solid #ff6666;
+}
+
+
+p.admonition-title {
+ display: inline;
+}
+
+p.admonition-title:after {
+ content: ":";
+}
+
+pre {
+ padding: 10px;
+ background-color: #fafafa;
+ color: #222222;
+ line-height: 1.5em;
+ font-size: 1.1em;
+ margin: 1.5em 0 1.5em 0;
+ -webkit-box-shadow: 0px 0px 4px #d8d8d8;
+ -moz-box-shadow: 0px 0px 4px #d8d8d8;
+ box-shadow: 0px 0px 4px #d8d8d8;
+}
+
+tt {
+ color: #222222;
+ padding: 1px 2px;
+ font-size: 1.2em;
+ font-family: monospace;
+}
+
+#table-of-contents ul {
+ padding-left: 2em;
+}
+
+div.sphinxsidebarwrapper div a {margin: 0.7em;}
\ No newline at end of file
diff --git a/docs/source/boto_theme/static/pygments.css b/docs/source/boto_theme/static/pygments.css
new file mode 100644
index 0000000..1f2d2b6
--- /dev/null
+++ b/docs/source/boto_theme/static/pygments.css
@@ -0,0 +1,61 @@
+.hll { background-color: #ffffcc }
+.c { color: #408090; font-style: italic } /* Comment */
+.err { border: 1px solid #FF0000 } /* Error */
+.k { color: #007020; font-weight: bold } /* Keyword */
+.o { color: #666666 } /* Operator */
+.cm { color: #408090; font-style: italic } /* Comment.Multiline */
+.cp { color: #007020 } /* Comment.Preproc */
+.c1 { color: #408090; font-style: italic } /* Comment.Single */
+.cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */
+.gd { color: #A00000 } /* Generic.Deleted */
+.ge { font-style: italic } /* Generic.Emph */
+.gr { color: #FF0000 } /* Generic.Error */
+.gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.gi { color: #00A000 } /* Generic.Inserted */
+.go { color: #303030 } /* Generic.Output */
+.gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */
+.gs { font-weight: bold } /* Generic.Strong */
+.gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.gt { color: #0040D0 } /* Generic.Traceback */
+.kc { color: #007020; font-weight: bold } /* Keyword.Constant */
+.kd { color: #007020; font-weight: bold } /* Keyword.Declaration */
+.kn { color: #007020; font-weight: bold } /* Keyword.Namespace */
+.kp { color: #007020 } /* Keyword.Pseudo */
+.kr { color: #007020; font-weight: bold } /* Keyword.Reserved */
+.kt { color: #902000 } /* Keyword.Type */
+.m { color: #208050 } /* Literal.Number */
+.s { color: #4070a0 } /* Literal.String */
+.na { color: #4070a0 } /* Name.Attribute */
+.nb { color: #007020 } /* Name.Builtin */
+.nc { color: #0e84b5; font-weight: bold } /* Name.Class */
+.no { color: #60add5 } /* Name.Constant */
+.nd { color: #555555; font-weight: bold } /* Name.Decorator */
+.ni { color: #d55537; font-weight: bold } /* Name.Entity */
+.ne { color: #007020 } /* Name.Exception */
+.nf { color: #06287e } /* Name.Function */
+.nl { color: #002070; font-weight: bold } /* Name.Label */
+.nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */
+.nt { color: #062873; font-weight: bold } /* Name.Tag */
+.nv { color: #bb60d5 } /* Name.Variable */
+.ow { color: #007020; font-weight: bold } /* Operator.Word */
+.w { color: #bbbbbb } /* Text.Whitespace */
+.mf { color: #208050 } /* Literal.Number.Float */
+.mh { color: #208050 } /* Literal.Number.Hex */
+.mi { color: #208050 } /* Literal.Number.Integer */
+.mo { color: #208050 } /* Literal.Number.Oct */
+.sb { color: #4070a0 } /* Literal.String.Backtick */
+.sc { color: #4070a0 } /* Literal.String.Char */
+.sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */
+.s2 { color: #4070a0 } /* Literal.String.Double */
+.se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */
+.sh { color: #4070a0 } /* Literal.String.Heredoc */
+.si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */
+.sx { color: #c65d09 } /* Literal.String.Other */
+.sr { color: #235388 } /* Literal.String.Regex */
+.s1 { color: #4070a0 } /* Literal.String.Single */
+.ss { color: #517918 } /* Literal.String.Symbol */
+.bp { color: #007020 } /* Name.Builtin.Pseudo */
+.vc { color: #bb60d5 } /* Name.Variable.Class */
+.vg { color: #bb60d5 } /* Name.Variable.Global */
+.vi { color: #bb60d5 } /* Name.Variable.Instance */
+.il { color: #208050 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/docs/source/boto_theme/theme.conf b/docs/source/boto_theme/theme.conf
new file mode 100644
index 0000000..7d09085
--- /dev/null
+++ b/docs/source/boto_theme/theme.conf
@@ -0,0 +1,3 @@
+[theme]
+inherit = basic
+stylesheet = boto.css
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000..459c44f
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+
+import sys, os
+
+extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo']
+autoclass_content="both"
+templates_path = ['_templates']
+source_suffix = '.rst'
+master_doc = 'index'
+project = u'boto'
+copyright = u'2009,2010, Mitch Garnaat'
+version = '2.0'
+exclude_trees = []
+pygments_style = 'sphinx'
+html_theme = 'boto_theme'
+html_theme_path = ["."]
+html_static_path = ['_static']
+htmlhelp_basename = 'botodoc'
+latex_documents = [
+ ('index', 'boto.tex', u'boto Documentation',
+ u'Mitch Garnaat', 'manual'),
+]
+intersphinx_mapping = {'http://docs.python.org/': None}
+
+try:
+ release = os.environ.get('SVN_REVISION', 'HEAD')
+ print release
+except Exception, e:
+ print e
+
+html_title = "boto v%s (r%s)" % (version, release)
diff --git a/docs/source/documentation.rst b/docs/source/documentation.rst
new file mode 100644
index 0000000..d4999d9
--- /dev/null
+++ b/docs/source/documentation.rst
@@ -0,0 +1,59 @@
+.. _documentation:
+
+=======================
+About the Documentation
+=======================
+
+boto's documentation uses the Sphinx__ documentation system, which in turn is
+based on docutils__. The basic idea is that lightly-formatted plain-text
+documentation is transformed into HTML, PDF, and any other output format.
+
+__ http://sphinx.pocoo.org/
+__ http://docutils.sf.net/
+
+To actually build the documentation locally, you'll currently need to install
+Sphinx -- ``easy_install Sphinx`` should do the trick.
+
+Then, building the html is easy; just ``make html`` from the ``docs`` directory.
+
+To get started contributing, you'll want to read the `ReStructuredText
+Primer`__. After that, you'll want to read about the `Sphinx-specific markup`__
+that's used to manage metadata, indexing, and cross-references.
+
+__ http://sphinx.pocoo.org/rest.html
+__ http://sphinx.pocoo.org/markup/
+
+The main thing to keep in mind as you write and edit docs is that the more
+semantic markup you can add the better. So::
+
+ Import ``boto`` to your script...
+
+Isn't nearly as helpful as::
+
+ Add :mod:`boto` to your script...
+
+This is because Sphinx will generate a proper link for the latter, which greatly
+helps readers. There's basically no limit to the amount of useful markup you can
+add.
+
+
+The fabfile
+-----------
+
+There is a Fabric__ file that can be used to build and deploy the documentation
+to a webserver that you ssh access to.
+
+__ http://fabfile.org
+
+To build and deploy::
+
+ cd docs/
+ fab deploy:remote_path='/var/www/folder/whatever' --hosts=user@host
+
+This will get the latest code from subversion, add the revision number to the
+docs conf.py file, call ``make html`` to build the documentation, then it will
+tarball it up and scp up to the host you specified and untarball it in the
+folder you specified creating a symbolic link from the untarballed versioned
+folder to ``{remote_path}/boto-docs``.
+
+
diff --git a/docs/source/ec2_tut.rst b/docs/source/ec2_tut.rst
new file mode 100644
index 0000000..6326243
--- /dev/null
+++ b/docs/source/ec2_tut.rst
@@ -0,0 +1,420 @@
+.. _ec2_tut:
+
+=======================================
+An Introduction to boto's EC2 interface
+=======================================
+
+This tutorial focuses on the boto interface to the Elastic Compute Cloud
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing EC2 is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.ec2.connection import EC2Connection
+>>> conn = EC2Connection('<aws access key>', '<aws secret key>')
+
+At this point the variable conn will point to an EC2Connection object. In
+this example, the AWS access key and AWS secret key are passed in to the
+method explicitly. Alternatively, you can set the environment variables:
+
+AWS_ACCESS_KEY_ID - Your AWS Access Key ID
+AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = EC2Connection()
+
+There is also a shortcut function in the boto package, called connect_ec2
+that may provide a slightly easier means of creating a connection:
+
+>>> import boto
+>>> conn = boto.connect_ec2()
+
+In either case, conn will point to an EC2Connection object which we will
+use throughout the remainder of this tutorial.
+
+A Note About Regions
+--------------------
+The 2008-12-01 version of the EC2 API introduced the idea of Regions.
+A Region is geographically distinct and is completely isolated from
+other EC2 Regions. At the time of the launch of the 2008-12-01 API
+there were two available regions, us-east-1 and eu-west-1. Each
+Region has its own service endpoint and therefore would require
+its own EC2Connection object in boto.
+
+The default behavior in boto, as shown above, is to connect you with
+the us-east-1 region which is exactly the same as the behavior prior
+to the introduction of Regions.
+
+However, if you would like to connect to a region other than us-east-1,
+there are a couple of ways to accomplish that. The first way is to
+ask EC2 to provide a list of currently supported regions. You can do
+that using the regions function in the boto.ec2 module:
+
+>>> import boto.ec2
+>>> regions = boto.ec2.regions()
+>>> regions
+[RegionInfo:eu-west-1, RegionInfo:us-east-1]
+>>>
+
+As you can see, a list of available regions is returned. Each region
+is represented by a RegionInfo object. A RegionInfo object has two
+attributes; a name and an endpoint.
+
+>>> eu = regions[0]
+>>> eu.name
+u'eu-west-1'
+>>> eu.endpoint
+u'eu-west-1.ec2.amazonaws.com'
+>>>
+
+You can easily create a connection to a region by using the connect
+method of the RegionInfo object:
+
+>>> conn_eu = eu.connect()
+>>> conn_eu
+<boto.ec2.connection.EC2Connection instance at 0xccaaa8>
+>>>
+
+The variable conn_eu is now bound to an EC2Connection object connected
+to the endpoint of the eu-west-1 region and all operations performed via
+that connection and all objects created by that connection will be scoped
+to the eu-west-1 region. You can always tell which region a connection
+is associated with by accessing its region attribute:
+
+>>> conn_eu.region
+RegionInfo:eu-west-1
+>>>
+
+Supporting EC2 objects such as SecurityGroups, KeyPairs, Addresses,
+Volumes, Images and SnapShots are local to a particular region. So
+don't expect to find the security groups you created in the us-east-1
+region to be available in the eu-west-1 region.
+
+Some objects in boto, such as SecurityGroup, have a new method called
+copy_to_region which will attempt to create a copy of the object in
+another region. For example:
+
+>>> regions
+[RegionInfo:eu-west-1, RegionInfo:us-east-1]
+>>> conn_us = regions[1].connect()
+>>> groups = conn_us.get_all_security_groups()
+>>> groups
+[SecurityGroup:alfresco, SecurityGroup:apache, SecurityGroup:vnc,
+SecurityGroup:appserver2, SecurityGroup:FTP, SecurityGroup:webserver,
+SecurityGroup:default, SecurityGroup:test-1228851996]
+>>> us_group = groups[0]
+>>> us_group
+SecurityGroup:alfresco
+>>> us_group.rules
+[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)]
+>>> eu_group = us_group.copy_to_region(eu)
+>>> eu_group.rules
+[IPPermissions:tcp(22-22), IPPermissions:tcp(80-80), IPPermissions:tcp(1445-1445)]
+
+In the above example, we chose one of the security groups available
+in the us-east-1 region (the group alfresco) and copied that security
+group to the eu-west-1 region. All of the rules associated with the
+original security group will be copied as well.
+
+If you would like your default region to be something other than
+us-east-1, you can override that default in your boto config file
+(either ~/.boto for personal settings or /etc/boto.cfg for system-wide
+settings). For example:
+
+[Boto]
+ec2_region_name = eu-west-1
+ec2_region_endpoint = eu-west-1.ec2.amazonaws.com
+
+The above lines added to either boto config file would set the default
+region to be eu-west-1.
+
+Images & Instances
+------------------
+
+An Image object represents an Amazon Machine Image (AMI) which is an
+encrypted machine image stored in Amazon S3. It contains all of the
+information necessary to boot instances of your software in EC2.
+
+To get a listing of all available Images:
+
+>>> images = conn.get_all_images()
+>>> images
+[Image:ami-20b65349, Image:ami-22b6534b, Image:ami-23b6534a, Image:ami-25b6534c, Image:ami-26b6534f, Image:ami-2bb65342, Image:ami-78b15411, Image:ami-a4aa4fcd, Image:ami-c3b550aa, Image:ami-e4b6538d, Image:ami-f1b05598]
+>>> for image in images:
+... print image.location
+ec2-public-images/fedora-core4-base.manifest.xml
+ec2-public-images/fedora-core4-mysql.manifest.xml
+ec2-public-images/fedora-core4-apache.manifest.xml
+ec2-public-images/fedora-core4-apache-mysql.manifest.xml
+ec2-public-images/developer-image.manifest.xml
+ec2-public-images/getting-started.manifest.xml
+marcins_cool_public_images/fedora-core-6.manifest.xml
+khaz_fc6_win2003/image.manifest
+aes-images/django.manifest
+marcins_cool_public_images/ubuntu-6.10.manifest.xml
+ckk_public_ec2_images/centos-base-4.4.manifest.xml
+
+The most useful thing you can do with an Image is to actually run it, so let's
+run a new instance of the base Fedora image:
+
+>>> image = images[0]
+>>> image.location
+ec2-public-images/fedora-core4-base.manifest.xml
+>>> reservation = image.run()
+
+This will begin the boot process for a new EC2 instance. The run method
+returns a Reservation object which represents a collection of instances
+that are all started at the same time. In this case, we only started one
+but you can check the instances attribute of the Reservation object to see
+all of the instances associated with this reservation:
+
+>>> reservation.instances
+[Instance:i-6761850e]
+>>> instance = reservation.instances[0]
+>>> instance.state
+u'pending'
+>>>
+
+So, we have an instance booting up that is still in the pending state. We
+can call the update method on the instance to get a refreshed view of its
+state:
+
+>>> instance.update()
+>>> instance.state
+u'pending'
+>>> # wait a few minutes
+>>> instance.update()
+>>> instance.state
+u'running'
+
+So, now our instance is running. The time it takes to boot a new instance
+varies based on a number of different factors but usually it takes less than
+five minutes.
+
+Now the instance is up and running you can find out its DNS name like this:
+
+>>> instance.dns_name
+u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com'
+
+This provides the public DNS name for your instance. Since the 2007-03-22
+release of the EC2 service, the default addressing scheme for instances
+uses NAT-addresses which means your instance has both a public IP address and a
+non-routable private IP address. You can access each of these addresses
+like this:
+
+>>> instance.public_dns_name
+u'ec2-72-44-40-153.z-2.compute-1.amazonaws.com'
+>>> instance.private_dns_name
+u'domU-12-31-35-00-42-33.z-2.compute-1.internal'
+
+Even though your instance has a public DNS name, you won't be able to
+access it yet because you need to set up some security rules which are
+described later in this tutorial.
+
+Since you are now being charged for that instance we just created, you will
+probably want to know how to terminate the instance, as well. The simplest
+way is to use the stop method of the Instance object:
+
+>>> instance.stop()
+>>> instance.update()
+>>> instance.state
+u'shutting-down'
+>>> # wait a minute
+>>> instance.update()
+>>> instance.state
+u'terminated'
+>>>
+
+When we created our new instance, we didn't pass any args to the run method
+so we got all of the default values. The full set of possible parameters
+to the run method are:
+
+min_count - The minimum number of instances to launch.
+max_count - The maximum number of instances to launch.
+keypair - Keypair to launch instances with (either a KeyPair object or a string with the name of the desired keypair).
+security_groups - A list of security groups to associate with the instance. This can either be a list of SecurityGroup objects or a list of strings with the names of the desired security groups.
+user_data - Data to be made available to the launched instances. This should be base64 encoded according to the EC2 documentation.
+
+So, if I wanted to create two instances of the base image and launch them
+with my keypair, called gsg-keypair, I would do this:
+
+>>> reservation.image.run(2,2,'gsg-keypair')
+>>> reservation.instances
+[Instance:i-5f618536, Instance:i-5e618537]
+>>> for i in reservation.instances:
+... print i.status
+u'pending'
+u'pending'
+>>>
+
+Later, when you are finished with the instances you can either stop each
+individually or you can call the stop_all method on the Reservation object:
+
+>>> reservation.stop_all()
+>>>
+
+If you just want to get a list of all of your running instances, use
+the get_all_instances method of the connection object. Note that the
+list returned is actually a list of Reservation objects (which contain
+the Instances) and that the list may include recently terminated instances
+for a small period of time subsequent to their termination.
+
+>>> instances = conn.get_all_instances()
+>>> instances
+[Reservation:r-a76085ce, Reservation:r-a66085cf, Reservation:r-8c6085e5]
+>>> r = instances[0]
+>>> for inst in r.instances:
+... print inst.state
+u'terminated'
+>>>
+
+A recent addition to the EC2 api's is to allow other EC2 users to launch
+your images. There are a couple of ways of accessing this capability in
+boto but I'll show you the simplest way here. First of all, you need to
+know the Amazon ID for the user in question. The Amazon Id is a twelve
+digit number that appears on your Account Activity page at AWS. It looks
+like this:
+
+1234-5678-9012
+
+To use this number in API calls, you need to remove the dashes so in our
+example the user ID would be 123456789012. To allow the user associated
+with this ID to launch one of your images, let's assume that the variable
+image represents the Image you want to share. So:
+
+>>> image.get_launch_permissions()
+{}
+>>>
+
+The get_launch_permissions method returns a dictionary object with two possible
+entries; user_ids or groups. In our case we haven't yet given anyone
+permission to launch our image so the dictionary is empty. To add our
+EC2 user:
+
+>>> image.set_launch_permissions(['123456789012'])
+True
+>>> image.get_launch_permissions()
+{'user_ids': [u'123456789012']}
+>>>
+
+We have now added the desired user to the launch permissions for the Image
+so that user will now be able to access and launch our Image. You can add
+multiple users at one time by adding them all to the list you pass in as
+a parameter to the method. To revoke the user's launch permissions:
+
+>>> image.remove_launch_permissions(['123456789012'])
+True
+>>> image.get_launch_permissions()
+{}
+>>>
+
+It is possible to pass a list of group names to the set_launch_permissions
+method, as well. The only group available at the moment is the group "all"
+which would allow any valid EC2 user to launch your image.
+
+Finally, you can completely reset the launch permissions for an Image with:
+
+>>> image.reset_launch_permissions()
+True
+>>>
+
+This will remove all users and groups from the launch permission list and
+makes the Image private, again.
+
+Security Groups
+----------------
+
+Amazon defines a security group as:
+
+"A security group is a named collection of access rules. These access rules
+ specify which ingress, i.e. incoming, network traffic should be delivered
+ to your instance."
+
+To get a listing of all currently defined security groups:
+
+>>> rs = conn.get_all_security_groups()
+>>> print rs
+[SecurityGroup:appserver, SecurityGroup:default, SecurityGroup:vnc, SecurityGroup:webserver]
+>>>
+
+Each security group can have an arbitrary number of rules which represent
+different network ports which are being enabled. To find the rules for a
+particular security group, use the rules attribute:
+
+>>> sg = rs[1]
+>>> sg.name
+u'default'
+>>> sg.rules
+[IPPermissions:tcp(0-65535),
+ IPPermissions:udp(0-65535),
+ IPPermissions:icmp(-1--1),
+ IPPermissions:tcp(22-22),
+ IPPermissions:tcp(80-80)]
+>>>
+
+In addition to listing the available security groups you can also create
+a new security group. I'll follow through the "Three Tier Web Service"
+example included in the EC2 Developer's Guide for an example of how to
+create security groups and add rules to them.
+
+First, let's create a group for our Apache web servers that allows HTTP
+access to the world:
+
+>>> web = conn.create_security_group('apache', 'Our Apache Group')
+>>> web
+SecurityGroup:apache
+>>> web.authorize('tcp', 80, 80, '0.0.0.0/0')
+True
+>>>
+
+The first argument is the ip protocol which can be one of; tcp, udp or icmp.
+The second argument is the FromPort or the beginning port in the range, the
+third argument is the ToPort or the ending port in the range and the last
+argument is the CIDR IP range to authorize access to.
+
+Next we create another group for the app servers:
+
+>>> app = conn.create_security_group('appserver', 'The application tier')
+>>>
+
+We then want to grant access between the web server group and the app
+server group. So, rather than specifying an IP address as we did in the
+last example, this time we will specify another SecurityGroup object.
+
+>>> app.authorize(src_group=web)
+True
+>>>
+
+Now, to verify that the web group now has access to the app servers, we want to
+temporarily allow SSH access to the web servers from our computer. Let's
+say that our IP address is 192.168.1.130 as it is in the EC2 Developer
+Guide. To enable that access:
+
+>>> web.authorize(ip_protocol='tcp', from_port=22, to_port=22, cidr_ip='192.168.1.130/32')
+True
+>>>
+
+Now that this access is authorized, we could ssh into an instance running in
+the web group and then try to telnet to specific ports on servers in the
+appserver group, as shown in the EC2 Developer's Guide. When this testing is
+complete, we would want to revoke SSH access to the web server group, like this:
+
+>>> web.rules
+[IPPermissions:tcp(80-80),
+ IPPermissions:tcp(22-22)]
+>>> web.revoke('tcp', 22, 22, cidr_ip='192.168.1.130/32')
+True
+>>> web.rules
+[IPPermissions:tcp(80-80)]
+>>>
+
+
+
+
+
+
+
diff --git a/docs/source/elb_tut.rst b/docs/source/elb_tut.rst
new file mode 100644
index 0000000..b873578
--- /dev/null
+++ b/docs/source/elb_tut.rst
@@ -0,0 +1,202 @@
+.. _elb_tut:
+
+==========================================================
+An Introduction to boto's Elastic Load Balancing interface
+==========================================================
+
+This tutorial focuses on the boto interface for Elastic Load Balancing
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto, and are familiar with the boto ec2 interface.
+
+Elastic Load Balancing Concepts
+-------------------------------
+Elastic Load Balancing (ELB) is intimately connected with Amazon's Elastic
+Compute Cloud (EC2) service. Using the ELB service allows you to create a load
+balancer - a DNS endpoint and set of ports that distributes incoming requests
+to a set of ec2 instances. The advantages of using a load balancer is that it
+allows you to truly scale up or down a set of backend instances without
+disrupting service. Before the ELB service you had to do this manually by
+launching an EC2 instance and installing load balancer software on it (nginx,
+haproxy, perlbal, etc.) to distribute traffic to other EC2 instances.
+
+Recall that the ec2 service is split into Regions and Availability Zones (AZ).
+At the time of writing, there are two Regions - US and Europe, and each region
+is divided into a number of AZs (for example, us-east-1a, us-east-1b, etc.).
+You can think of AZs as data centers - each runs off a different set of ISP
+backbones and power providers. ELB load balancers can span multiple AZs but
+cannot span multiple regions. That means that if you'd like to create a set of
+instances spanning both the US and Europe Regions you'd have to create two load
+balancers and have some sort of other means of distributing requests between
+the two load balancers. An example of this could be using GeoIP techniques to
+choose the correct load balancer, or perhaps DNS round robin. Keep in mind also
+that traffic is distributed equally over all AZs the ELB balancer spans. This
+means you should have an equal number of instances in each AZ if you want to
+equally distribute load amongst all your instances.
+
+Creating a Connection
+---------------------
+The first step in accessing ELB is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.ec2.elb import ELBConnection
+>>> conn = ELBConnection('<aws access key>', '<aws secret key>')
+
+There is also a shortcut function in the boto package, called connect_elb
+that may provide a slightly easier means of creating a connection:
+
+>>> import boto
+>>> conn = boto.connect_elb()
+
+In either case, conn will point to an ELBConnection object which we will
+use throughout the remainder of this tutorial.
+
+A Note About Regions and Endpoints
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Like EC2 the ELB service has a different endpoint for each region. By default
+the US endpoint is used. To choose a specific region, instantiate the
+ELBConnection object with that region's endpoint.
+
+>>> ec2 = boto.connect_elb(host='eu-west-1.elasticloadbalancing.amazonaws.com')
+
+Alternatively, edit your boto.cfg with the default ELB endpoint to use::
+
+ [Boto]
+ elb_endpoint = eu-west-1.elasticloadbalancing.amazonaws.com
+
+Getting Existing Load Balancers
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+To retrieve any existing load balancers:
+
+>>> conn.get_all_load_balancers()
+
+You will get back a list of LoadBalancer objects.
+
+Creating a Load Balancer
+------------------------
+To create a load balancer you need the following:
+ #. The specific **ports and protocols** you want to load balance over, and what port
+ you want to connect to all instances.
+ #. A **health check** - the ELB concept of a *heart beat* or *ping*. ELB will use this health
+ check to see whether your instances are up or down. If they go down, the load balancer
+ will no longer send requests to them.
+ #. A **list of Availability Zones** you'd like to create your load balancer over.
+
+Ports and Protocols
+^^^^^^^^^^^^^^^^^^^
+An incoming connection to your load balancer will come on one or more ports -
+for example 80 (HTTP) and 443 (HTTPS). Each can be using a protocol -
+currently, the supported protocols are TCP and HTTP. We also need to tell the
+load balancer which port to route connects *to* on each instance. For example,
+to create a load balancer for a website that accepts connections on 80 and 443,
+and that routes connections to port 8080 and 8443 on each instance, you would
+specify that the load balancer ports and protocols are:
+
+ * 80, 8080, HTTP
+ * 443, 8443, TCP
+
+This says that the load balancer will listen on two ports - 80 and 443.
+Connections on 80 will use an HTTP load balancer to forward connections to port
+8080 on instances. Likewise, the load balancer will listen on 443 to forward
+connections to 8443 on each instance using the TCP balancer. We need to
+use TCP for the HTTPS port because it is encrypted at the application
+layer. Of course, we could specify the load balancer use TCP for port 80,
+however specifying HTTP allows you to let ELB handle some work for you -
+for example HTTP header parsing.
+
+
+Configuring a Health Check
+^^^^^^^^^^^^^^^^^^^^^^^^^^
+A health check allows ELB to determine which instances are alive and able to
+respond to requests. A health check is essentially a tuple consisting of:
+
+ * *target*: What to check on an instance. For a TCP check this is comprised of::
+
+ TCP:PORT_TO_CHECK
+
+ Which attempts to open a connection on PORT_TO_CHECK. If the connection opens
+ successfully, that specific instance is deemed healthy, otherwise it is marked
+ temporarily as unhealthy. For HTTP, the situation is slightly different::
+
+ HTTP:PORT_TO_CHECK/RESOURCE
+
+ This means that the health check will connect to the resource /RESOURCE on
+ PORT_TO_CHECK. If an HTTP 200 status is returned the instance is deemed healthy.
+ * *interval*: How often the check is made. This is given in seconds and defaults to 30.
+ The valid range of intervals goes from 5 seconds to 600 seconds.
+ * *timeout*: The number of seconds the load balancer will wait for a check to return a
+ result.
+ * *UnhealthyThreshold*: The number of consecutive failed checks to deem the instance
+ as being dead. The default is 5, and the range of valid values lies from 2 to 10.
+
+The following example creates a health check called *instance_health* that simply checks
+instances every 20 seconds on port 80 over HTTP at the resource /health for 200 successes.
+
+>>> import boto
+>>> from boto.ec2.elb import HealthCheck
+>>> conn = boto.connect_elb()
+>>> hc = HealthCheck('instance_health', interval=20, target='HTTP:8080/health')
+
+Putting It All Together
+^^^^^^^^^^^^^^^^^^^^^^^
+
+Finally, let's create a load balancer in the US region that listens on ports 80 and 443
+and distributes requests to instances on 8080 and 8443 over HTTP and TCP. We want the
+load balancer to span the availability zones *us-east-1a* and *us-east-1b*:
+
+>>> lb = conn.create_load_balancer('my_lb', ['us-east-1a', 'us-east-1b'],
+ [(80, 8080, 'http'), (443, 8443, 'tcp')])
+>>> lb.configure_health_check(hc)
+
+The load balancer has been created. To see where you can actually connect to it, do:
+
+>>> print lb.dns_name
+my_elb-123456789.us-east-1.elb.amazonaws.com
+
+You can then CNAME map a better name, i.e. www.MYWEBSITE.com to the above address.
+
+Adding Instances To a Load Balancer
+-----------------------------------
+
+Now that the load balancer has been created, there are two ways to add instances to it:
+
+ #. Manually, adding each instance in turn.
+ #. Mapping an autoscale group to the load balancer. Please see the Autoscale
+ tutorial for information on how to do this.
+
+Manually Adding and Removing Instances
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Assuming you have a list of instance ids, you can add them to the load balancer
+
+>>> instance_ids = ['i-4f8cf126', 'i-0bb7ca62']
+>>> lb.register_instances(instance_ids)
+
+Keep in mind that these instances should be in Security Groups that match the
+internal ports of the load balancer you just created (for this example, they
+should allow incoming connections on 8080 and 8443).
+
+To remove instances:
+
+>>> lb.deregister_instances(instance_ids)
+
+Modifying Availability Zones for a Load Balancer
+------------------------------------------------
+
+If you wanted to disable one or more zones from an existing load balancer:
+
+>>> lb.disable_zones(['us-east-1a'])
+
+You can then terminate each instance in the disabled zone and then deregister them from your load
+balancer.
+
+To enable zones:
+
+>>> lb.enable_zones(['us-east-1c'])
+
+Deleting a Load Balancer
+------------------------
+
+>>> lb.delete()
+
+
diff --git a/docs/source/emr_tut.rst b/docs/source/emr_tut.rst
new file mode 100644
index 0000000..996781e
--- /dev/null
+++ b/docs/source/emr_tut.rst
@@ -0,0 +1,108 @@
+.. _emr_tut:
+
+=====================================================
+An Introduction to boto's Elastic Mapreduce interface
+=====================================================
+
+This tutorial focuses on the boto interface to Elastic Mapreduce from
+Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing Elastic Mapreduce is to create a connection
+to the service. There are two ways to do this in boto. The first is:
+
+>>> from boto.emr.connection import EmrConnection
+>>> conn = EmrConnection('<aws access key>', '<aws secret key>')
+
+At this point the variable conn will point to an EmrConnection object.
+In this example, the AWS access key and AWS secret key are passed in to
+the method explicitly. Alternatively, you can set the environment variables:
+
+AWS_ACCESS_KEY_ID - Your AWS Access Key ID \
+AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = EmrConnection()
+
+There is also a shortcut function in the boto package called connect_emr
+that may provide a slightly easier means of creating a connection:
+
+>>> import boto
+>>> conn = boto.connect_emr()
+
+In either case, conn points to an EmrConnection object which we will use
+throughout the remainder of this tutorial.
+
+Creating Streaming JobFlow Steps
+--------------------------------
+Upon creating a connection to Elastic Mapreduce you will next
+want to create one or more jobflow steps. There are two types of steps, streaming
+and custom jar, both of which have a class in the boto Elastic Mapreduce implementation.
+
+Creating a streaming step that runs the AWS wordcount example, itself written in Python, can be accomplished by:
+
+>>> from boto.emr.step import StreamingStep
+>>> step = StreamingStep(name='My wordcount example',
+... mapper='s3n://elasticmapreduce/samples/wordcount/wordSplitter.py',
+... reducer='aggregate',
+... input='s3n://elasticmapreduce/samples/wordcount/input',
+... output='s3n://<my output bucket>/output/wordcount_output')
+
+where <my output bucket> is a bucket you have created in S3.
+
+Note that this statement does not run the step, that is accomplished later when we create a jobflow.
+
+Additional arguments of note to the streaming jobflow step are cache_files, cache_archive and step_args. The options cache_files and cache_archive enable you to use Hadoop's distributed cache to share files amongst the instances that run the step. The argument step_args allows one to pass additional arguments to Hadoop streaming, for example modifications to the Hadoop job configuration.
+
+Creating Custom Jar Job Flow Steps
+----------------------------------
+
+The second type of jobflow step executes tasks written with a custom jar. Creating a custom jar step for the AWS CloudBurst example can be accomplished by:
+
+>>> from boto.emr.step import JarStep
+>>> step = JarStep(name='Cloudburst example',
+... jar='s3n://elasticmapreduce/samples/cloudburst/cloudburst.jar',
+... step_args=['s3n://elasticmapreduce/samples/cloudburst/input/s_suis.br',
+... 's3n://elasticmapreduce/samples/cloudburst/input/100k.br',
+... 's3n://<my output bucket>/output/cloudfront_output',
+... 36, 3, 0, 1, 240, 48, 24, 24, 128, 16])
+
+Note that this statement does not actually run the step, that is accomplished later when we create a jobflow. Also note that this JarStep does not include a main_class argument since the jar MANIFEST.MF has a Main-Class entry.
+
+Creating JobFlows
+-----------------
+Once you have created one or more jobflow steps, you will next want to create and run a jobflow. Creating a jobflow that executes either of the steps we created above can be accomplished by:
+
+>>> import boto
+>>> conn = boto.connect_emr()
+>>> jobid = conn.run_jobflow(name='My jobflow',
+... log_uri='s3://<my log uri>/jobflow_logs',
+... steps=[step])
+
+The method will not block for the completion of the jobflow, but will immediately return. The status of the jobflow can be determined by:
+
+>>> status = conn.describe_jobflow(jobid)
+>>> status.state
+u'STARTING'
+
+One can then use this state to block for a jobflow to complete. Valid jobflow states currently defined in the AWS API are COMPLETED, FAILED, TERMINATED, RUNNING, SHUTTING_DOWN, STARTING and WAITING.
+
+In some cases you may not have built all of the steps prior to running the jobflow. In these cases additional steps can be added to a jobflow by running:
+
+>>> conn.add_jobflow_steps(jobid, [second_step])
+
+If you wish to add additional steps to a running jobflow you may want to set the keep_alive parameter to True in run_jobflow so that the jobflow does not automatically terminate when the first step completes.
+
+The run_jobflow method has a number of important parameters that are worth investigating. They include parameters to change the number and type of EC2 instances on which the jobflow is executed, set a SSH key for manual debugging and enable AWS console debugging.
+
+Terminating JobFlows
+--------------------
+By default when all the steps of a jobflow have finished or failed the jobflow terminates. However, if you set the keep_alive parameter to True or just want to halt the execution of a jobflow early you can terminate a jobflow by:
+
+>>> import boto
+>>> conn = boto.connect_emr()
+>>> conn.terminate_jobflow('<jobflow id>')
+
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000..2ecd1d6
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,64 @@
+.. _index:
+
+===============================================
+boto: A Python interface to Amazon Web Services
+===============================================
+
+An integrated interface to current and future infrastructural services
+offered by Amazon Web Services.
+
+Currently, this includes:
+
+- Simple Storage Service (S3)
+- Simple Queue Service (SQS)
+- Elastic Compute Cloud (EC2)
+
+ - Elastic Load Balancer (ELB)
+ - CloudWatch
+ - AutoScale
+
+- Mechanical Turk
+- SimpleDB (SDB) - See SimpleDbPage for details
+- CloudFront
+- Virtual Private Cloud (VPC)
+- Relational Data Services (RDS)
+- Elastic Map Reduce (EMR)
+- Flexible Payment Service (FPS)
+- Identity and Access Management (IAM)
+
+The boto project page is at http://boto.googlecode.com/
+
+The boto source repository is at http://github.com/boto
+
+Follow project updates on Twitter (http://twitter.com/pythonboto).
+
+Follow Mitch on Twitter (http://twitter.com/garnaat).
+
+Join our `IRC channel`_ (#boto on FreeNode).
+
+.. _IRC channel: http://webchat.freenode.net/?channels=boto
+
+Documentation Contents
+----------------------
+
+.. toctree::
+ :maxdepth: 2
+
+ sqs_tut
+ s3_tut
+ ec2_tut
+ elb_tut
+ autoscale_tut
+ vpc_tut
+ emr_tut
+ ref/index
+ documentation
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/docs/source/ref/boto.rst b/docs/source/ref/boto.rst
new file mode 100644
index 0000000..5a241b3
--- /dev/null
+++ b/docs/source/ref/boto.rst
@@ -0,0 +1,47 @@
+.. _ref-boto:
+
+====
+boto
+====
+
+boto
+----
+
+.. automodule:: boto
+ :members:
+ :undoc-members:
+
+boto.connection
+---------------
+
+.. automodule:: boto.connection
+ :members:
+ :undoc-members:
+
+boto.exception
+--------------
+
+.. automodule:: boto.exception
+ :members:
+ :undoc-members:
+
+boto.handler
+------------
+
+.. automodule:: boto.handler
+ :members:
+ :undoc-members:
+
+boto.resultset
+--------------
+
+.. automodule:: boto.resultset
+ :members:
+ :undoc-members:
+
+boto.utils
+----------
+
+.. automodule:: boto.utils
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/cloudfront.rst b/docs/source/ref/cloudfront.rst
new file mode 100644
index 0000000..5cb80be
--- /dev/null
+++ b/docs/source/ref/cloudfront.rst
@@ -0,0 +1,108 @@
+.. ref-cloudfront
+
+==========
+cloudfront
+==========
+
+A Crash Course in CloudFront in Boto
+------------------------------------
+
+This new boto module provides an interface to Amazon's new Content Service, CloudFront.
+
+Caveats:
+
+This module is not well tested. Paging of distributions is not yet
+supported. CNAME support is completely untested. Use with caution.
+Feedback and bug reports are greatly appreciated.
+
+The following shows the main features of the cloudfront module from an interactive shell:
+
+Create a CloudFront connection:
+
+>>> from boto.cloudfront import CloudFrontConnection
+>>> c = CloudFrontConnection()
+
+Create a new :class:`boto.cloudfront.distribution.Distribution`:
+
+>>> distro = c.create_distribution(origin='mybucket.s3.amazonaws.com', enabled=False, comment='My new Distribution')
+>>> distro.domain_name
+u'd2oxf3980lnb8l.cloudfront.net'
+>>> distro.id
+u'ECH69MOIW7613'
+>>> distro.status
+u'InProgress'
+>>> distro.config.comment
+u'My new distribution'
+>>> distro.config.origin
+u'mybucket.s3.amazonaws.com'
+>>> distro.config.caller_reference
+u'31b8d9cf-a623-4a28-b062-a91856fac6d0'
+>>> distro.config.enabled
+False
+
+Note that a new caller reference is created automatically, using
+uuid.uuid4(). The :class:`boto.cloudfront.distribution.Distribution`, :class:`boto.cloudfront.distribution.DistributionConfig` and
+:class:`boto.cloudfront.distribution.DistributionSummary` objects are defined in the :mod:`boto.cloudfront.distribution`
+module.
+
+To get a listing of all current distributions:
+
+>>> rs = c.get_all_distributions()
+>>> rs
+[<boto.cloudfront.distribution.DistributionSummary instance at 0xe8d4e0>,
+ <boto.cloudfront.distribution.DistributionSummary instance at 0xe8d788>]
+
+This returns a list of :class:`boto.cloudfront.distribution.DistributionSummary` objects. Note that paging
+is not yet supported! To get a :class:`boto.cloudfront.distribution.DistributionObject` from a
+:class:`boto.cloudfront.distribution.DistributionSummary` object:
+
+>>> ds = rs[1]
+>>> distro = ds.get_distribution()
+>>> distro.domain_name
+u'd2oxf3980lnb8l.cloudfront.net'
+
+To change a property of a distribution object:
+
+>>> distro.comment
+u'My new distribution'
+>>> distro.update(comment='This is a much better comment')
+>>> distro.comment
+'This is a much better comment'
+
+You can also enable/disable a distribution using the following
+convenience methods:
+
+>>> distro.enable() # just calls distro.update(enabled=True)
+
+or
+
+>>> distro.disable() # just calls distro.update(enabled=False)
+
+The only attributes that can be updated for a Distribution are
+comment, enabled and cnames.
+
+To delete a :class:`boto.cloudfront.distribution.Distribution`:
+
+>>> distro.delete()
+
+
+boto.cloudfront
+---------------
+
+.. automodule:: boto.cloudfront
+ :members:
+ :undoc-members:
+
+boto.cloudfront.distribution
+----------------------------
+
+.. automodule:: boto.cloudfront.distribution
+ :members:
+ :undoc-members:
+
+boto.cloudfront.exception
+-------------------------
+
+.. automodule:: boto.cloudfront.exception
+ :members:
+ :undoc-members:
\ No newline at end of file
diff --git a/docs/source/ref/contrib.rst b/docs/source/ref/contrib.rst
new file mode 100644
index 0000000..9262a0d
--- /dev/null
+++ b/docs/source/ref/contrib.rst
@@ -0,0 +1,32 @@
+.. ref-contrib
+
+=======
+contrib
+=======
+
+boto.contrib
+------------
+
+.. automodule:: boto.contrib
+ :members:
+ :undoc-members:
+
+boto.contrib.m2helpers
+----------------------
+
+.. note::
+
+ This module requires installation of M2Crypto__ in your Python path.
+
+ __ http://sandbox.rulemaker.net/ngps/m2/
+
+.. automodule:: boto.contrib.m2helpers
+ :members:
+ :undoc-members:
+
+boto.contrib.ymlmessage
+-----------------------
+
+.. automodule:: boto.contrib.ymlmessage
+ :members:
+ :undoc-members:
\ No newline at end of file
diff --git a/docs/source/ref/ec2.rst b/docs/source/ref/ec2.rst
new file mode 100644
index 0000000..e6215d7
--- /dev/null
+++ b/docs/source/ref/ec2.rst
@@ -0,0 +1,223 @@
+.. ref-ec2
+
+===
+EC2
+===
+
+boto.ec2
+--------
+
+.. automodule:: boto.ec2
+ :members:
+ :undoc-members:
+
+boto.ec2.address
+----------------
+
+.. automodule:: boto.ec2.address
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale
+------------------
+
+.. automodule:: boto.ec2.autoscale
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.activity
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.activity
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.group
+------------------------
+
+.. automodule:: boto.ec2.autoscale.group
+ :members:
+ :undoc-members:
+
+
+boto.ec2.autoscale.instance
+---------------------------
+
+.. automodule:: boto.ec2.autoscale.instance
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.launchconfig
+-------------------------------
+
+.. automodule:: boto.ec2.autoscale.launchconfig
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.request
+--------------------------
+
+.. automodule:: boto.ec2.autoscale.request
+ :members:
+ :undoc-members:
+
+boto.ec2.autoscale.trigger
+--------------------------
+
+.. automodule:: boto.ec2.autoscale.trigger
+ :members:
+ :undoc-members:
+
+boto.ec2.buyreservation
+-----------------------
+
+.. automodule:: boto.ec2.buyreservation
+ :members:
+ :undoc-members:
+
+boto.ec2.cloudwatch
+-------------------
+
+.. automodule:: boto.ec2.cloudwatch
+ :members:
+ :undoc-members:
+
+boto.ec2.cloudwatch.datapoint
+-----------------------------
+
+.. automodule:: boto.ec2.cloudwatch.datapoint
+ :members:
+ :undoc-members:
+
+boto.ec2.cloudwatch.metric
+--------------------------
+
+.. automodule:: boto.ec2.cloudwatch.metric
+ :members:
+ :undoc-members:
+
+boto.ec2.connection
+-------------------
+
+.. automodule:: boto.ec2.connection
+ :members:
+ :undoc-members:
+
+boto.ec2.ec2object
+------------------
+
+.. automodule:: boto.ec2.ec2object
+ :members:
+ :undoc-members:
+
+boto.ec2.elb
+------------
+
+.. automodule:: boto.ec2.elb
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.healthcheck
+------------------------
+
+.. automodule:: boto.ec2.elb.healthcheck
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.instancestate
+--------------------------
+
+.. automodule:: boto.ec2.elb.instancestate
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.listelement
+------------------------
+
+.. automodule:: boto.ec2.elb.listelement
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.listener
+---------------------
+
+.. automodule:: boto.ec2.elb.listener
+ :members:
+ :undoc-members:
+
+boto.ec2.elb.loadbalancer
+-------------------------
+
+.. automodule:: boto.ec2.elb.loadbalancer
+ :members:
+ :undoc-members:
+
+boto.ec2.image
+--------------
+
+.. automodule:: boto.ec2.image
+ :members:
+ :undoc-members:
+
+boto.ec2.instance
+-----------------
+
+.. automodule:: boto.ec2.instance
+ :members:
+ :undoc-members:
+
+boto.ec2.instanceinfo
+---------------------
+
+.. automodule:: boto.ec2.instanceinfo
+ :members:
+ :undoc-members:
+
+boto.ec2.keypair
+----------------
+
+.. automodule:: boto.ec2.keypair
+ :members:
+ :undoc-members:
+
+boto.ec2.regioninfo
+-------------------
+
+.. automodule:: boto.ec2.regioninfo
+ :members:
+ :undoc-members:
+
+boto.ec2.reservedinstance
+-------------------------
+
+.. automodule:: boto.ec2.reservedinstance
+ :members:
+ :undoc-members:
+
+boto.ec2.securitygroup
+----------------------
+
+.. automodule:: boto.ec2.securitygroup
+ :members:
+ :undoc-members:
+
+boto.ec2.snapshot
+-----------------
+
+.. automodule:: boto.ec2.snapshot
+ :members:
+ :undoc-members:
+
+boto.ec2.volume
+---------------
+
+.. automodule:: boto.ec2.volume
+ :members:
+ :undoc-members:
+
+boto.ec2.zone
+-------------
+
+.. automodule:: boto.ec2.zone
+ :members:
+ :undoc-members:
\ No newline at end of file
diff --git a/docs/source/ref/emr.rst b/docs/source/ref/emr.rst
new file mode 100644
index 0000000..4392d24
--- /dev/null
+++ b/docs/source/ref/emr.rst
@@ -0,0 +1,34 @@
+.. _ref-emr:
+
+===
+EMR
+===
+
+boto.emr
+--------
+
+.. automodule:: boto.emr
+ :members:
+ :undoc-members:
+
+boto.emr.connection
+-------------------
+
+.. automodule:: boto.emr.connection
+ :members:
+ :undoc-members:
+
+boto.emr.step
+-------------
+
+.. automodule:: boto.emr.step
+ :members:
+ :undoc-members:
+
+boto.emr.emrobject
+------------------
+
+.. automodule:: boto.emr.emrobject
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/file.rst b/docs/source/ref/file.rst
new file mode 100644
index 0000000..f128477
--- /dev/null
+++ b/docs/source/ref/file.rst
@@ -0,0 +1,34 @@
+.. ref-s3:
+
+====
+file
+====
+
+boto.file.bucket
+----------------
+
+.. automodule:: boto.file.bucket
+ :members:
+ :undoc-members:
+
+boto.file.simpleresultset
+-------------------------
+
+.. automodule:: boto.file.simpleresultset
+ :members:
+ :undoc-members:
+
+boto.file.connection
+--------------------
+
+.. automodule:: boto.file.connection
+ :members:
+ :undoc-members:
+
+boto.file.key
+-------------
+
+.. automodule:: boto.file.key
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/fps.rst b/docs/source/ref/fps.rst
new file mode 100644
index 0000000..c160eee
--- /dev/null
+++ b/docs/source/ref/fps.rst
@@ -0,0 +1,19 @@
+.. ref-fps
+
+===
+fps
+===
+
+boto.fps
+--------
+
+.. automodule:: boto.fps
+ :members:
+ :undoc-members:
+
+boto.fps.connection
+-------------------
+
+.. automodule:: boto.fps.connection
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/gs.rst b/docs/source/ref/gs.rst
new file mode 100644
index 0000000..6f24a19
--- /dev/null
+++ b/docs/source/ref/gs.rst
@@ -0,0 +1,48 @@
+.. ref-gs:
+
+==
+GS
+==
+
+boto.gs.acl
+-----------
+
+.. automodule:: boto.gs.acl
+ :members:
+ :undoc-members:
+
+boto.gs.bucket
+--------------
+
+.. automodule:: boto.gs.bucket
+ :members:
+ :undoc-members:
+
+boto.gs.connection
+------------------
+
+.. automodule:: boto.gs.connection
+ :members:
+ :undoc-members:
+
+boto.gs.key
+-----------
+
+.. automodule:: boto.gs.key
+ :members:
+ :undoc-members:
+
+boto.gs.user
+------------
+
+.. automodule:: boto.gs.user
+ :members:
+ :undoc-members:
+
+boto.gs.resumable_upload_handler
+--------------------------------
+
+.. automodule:: boto.gs.resumable_upload_handler
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/iam.rst b/docs/source/ref/iam.rst
new file mode 100644
index 0000000..ace6170
--- /dev/null
+++ b/docs/source/ref/iam.rst
@@ -0,0 +1,20 @@
+.. ref-iam
+
+===
+IAM
+===
+
+boto.iam
+--------
+
+.. automodule:: boto.iam
+ :members:
+ :undoc-members:
+
+boto.iam.response
+-----------------
+
+.. automodule:: boto.iam.response
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/index.rst b/docs/source/ref/index.rst
new file mode 100644
index 0000000..08b8ef2
--- /dev/null
+++ b/docs/source/ref/index.rst
@@ -0,0 +1,30 @@
+.. _ref-index:
+
+=============
+API Reference
+=============
+
+.. toctree::
+ :maxdepth: 4
+
+ boto
+ cloudfront
+ contrib
+ ec2
+ fps
+ manage
+ mashups
+ mturk
+ pyami
+ rds
+ s3
+ gs
+ file
+ sdb
+ services
+ sns
+ sqs
+ vpc
+ emr
+ iam
+ route53
diff --git a/docs/source/ref/manage.rst b/docs/source/ref/manage.rst
new file mode 100644
index 0000000..a175d88
--- /dev/null
+++ b/docs/source/ref/manage.rst
@@ -0,0 +1,47 @@
+.. ref-manage
+
+======
+manage
+======
+
+boto.manage
+-----------
+
+.. automodule:: boto.manage
+ :members:
+ :undoc-members:
+
+boto.manage.cmdshell
+--------------------
+
+.. automodule:: boto.manage.cmdshell
+ :members:
+ :undoc-members:
+
+boto.manage.propget
+-------------------
+
+.. automodule:: boto.manage.propget
+ :members:
+ :undoc-members:
+
+boto.manage.server
+------------------
+
+.. automodule:: boto.manage.server
+ :members:
+ :undoc-members:
+
+boto.manage.task
+----------------
+
+.. automodule:: boto.manage.task
+ :members:
+ :undoc-members:
+
+boto.manage.volume
+------------------
+
+.. automodule:: boto.manage.volume
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/mashups.rst b/docs/source/ref/mashups.rst
new file mode 100644
index 0000000..5eca846
--- /dev/null
+++ b/docs/source/ref/mashups.rst
@@ -0,0 +1,40 @@
+.. ref-mashups
+
+=======
+mashups
+=======
+
+boto.mashups
+------------
+
+.. automodule:: boto.mashups
+ :members:
+ :undoc-members:
+
+boto.mashups.interactive
+------------------------
+
+.. automodule:: boto.mashups.interactive
+ :members:
+ :undoc-members:
+
+boto.mashups.iobject
+--------------------
+
+.. automodule:: boto.mashups.iobject
+ :members:
+ :undoc-members:
+
+boto.mashups.order
+------------------
+
+.. automodule:: boto.mashups.order
+ :members:
+ :undoc-members:
+
+boto.mashups.server
+-------------------
+
+.. automodule:: boto.mashups.server
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/mturk.rst b/docs/source/ref/mturk.rst
new file mode 100644
index 0000000..1c8429b
--- /dev/null
+++ b/docs/source/ref/mturk.rst
@@ -0,0 +1,47 @@
+.. ref-mturk
+
+=====
+mturk
+=====
+
+boto.mturk
+------------
+
+.. automodule:: boto.mturk
+ :members:
+ :undoc-members:
+
+boto.mturk.connection
+---------------------
+
+.. automodule:: boto.mturk.connection
+ :members:
+ :undoc-members:
+
+boto.mturk.notification
+-----------------------
+
+.. automodule:: boto.mturk.notification
+ :members:
+ :undoc-members:
+
+boto.mturk.price
+----------------
+
+.. automodule:: boto.mturk.price
+ :members:
+ :undoc-members:
+
+boto.mturk.qualification
+------------------------
+
+.. automodule:: boto.mturk.qualification
+ :members:
+ :undoc-members:
+
+boto.mturk.question
+-------------------
+
+.. automodule:: boto.mturk.question
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/pyami.rst b/docs/source/ref/pyami.rst
new file mode 100644
index 0000000..e573b34
--- /dev/null
+++ b/docs/source/ref/pyami.rst
@@ -0,0 +1,103 @@
+.. ref-pyami
+
+=====
+pyami
+=====
+
+boto.pyami
+--------------
+
+.. automodule:: boto.pyami
+ :members:
+ :undoc-members:
+
+boto.pyami.bootstrap
+--------------------
+
+.. automodule:: boto.pyami.bootstrap
+ :members:
+ :undoc-members:
+
+boto.pyami.config
+-----------------
+
+.. automodule:: boto.pyami.config
+ :members:
+ :undoc-members:
+
+boto.pyami.copybot
+------------------
+
+.. automodule:: boto.pyami.copybot
+ :members:
+ :undoc-members:
+
+boto.pyami.installers
+---------------------
+
+.. automodule:: boto.pyami.installers
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu
+----------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu.apache
+-----------------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu.apache
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu.ebs
+--------------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu.ebs
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu.installer
+--------------------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu.installer
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu.mysql
+----------------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu.mysql
+ :members:
+ :undoc-members:
+
+boto.pyami.installers.ubuntu.trac
+---------------------------------
+
+.. automodule:: boto.pyami.installers.ubuntu.trac
+ :members:
+ :undoc-members:
+
+boto.pyami.launch_ami
+---------------------
+
+.. automodule:: boto.pyami.launch_ami
+ :members:
+ :undoc-members:
+
+boto.pyami.scriptbase
+---------------------
+
+.. automodule:: boto.pyami.scriptbase
+ :members:
+ :undoc-members:
+
+boto.pyami.startup
+------------------
+
+.. automodule:: boto.pyami.startup
+ :members:
+ :undoc-members:
\ No newline at end of file
diff --git a/docs/source/ref/rds.rst b/docs/source/ref/rds.rst
new file mode 100644
index 0000000..7f02d33
--- /dev/null
+++ b/docs/source/ref/rds.rst
@@ -0,0 +1,47 @@
+.. ref-rds
+
+===
+RDS
+===
+
+boto.rds
+--------
+
+.. automodule:: boto.rds
+ :members:
+ :undoc-members:
+
+boto.rds.dbinstance
+-------------------
+
+.. automodule:: boto.rds.dbinstance
+ :members:
+ :undoc-members:
+
+boto.rds.dbsecuritygroup
+------------------------
+
+.. automodule:: boto.rds.dbsecuritygroup
+ :members:
+ :undoc-members:
+
+boto.rds.dbsnapshot
+-------------------
+
+.. automodule:: boto.rds.dbsnapshot
+ :members:
+ :undoc-members:
+
+boto.rds.event
+--------------
+
+.. automodule:: boto.rds.event
+ :members:
+ :undoc-members:
+
+boto.rds.parametergroup
+-----------------------
+
+.. automodule:: boto.rds.parametergroup
+ :members:
+ :undoc-members:
\ No newline at end of file
diff --git a/docs/source/ref/route53.rst b/docs/source/ref/route53.rst
new file mode 100644
index 0000000..3100801
--- /dev/null
+++ b/docs/source/ref/route53.rst
@@ -0,0 +1,27 @@
+.. ref-route53
+
+=======
+route53
+=======
+
+
+boto.route53
+------------
+
+.. automodule:: boto.route53
+ :members:
+ :undoc-members:
+
+boto.route53.hostedzone
+----------------------------
+
+.. automodule:: boto.route53.hostedzone
+ :members:
+ :undoc-members:
+
+boto.route53.exception
+-------------------------
+
+.. automodule:: boto.route53.exception
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/s3.rst b/docs/source/ref/s3.rst
new file mode 100644
index 0000000..86b411a
--- /dev/null
+++ b/docs/source/ref/s3.rst
@@ -0,0 +1,76 @@
+.. ref-s3:
+
+===
+S3
+===
+
+boto.s3.acl
+-----------
+
+.. automodule:: boto.s3.acl
+ :members:
+ :undoc-members:
+
+boto.s3.bucket
+--------------
+
+.. automodule:: boto.s3.bucket
+ :members:
+ :undoc-members:
+
+boto.s3.bucketlistresultset
+---------------------------
+
+.. automodule:: boto.s3.bucketlistresultset
+ :members:
+ :undoc-members:
+
+boto.s3.connection
+------------------
+
+.. automodule:: boto.s3.connection
+ :members:
+ :undoc-members:
+
+boto.s3.key
+-----------
+
+.. automodule:: boto.s3.key
+ :members:
+ :undoc-members:
+
+boto.s3.prefix
+--------------
+
+.. automodule:: boto.s3.prefix
+ :members:
+ :undoc-members:
+
+boto.s3.user
+------------
+
+.. automodule:: boto.s3.user
+ :members:
+ :undoc-members:
+
+boto.s3.multipart
+-----------------
+
+.. automodule:: boto.s3.multipart
+ :members:
+ :undoc-members:
+
+boto.s3.resumable_download_handler
+----------------------------------
+
+.. automodule:: boto.s3.resumable_download_handler
+ :members:
+ :undoc-members:
+
+boto.s3.deletemarker
+--------------------
+
+.. automodule:: boto.s3.deletemarker
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/sdb.rst b/docs/source/ref/sdb.rst
new file mode 100644
index 0000000..8b96d00
--- /dev/null
+++ b/docs/source/ref/sdb.rst
@@ -0,0 +1,144 @@
+.. ref-sdb
+
+===
+sdb
+===
+
+boto.sdb
+--------
+
+.. automodule:: boto.sdb
+ :members:
+ :undoc-members:
+
+boto.sdb.connection
+-------------------
+
+.. automodule:: boto.sdb.connection
+ :members:
+ :undoc-members:
+
+boto.sdb.db
+-----------
+
+.. automodule:: boto.sdb.db
+ :members:
+ :undoc-members:
+
+boto.sdb.db.blob
+----------------
+
+.. automodule:: boto.sdb.db.blob
+ :members:
+ :undoc-members:
+
+boto.sdb.db.key
+---------------
+
+.. automodule:: boto.sdb.db.key
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager
+-------------------
+
+.. automodule:: boto.sdb.db.manager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.pgmanager
+-----------------------------
+
+.. note::
+
+ This module requires psycopg2__ to be installed in the Python path.
+
+ __ http://initd.org/
+
+.. automodule:: boto.sdb.db.manager.pgmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.sdbmanager
+------------------------------
+
+.. automodule:: boto.sdb.db.manager.sdbmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.manager.xmlmanager
+------------------------------
+
+.. automodule:: boto.sdb.db.manager.xmlmanager
+ :members:
+ :undoc-members:
+
+boto.sdb.db.model
+-----------------
+
+.. automodule:: boto.sdb.db.model
+ :members:
+ :undoc-members:
+
+boto.sdb.db.property
+--------------------
+
+.. automodule:: boto.sdb.db.property
+ :members:
+ :undoc-members:
+
+boto.sdb.db.query
+-----------------
+
+.. automodule:: boto.sdb.db.query
+ :members:
+ :undoc-members:
+
+boto.sdb.domain
+---------------
+
+.. automodule:: boto.sdb.domain
+ :members:
+ :undoc-members:
+
+boto.sdb.item
+-------------
+
+.. automodule:: boto.sdb.item
+ :members:
+ :undoc-members:
+
+boto.sdb.persist
+----------------
+
+.. automodule:: boto.sdb.persist
+ :members:
+ :undoc-members:
+
+boto.sdb.persist.checker
+------------------------
+
+.. automodule:: boto.sdb.persist.checker
+ :members:
+ :undoc-members:
+
+boto.sdb.persist.object
+-----------------------
+
+.. automodule:: boto.sdb.persist.object
+ :members:
+ :undoc-members:
+
+boto.sdb.persist.property
+-------------------------
+
+.. automodule:: boto.sdb.persist.property
+ :members:
+ :undoc-members:
+
+boto.sdb.queryresultset
+-----------------------
+
+.. automodule:: boto.sdb.queryresultset
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/services.rst b/docs/source/ref/services.rst
new file mode 100644
index 0000000..aa73dcc
--- /dev/null
+++ b/docs/source/ref/services.rst
@@ -0,0 +1,61 @@
+.. ref-services
+
+========
+services
+========
+
+boto.services
+-------------
+
+.. automodule:: boto.services
+ :members:
+ :undoc-members:
+
+boto.services.bs
+----------------
+
+.. automodule:: boto.services.bs
+ :members:
+ :undoc-members:
+
+boto.services.message
+---------------------
+
+.. automodule:: boto.services.message
+ :members:
+ :undoc-members:
+
+boto.services.result
+--------------------
+
+.. automodule:: boto.services.result
+ :members:
+ :undoc-members:
+
+boto.services.service
+---------------------
+
+.. automodule:: boto.services.service
+ :members:
+ :undoc-members:
+
+boto.services.servicedef
+------------------------
+
+.. automodule:: boto.services.servicedef
+ :members:
+ :undoc-members:
+
+boto.services.sonofmmm
+----------------------
+
+.. automodule:: boto.services.sonofmmm
+ :members:
+ :undoc-members:
+
+boto.services.submit
+--------------------
+
+.. automodule:: boto.services.submit
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/sns.rst b/docs/source/ref/sns.rst
new file mode 100644
index 0000000..6f840f8
--- /dev/null
+++ b/docs/source/ref/sns.rst
@@ -0,0 +1,17 @@
+.. ref-sns
+
+===
+SNS
+===
+
+boto.sns
+--------
+
+.. automodule:: boto.sns
+ :members:
+ :undoc-members:
+
+.. autoclass:: boto.sns.SNSConnection
+ :members:
+ :undoc-members:
+
diff --git a/docs/source/ref/sqs.rst b/docs/source/ref/sqs.rst
new file mode 100644
index 0000000..86aa2b4
--- /dev/null
+++ b/docs/source/ref/sqs.rst
@@ -0,0 +1,54 @@
+.. ref-sqs
+
+====
+SQS
+====
+
+boto.sqs
+--------
+
+.. automodule:: boto.sqs
+ :members:
+ :undoc-members:
+
+boto.sqs.attributes
+-------------------
+
+.. automodule:: boto.sqs.attributes
+ :members:
+ :undoc-members:
+
+boto.sqs.connection
+-------------------
+
+.. automodule:: boto.sqs.connection
+ :members:
+ :undoc-members:
+
+boto.sqs.jsonmessage
+--------------------
+
+.. automodule:: boto.sqs.jsonmessage
+ :members:
+ :undoc-members:
+
+boto.sqs.message
+----------------
+
+.. automodule:: boto.sqs.message
+ :members:
+ :undoc-members:
+
+boto.sqs.queue
+--------------
+
+.. automodule:: boto.sqs.queue
+ :members:
+ :undoc-members:
+
+boto.sqs.regioninfo
+-------------------
+
+.. automodule:: boto.sqs.regioninfo
+ :members:
+ :undoc-members:
diff --git a/docs/source/ref/vpc.rst b/docs/source/ref/vpc.rst
new file mode 100644
index 0000000..dfa4c91
--- /dev/null
+++ b/docs/source/ref/vpc.rst
@@ -0,0 +1,54 @@
+.. _ref-vpc:
+
+====
+VPC
+====
+
+boto.vpc
+--------
+
+.. automodule:: boto.vpc
+ :members:
+ :undoc-members:
+
+boto.vpc.customergateway
+------------------------
+
+.. automodule:: boto.vpc.customergateway
+ :members:
+ :undoc-members:
+
+boto.vpc.dhcpoptions
+--------------------
+
+.. automodule:: boto.vpc.dhcpoptions
+ :members:
+ :undoc-members:
+
+boto.vpc.subnet
+---------------
+
+.. automodule:: boto.vpc.subnet
+ :members:
+ :undoc-members:
+
+boto.vpc.vpc
+------------
+
+.. automodule:: boto.vpc.vpc
+ :members:
+ :undoc-members:
+
+boto.vpc.vpnconnection
+----------------------
+
+.. automodule:: boto.vpc.vpnconnection
+ :members:
+ :undoc-members:
+
+boto.vpc.vpngateway
+-------------------
+
+.. automodule:: boto.vpc.vpngateway
+ :members:
+ :undoc-members:
diff --git a/docs/source/s3_tut.rst b/docs/source/s3_tut.rst
new file mode 100644
index 0000000..d1bdbae
--- /dev/null
+++ b/docs/source/s3_tut.rst
@@ -0,0 +1,245 @@
+.. _s3_tut:
+
+======================================
+An Introduction to boto's S3 interface
+======================================
+
+This tutorial focuses on the boto interface to the Simple Storage Service
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing S3 is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.s3.connection import S3Connection
+>>> conn = S3Connection('<aws access key>', '<aws secret key>')
+
+At this point the variable conn will point to an S3Connection object. In
+this example, the AWS access key and AWS secret key are passed in to the
+method explicitly. Alternatively, you can set the environment variables:
+
+AWS_ACCESS_KEY_ID - Your AWS Access Key ID
+AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = S3Connection()
+
+There is also a shortcut function in the boto package, called connect_s3
+that may provide a slightly easier means of creating a connection:
+
+>>> import boto
+>>> conn = boto.connect_s3()
+
+In either case, conn will point to an S3Connection object which we will
+use throughout the remainder of this tutorial.
+
+Creating a Bucket
+-----------------
+
+Once you have a connection established with S3, you will probably want to
+create a bucket. A bucket is a container used to store key/value pairs
+in S3. A bucket can hold an unlimited amount of data so you could potentially
+have just one bucket in S3 for all of your information. Or, you could create
+separate buckets for different types of data. You can figure all of that out
+later, first let's just create a bucket. That can be accomplished like this:
+
+>>> bucket = conn.create_bucket('mybucket')
+Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ File "boto/connection.py", line 285, in create_bucket
+ raise S3CreateError(response.status, response.reason)
+boto.exception.S3CreateError: S3Error[409]: Conflict
+
+Whoa. What happened there? Well, the thing you have to know about
+buckets is that they are kind of like domain names. It's one flat name
+space that everyone who uses S3 shares. So, someone has already created
+a bucket called "mybucket" in S3 and that means no one else can grab that
+bucket name. So, you have to come up with a name that hasn't been taken yet.
+For example, something that uses a unique string as a prefix. Your
+AWS_ACCESS_KEY (NOT YOUR SECRET KEY!) could work but I'll leave it to
+your imagination to come up with something. I'll just assume that you
+found an acceptable name.
+
+The create_bucket method will create the requested bucket if it does not
+exist or will return the existing bucket if it does exist.
+
+Creating a Bucket In Another Location
+-------------------------------------
+
+The example above assumes that you want to create a bucket in the
+standard US region. However, it is possible to create buckets in
+other locations. To do so, first import the Location object from the
+boto.s3.connection module, like this:
+
+>>> from boto.s3.connection import Location
+>>> dir(Location)
+['DEFAULT', 'EU', 'USWest', 'APSoutheast', '__doc__', '__module__']
+>>>
+
+As you can see, the Location object defines four possible locations:
+DEFAULT, EU, USWest, and APSoutheast. By default, the location is the
+empty string which is interpreted as the US Classic Region, the
+original S3 region. However, by specifying another location at the
+time the bucket is created, you can instruct S3 to create the bucket
+in that location. For example:
+
+>>> conn.create_bucket('mybucket', location=Location.EU)
+
+will create the bucket in the EU region (assuming the name is available).
+
+Storing Data
+----------------
+
+Once you have a bucket, presumably you will want to store some data
+in it. S3 doesn't care what kind of information you store in your objects
+or what format you use to store it. All you need is a key that is unique
+within your bucket.
+
+The Key object is used in boto to keep track of data stored in S3. To store
+new data in S3, start by creating a new Key object:
+
+>>> from boto.s3.key import Key
+>>> k = Key(bucket)
+>>> k.key = 'foobar'
+>>> k.set_contents_from_string('This is a test of S3')
+
+The net effect of these statements is to create a new object in S3 with a
+key of "foobar" and a value of "This is a test of S3". To validate that
+this worked, quit out of the interpreter and start it up again. Then:
+
+>>> import boto
+>>> c = boto.connect_s3()
+>>> b = c.create_bucket('mybucket') # substitute your bucket name here
+>>> from boto.s3.key import Key
+>>> k = Key(b)
+>>> k.key = 'foobar'
+>>> k.get_contents_as_string()
+'This is a test of S3'
+
+So, we can definitely store and retrieve strings. A more interesting
+example may be to store the contents of a local file in S3 and then retrieve
+the contents to another local file.
+
+>>> k = Key(b)
+>>> k.key = 'myfile'
+>>> k.set_contents_from_filename('foo.jpg')
+>>> k.get_contents_to_filename('bar.jpg')
+
+There are a couple of things to note about this. When you send data to
+S3 from a file or filename, boto will attempt to determine the correct
+mime type for that file and send it as a Content-Type header. The boto
+package uses the standard mimetypes package in Python to do the mime type
+guessing. The other thing to note is that boto does stream the content
+to and from S3 so you should be able to send and receive large files without
+any problem.
+
+Listing All Available Buckets
+-----------------------------
+In addition to accessing specific buckets via the create_bucket method
+you can also get a list of all available buckets that you have created.
+
+>>> rs = conn.get_all_buckets()
+
+This returns a ResultSet object (see the SQS Tutorial for more info on
+ResultSet objects). The ResultSet can be used as a sequence or list type
+object to retrieve Bucket objects.
+
+>>> len(rs)
+11
+>>> for b in rs:
+... print b.name
+...
+<listing of available buckets>
+>>> b = rs[0]
+
+Setting / Getting the Access Control List for Buckets and Keys
+--------------------------------------------------------------
+The S3 service provides the ability to control access to buckets and keys
+within s3 via the Access Control List (ACL) associated with each object in
+S3. There are two ways to set the ACL for an object:
+
+1. Create a custom ACL that grants specific rights to specific users. At the
+ moment, the users that are specified within grants have to be registered
+ users of Amazon Web Services so this isn't as useful or as general as it
+ could be.
+
+2. Use a "canned" access control policy. There are four canned policies
+ defined:
+ a. private: Owner gets FULL_CONTROL. No one else has any access rights.
+ b. public-read: Owners gets FULL_CONTROL and the anonymous principal is granted READ access.
+ c. public-read-write: Owner gets FULL_CONTROL and the anonymous principal is granted READ and WRITE access.
+ d. authenticated-read: Owner gets FULL_CONTROL and any principal authenticated as a registered Amazon S3 user is granted READ access.
+
+To set a canned ACL for a bucket, use the set_acl method of the Bucket object.
+The argument passed to this method must be one of the four permissible
+canned policies named in the list CannedACLStrings contained in acl.py.
+For example, to make a bucket readable by anyone:
+
+>>> b.set_acl('public-read')
+
+You can also set the ACL for Key objects, either by passing an additional
+argument to the above method:
+
+>>> b.set_acl('public-read', 'foobar')
+
+where 'foobar' is the key of some object within the bucket b or you can
+call the set_acl method of the Key object:
+
+>>> k.set_acl('public-read')
+
+You can also retrieve the current ACL for a Bucket or Key object using the
+get_acl object. This method parses the AccessControlPolicy response sent
+by S3 and creates a set of Python objects that represent the ACL.
+
+>>> acp = b.get_acl()
+>>> acp
+<boto.acl.Policy instance at 0x2e6940>
+>>> acp.acl
+<boto.acl.ACL instance at 0x2e69e0>
+>>> acp.acl.grants
+[<boto.acl.Grant instance at 0x2e6a08>]
+>>> for grant in acp.acl.grants:
+... print grant.permission, grant.display_name, grant.email_address, grant.id
+...
+FULL_CONTROL <boto.user.User instance at 0x2e6a30>
+
+The Python objects representing the ACL can be found in the acl.py module
+of boto.
+
+Both the Bucket object and the Key object also provide shortcut
+methods to simplify the process of granting individuals specific
+access. For example, if you want to grant an individual user READ
+access to a particular object in S3 you could do the following:
+
+>>> key = b.lookup('mykeytoshare')
+>>> key.add_email_grant('READ', 'foo@bar.com')
+
+The email address provided should be the one associated with the users
+AWS account. There is a similar method called add_user_grant that accepts the
+canonical id of the user rather than the email address.
+
+Setting/Getting Metadata Values on Key Objects
+----------------------------------------------
+S3 allows arbitrary user metadata to be assigned to objects within a bucket.
+To take advantage of this S3 feature, you should use the set_metadata and
+get_metadata methods of the Key object to set and retrieve metadata associated
+with an S3 object. For example:
+
+>>> k = Key(b)
+>>> k.key = 'has_metadata'
+>>> k.set_metadata('meta1', 'This is the first metadata value')
+>>> k.set_metadata('meta2', 'This is the second metadata value')
+>>> k.set_contents_from_filename('foo.txt')
+
+This code associates two metadata key/value pairs with the Key k. To retrieve
+those values later:
+
+>>> k = b.get_key('has_metadata')
+>>> k.get_metadata('meta1')
+'This is the first metadata value'
+>>> k.get_metadata('meta2')
+'This is the second metadata value'
+>>>
diff --git a/docs/source/sqs_tut.rst b/docs/source/sqs_tut.rst
new file mode 100644
index 0000000..8c3edc5
--- /dev/null
+++ b/docs/source/sqs_tut.rst
@@ -0,0 +1,231 @@
+.. _sqs_tut:
+
+=======================================
+An Introduction to boto's SQS interface
+=======================================
+
+This tutorial focuses on the boto interface to the Simple Queue Service
+from Amazon Web Services. This tutorial assumes that you have already
+downloaded and installed boto.
+
+Creating a Connection
+---------------------
+The first step in accessing SQS is to create a connection to the service.
+There are two ways to do this in boto. The first is:
+
+>>> from boto.sqs.connection import SQSConnection
+>>> conn = SQSConnection('<aws access key>', '<aws secret key>')
+
+At this point the variable conn will point to an SQSConnection object. In
+this example, the AWS access key and AWS secret key are passed in to the
+method explicitly. Alternatively, you can set the environment variables:
+
+AWS_ACCESS_KEY_ID - Your AWS Access Key ID
+AWS_SECRET_ACCESS_KEY - Your AWS Secret Access Key
+
+and then call the constructor without any arguments, like this:
+
+>>> conn = SQSConnection()
+
+There is also a shortcut function in the boto package, called connect_sqs
+that may provide a slightly easier means of creating a connection:
+
+>>> import boto
+>>> conn = boto.connect_sqs()
+
+In either case, conn will point to an SQSConnection object which we will
+use throughout the remainder of this tutorial.
+
+Creating a Queue
+----------------
+
+Once you have a connection established with SQS, you will probably want to
+create a queue. That can be accomplished like this:
+
+>>> q = conn.create_queue('myqueue')
+
+The create_queue method will create the requested queue if it does not
+exist or will return the existing queue if it does exist. There is an
+optional parameter to create_queue called visibility_timeout. This basically
+controls how long a message will remain invisible to other queue readers
+once it has been read (see SQS documentation for more detailed explanation).
+If this is not explicitly specified the queue will be created with whatever
+default value SQS provides (currently 30 seconds). If you would like to
+specify another value, you could do so like this:
+
+>>> q = conn.create_queue('myqueue', 120)
+
+This would establish a default visibility timeout for this queue of 120
+seconds. As you will see later on, this default value for the queue can
+also be overridden each time a message is read from the queue. If you want
+to check what the default visibility timeout is for a queue:
+
+>>> q.get_timeout()
+30
+>>>
+
+Writing Messages
+----------------
+
+Once you have a queue, presumably you will want to write some messages
+to it. SQS doesn't care what kind of information you store in your messages
+or what format you use to store it. As long as the amount of data per
+message is less than or equal to 256Kb, it's happy.
+
+However, you may have a lot of specific requirements around the format of
+that data. For example, you may want to store one big string or you might
+want to store something that looks more like RFC822 messages or you might want
+to store a binary payload such as pickled Python objects.
+
+The way boto deals with this is to define a simple Message object that
+treats the message data as one big string which you can set and get. If that
+Message object meets your needs, you're good to go. However, if you need to
+incorporate different behavior in your message or handle different types of
+data you can create your own Message class. You just need to register that
+class with the queue so that it knows that when you read a message from the
+queue that it should create one of your message objects rather than the
+default boto Message object. To register your message class, you would:
+
+>>> q.set_message_class(MyMessage)
+
+where MyMessage is the class definition for your message class. Your
+message class should subclass the boto Message because there is a small
+bit of Python magic happening in the __setattr__ method of the boto Message
+class.
+
+For this tutorial, let's just assume that we are using the boto Message
+class. So, first we need to create a Message object:
+
+>>> from boto.sqs.message import Message
+>>> m = Message()
+>>> m.set_body('This is my first message.')
+>>> status = q.write(m)
+
+The write method returns a True if everything went well. If the write
+didn't succeed it will either return a False (meaning SQS simply chose
+not to write the message for some reason) or an exception if there was
+some sort of problem with the request.
+
+Reading Messages
+----------------
+
+So, now we have a message in our queue. How would we go about reading it?
+Here's one way:
+
+>>> rs = q.get_messages()
+>>> len(rs)
+1
+>>> m = rs[0]
+>>> m.get_body()
+u'This is my first message.'
+
+The get_messages method also returns a ResultSet object as described
+above. In addition to the special attributes that we already talked
+about the ResultSet object also contains any results returned by the
+request. To get at the results you can treat the ResultSet as a
+sequence object (e.g. a list). We can check the length (how many results)
+and access particular items within the list using the slice notation
+familiar to Python programmers.
+
+At this point, we have read the message from the queue and SQS will make
+sure that this message remains invisible to other readers of the queue
+until the visibility timeout period for the queue expires. If I delete
+the message before the timeout period expires then no one will ever see
+the message again. However, if I don't delete it (maybe because I crashed
+or failed in some way, for example) it will magically reappear in my queue
+for someone else to read. If you aren't happy with the default visibility
+timeout defined for the queue, you can override it when you read a message:
+
+>>> q.get_messages(visibility_timeout=60)
+
+This means that regardless of what the default visibility timeout is for
+the queue, this message will remain invisible to other readers for 60
+seconds.
+
+The get_messages method can also return more than a single message. By
+passing a num_messages parameter (defaults to 1) you can control the maximum
+number of messages that will be returned by the method. To show this
+feature off, first let's load up a few more messages.
+
+>>> for i in range(1, 11):
+... m = Message()
+... m.set_body('This is message %d' % i)
+... q.write(m)
+...
+>>> rs = q.get_messages(10)
+>>> len(rs)
+10
+
+Don't be alarmed if the length of the result set returned by the get_messages
+call is less than 10. Sometimes it takes some time for new messages to become
+visible in the queue. Give it a minute or two and they will all show up.
+
+If you want a slightly simpler way to read messages from a queue, you
+can use the read method. It will either return the message read or
+it will return None if no messages were available. You can also pass
+a visibility_timeout parameter to read, if you desire:
+
+>>> m = q.read(60)
+>>> m.get_body()
+u'This is my first message.'
+
+Deleting Messages and Queues
+----------------------------
+
+Note that the first message we put in the queue is still there, even though
+we have read it a number of times. That's because we never deleted it. To
+remove a message from a queue:
+
+>>> q.delete_message(m)
+[]
+
+If I want to delete the entire queue, I would use:
+
+>>> conn.delete_queue(q)
+
+However, this won't succeed unless the queue is empty.
+
+Listing All Available Queues
+----------------------------
+In addition to accessing specific queues via the create_queue method
+you can also get a list of all available queues that you have created.
+
+>>> rs = conn.get_all_queues()
+
+This returns a ResultSet object, as described above. The ResultSet
+can be used as a sequence or list type object to retrieve Queue objects.
+
+>>> len(rs)
+11
+>>> for q in rs:
+... print q.id
+...
+<listing of available queues>
+>>> q = rs[0]
+
+Other Stuff
+-----------
+
+That covers the basic operations of creating queues, writing messages,
+reading messages, deleting messages, and deleting queues. There are a
+few utility methods in boto that might be useful as well. For example,
+to count the number of messages in a queue:
+
+>>> q.count()
+10
+
+This can be handy, but this command, as well as the other two utility methods
+I'll describe in a minute, are inefficient and should be used with caution
+on queues with lots of messages (e.g. many hundreds or more). Similarly,
+you can clear (delete) all messages in a queue with:
+
+>>> q.clear()
+
+Be REAL careful with that one! Finally, if you want to dump all of the
+messages in a queue to a local file:
+
+>>> q.dump('messages.txt', sep='\n------------------\n')
+
+This will read all of the messages in the queue and write the bodies of
+each of the messages to the file messages.txt. The optional sep argument
+is a separator that will be printed between each message body in the file.
diff --git a/docs/source/vpc_tut.rst b/docs/source/vpc_tut.rst
new file mode 100644
index 0000000..0040866
--- /dev/null
+++ b/docs/source/vpc_tut.rst
@@ -0,0 +1,88 @@
+.. _vpc_tut:
+
+=======================================
+An Introduction to boto's VPC interface
+=======================================
+
+This tutorial is based on the examples in the Amazon Virtual Private
+Cloud Getting Started Guide (http://docs.amazonwebservices.com/AmazonVPC/latest/GettingStartedGuide/).
+In each example, it tries to show the boto requests that correspond to
+the AWS command line tools.
+
+Creating a VPC connection
+-------------------------
+First, we need to create a new VPC connection:
+
+>>> from boto.vpc import VPCConnection
+>>> c = VPCConnection()
+
+To create a VPC
+---------------
+Now that we have a VPC connection, we can create our first VPC.
+
+>>> vpc = c.create_vpc('10.0.0.0/24')
+>>> vpc
+VPC:vpc-6b1fe402
+>>> vpc.id
+u'vpc-6b1fe402'
+>>> vpc.state
+u'pending'
+>>> vpc.cidr_block
+u'10.0.0.0/24'
+>>> vpc.dhcp_options_id
+u'default'
+>>>
+
+To create a subnet
+------------------
+The next step is to create a subnet to associate with your VPC.
+
+>>> subnet = c.create_subnet(vpc.id, '10.0.0.0/25')
+>>> subnet.id
+u'subnet-6a1fe403'
+>>> subnet.state
+u'pending'
+>>> subnet.cidr_block
+u'10.0.0.0/25'
+>>> subnet.available_ip_address_count
+123
+>>> subnet.availability_zone
+u'us-east-1b'
+>>>
+
+To create a customer gateway
+----------------------------
+Next, we create a customer gateway.
+
+>>> cg = c.create_customer_gateway('ipsec.1', '12.1.2.3', 65534)
+>>> cg.id
+u'cgw-b6a247df'
+>>> cg.type
+u'ipsec.1'
+>>> cg.state
+u'available'
+>>> cg.ip_address
+u'12.1.2.3'
+>>> cg.bgp_asn
+u'65534'
+>>>
+
+To create a VPN gateway
+-----------------------
+
+>>> vg = c.create_vpn_gateway('ipsec.1')
+>>> vg.id
+u'vgw-44ad482d'
+>>> vg.type
+u'ipsec.1'
+>>> vg.state
+u'pending'
+>>> vg.availability_zone
+u'us-east-1b'
+>>>
+
+Attaching a VPN Gateway to a VPC
+--------------------------------
+
+>>> vg.attach(vpc.id)
+>>>
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000..44ed077
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,305 @@
+# lint Python modules using external checkers.
+#
+# This is the main checker controlling the other ones and the reports
+# generation. It is itself both a raw checker and an astng checker in order
+# to:
+# * handle message activation / deactivation at the module level
+# * handle some basic but necessary stats'data (number of classes, methods...)
+#
+[MASTER]
+
+
+# Specify a configuration file.
+#rcfile=
+
+# Profiled execution.
+profile=no
+
+# Add <file or directory> to the black list. It should be a base name, not a
+# path. You may set this option multiple times.
+ignore=.svn
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# Set the cache size for astng objects.
+cache-size=500
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+
+[MESSAGES CONTROL]
+
+# Enable only checker(s) with the given id(s). This option conflict with the
+# disable-checker option
+#enable-checker=
+
+# Enable all checker(s) except those with the given id(s). This option conflict
+# with the disable-checker option
+#disable-checker=
+
+# Enable all messages in the listed categories.
+#enable-msg-cat=
+
+# Disable all messages in the listed categories.
+#disable-msg-cat=
+
+# Enable the message(s) with the given id(s).
+#enable-msg=
+
+# Disable the message(s) with the given id(s).
+# disable-msg=C0323,W0142,C0301,C0103,C0111,E0213,C0302,C0203,W0703,R0201
+disable-msg=C0111,C0103,W0703,W0702
+
+[REPORTS]
+
+# set the output format. Available formats are text, parseable, colorized and
+# html
+output-format=colorized
+
+# Include message's id in output
+include-ids=yes
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note).You have access to the variables errors warning, statement which
+# respectively contain the number of errors / warnings messages and the total
+# number of statements analyzed. This is used by the global evaluation report
+# (R0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Add a comment according to your evaluation note. This is used by the global
+# evaluation report (R0004).
+comment=no
+
+# Enable the report(s) with the given id(s).
+#enable-report=
+
+# Disable the report(s) with the given id(s).
+#disable-report=
+
+# checks for
+# * unused variables / imports
+# * undefined variables
+# * redefinition of variable from builtins or from an outer scope
+# * use of variable before assignment
+#
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=yes
+
+# A regular expression matching names used for dummy variables (i.e. not used).
+dummy-variables-rgx=_|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid to define new builtins when possible.
+additional-builtins=
+
+
+# try to find bugs in the code using type inference
+#
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# When zope mode is activated, consider the acquired-members option to ignore
+# access to some undefined attributes.
+zope=no
+
+# List of members which are usually get through zope's acquisition mecanism and
+# so shouldn't trigger E0201 when accessed (need zope=yes to be considered).
+acquired-members=REQUEST,acl_users,aq_parent
+
+
+# checks for :
+# * doc strings
+# * modules / classes / functions / methods / arguments / variables name
+# * number of arguments, local variables, branches, returns and statements in
+# functions, methods
+# * required module attributes
+# * dangerous default values as arguments
+# * redefinition of function / method / class
+# * uses of the global statement
+#
+[BASIC]
+
+# Required attributes for module, separated by a comma
+required-attributes=
+
+# Regular expression which should only match functions or classes name which do
+# not require a docstring
+no-docstring-rgx=__.*__
+
+# Regular expression which should only match correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression which should only match correct module level names
+const-rgx=(([A-Z_][A-Z1-9_]*)|(__.*__))$
+
+# Regular expression which should only match correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression which should only match correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct instance attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match correct list comprehension /
+# generator expression variable names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=apply,input
+
+
+# checks for sign of poor/misdesign:
+# * number of methods, attributes, local variables...
+# * size, complexity of functions, methods
+#
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=12
+
+# Maximum number of locals for function / method body
+max-locals=30
+
+# Maximum number of return / yield for function / method body
+max-returns=12
+
+# Maximum number of branch for function / method body
+max-branchs=30
+
+# Maximum number of statements in function / method body
+max-statements=60
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=20
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=0
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+
+# checks for
+# * external modules dependencies
+# * relative / wildcard imports
+# * cyclic imports
+# * uses of deprecated modules
+#
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report R0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report R0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report R0402 must
+# not be disabled)
+int-import-graph=
+
+
+# checks for :
+# * methods without self as first argument
+# * overridden methods signature
+# * access only to existant members via self
+# * attributes not defined in the __init__ method
+# * supported interfaces implementation
+# * unreachable code
+#
+[CLASSES]
+
+# List of interface methods to ignore, separated by a comma. This is used for
+# instance to not check methods defines in Zope's Interface base class.
+# ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+
+# checks for similarities and duplicated code. This computation may be
+# memory / CPU intensive, so you should disable it if you experiments some
+# problems.
+#
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=5
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+
+# checks for:
+# * warning notes in the code like FIXME, XXX
+# * PEP 263: source code with non ascii character but no encoding declaration
+#
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO,BUG:
+
+
+# checks for :
+# * unauthorized constructions
+# * strict indentation
+# * line length
+# * use of <> instead of !=
+#
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=80
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+
+[MESSAGES CONTROL]
+disable-msg=C0301,C0111,C0103,R0201,W0702,C0324
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..36af722
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+
+# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/
+# Copyright (c) 2010, Eucalyptus Systems, Inc.
+# All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+import sys
+
+from boto import Version
+
+install_requires = []
+maj, min, micro, rel, serial = sys.version_info
+if (maj, min) == (2, 4):
+ # boto needs hashlib module which is not in py2.4
+ install_requires.append("hashlib")
+
+setup(name = "boto",
+ version = Version,
+ description = "Amazon Web Services Library",
+ long_description="Python interface to Amazon's Web Services.",
+ author = "Mitch Garnaat",
+ author_email = "mitch@garnaat.com",
+ scripts = ["bin/sdbadmin", "bin/elbadmin", "bin/cfadmin",
+ "bin/s3put", "bin/fetch_file", "bin/launch_instance",
+ "bin/list_instances", "bin/taskadmin", "bin/kill_instance",
+ "bin/bundle_image", "bin/pyami_sendmail", "bin/lss3",
+ "bin/cq", "bin/route53"],
+ install_requires=install_requires,
+ url = "http://code.google.com/p/boto/",
+ packages = [ 'boto', 'boto.sqs', 'boto.s3', 'boto.gs', 'boto.file',
+ 'boto.ec2', 'boto.ec2.cloudwatch', 'boto.ec2.autoscale',
+ 'boto.ec2.elb', 'boto.sdb', 'boto.sdb.persist',
+ 'boto.sdb.db', 'boto.sdb.db.manager', 'boto.mturk',
+ 'boto.pyami', 'boto.mashups', 'boto.contrib', 'boto.manage',
+ 'boto.services', 'boto.tests', 'boto.cloudfront',
+ 'boto.rds', 'boto.vpc', 'boto.fps', 'boto.emr', 'boto.sns',
+ 'boto.ecs', 'boto.iam', 'boto.route53', 'boto.ses'],
+ license = 'MIT',
+ platforms = 'Posix; MacOS X; Windows',
+ classifiers = [ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: OS Independent',
+ 'Topic :: Internet',
+ ],
+ )
diff --git a/tests/db/test_lists.py b/tests/db/test_lists.py
new file mode 100644
index 0000000..d9c7639
--- /dev/null
+++ b/tests/db/test_lists.py
@@ -0,0 +1,97 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.sdb.db.property import ListProperty
+from boto.sdb.db.model import Model
+import time
+
+class SimpleListModel(Model):
+ """Minimal SDB-backed model used to exercise ListProperty round-trips."""
+ # Ordered list of ints; test_list_order asserts order survives put/get.
+ nums = ListProperty(int)
+ # Ordered list of strs; also queried by membership in test_query_equals.
+ strs = ListProperty(str)
+
+class TestLists(object):
+ """Integration tests for ListProperty (ordering, legacy encoding,
+ and membership-style querying).  Requires live SimpleDB access and
+ boto credentials; the time.sleep(3) calls below presumably wait out
+ SimpleDB's eventual consistency -- confirm against the db backend."""
+
+ def setup_class(cls):
+ """Setup this class"""
+ # Collect every object created so teardown_class can delete it.
+ cls.objs = []
+
+ def teardown_class(cls):
+ """Remove our objects"""
+ for o in cls.objs:
+ try:
+ o.delete()
+ # Bare except: deliberate best-effort cleanup -- an object that
+ # was never persisted (or already deleted) must not fail teardown.
+ except:
+ pass
+
+ def test_list_order(self):
+ """Testing the order of lists"""
+ t = SimpleListModel()
+ t.nums = [5,4,1,3,2]
+ t.strs = ["B", "C", "A", "D", "Foo"]
+ t.put()
+ self.objs.append(t)
+ # Wait for the write to become visible before reading back.
+ time.sleep(3)
+ t = SimpleListModel.get_by_id(t.id)
+ # Insertion order (not sorted order) must be preserved end-to-end.
+ assert(t.nums == [5,4,1,3,2])
+ assert(t.strs == ["B", "C", "A", "D", "Foo"])
+
+ def test_old_compat(self):
+ """Testing to make sure the old method of encoding lists will still return results"""
+ t = SimpleListModel()
+ t.put()
+ self.objs.append(t)
+ time.sleep(3)
+ # Write raw (pre-ordering-encoding) values directly to the backing
+ # item, bypassing ListProperty's encoder.
+ item = t._get_raw_item()
+ item['strs'] = ["A", "B", "C"]
+ item.save()
+ time.sleep(3)
+ t = SimpleListModel.get_by_id(t.id)
+ # Legacy encoding carries no order, so compare as sorted sets.
+ i1 = item['strs']
+ i1.sort()
+ i2 = t.strs
+ i2.sort()
+ assert(i1 == i2)
+
+ def test_query_equals(self):
+ """We noticed a slight problem with querying, since the query uses the same encoder,
+ it was asserting that the value was at the same position in the list, not just "in" the list"""
+ t = SimpleListModel()
+ t.strs = ["Bizzle", "Bar"]
+ t.put()
+ self.objs.append(t)
+ time.sleep(3)
+ # Equality on a list property must behave as membership, regardless
+ # of the element's position in the stored list.
+ assert(SimpleListModel.find(strs="Bizzle").count() == 1)
+ assert(SimpleListModel.find(strs="Bar").count() == 1)
+ assert(SimpleListModel.find(strs=["Bar","Bizzle"]).count() == 1)
+
+ def test_query_not_equals(self):
+ """Test a not equal filter"""
+ t = SimpleListModel()
+ t.strs = ["Fizzle"]
+ t.put()
+ self.objs.append(t)
+ time.sleep(3)
+ # Python 2 print statements -- debug output only, kept for diagnosis.
+ print SimpleListModel.all().filter("strs !=", "Fizzle").get_query()
+ for tt in SimpleListModel.all().filter("strs !=", "Fizzle"):
+ print tt.strs
+ assert("Fizzle" not in tt.strs)
diff --git a/tests/db/test_query.py b/tests/db/test_query.py
new file mode 100644
index 0000000..047bf87
--- /dev/null
+++ b/tests/db/test_query.py
@@ -0,0 +1,152 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+#
+from boto.sdb.db.property import ListProperty, StringProperty, ReferenceProperty, IntegerProperty
+from boto.sdb.db.model import Model
+import time
+
+class SimpleModel(Model):
+ """Base fixture model exercised by TestQuerying's filter tests."""
+ # Free-form name; matched with =, !=, and "like" filters below.
+ name = StringProperty()
+ # String list; queried by membership and "like" in the list tests.
+ strs = ListProperty(str)
+ # Integer field; used for the >, >=, <, <= comparison tests.
+ num = IntegerProperty()
+
+class SubModel(SimpleModel):
+ """Subclass fixture proving that queries on SimpleModel also see
+ subclass instances, and that reference filters (ref = None) work."""
+ # Back-reference to another SimpleModel; reachable from the target
+ # via the generated "reverse_ref" collection.
+ ref = ReferenceProperty(SimpleModel, collection_name="reverse_ref")
+
+
+class TestQuerying(object):
+ """Integration tests for the SDB query layer: find(), filter() with
+ =, !=, like, comparison operators, None, OR (list values) and AND
+ (stacked filters).  Requires live SimpleDB access; the time.sleep(3)
+ presumably waits out eventual consistency -- confirm."""
+
+ def setup_class(cls):
+ """Setup this class"""
+ # Three fixture objects created once and shared by every test:
+ # objs[0] plain, objs[1] referenced, objs[2] a SubModel holding
+ # a reference to objs[1].
+ cls.objs = []
+
+ o = SimpleModel()
+ o.name = "Simple Object"
+ o.strs = ["B", "A", "C", "Foo"]
+ o.num = 1
+ o.put()
+ cls.objs.append(o)
+
+ o2 = SimpleModel()
+ o2.name = "Referenced Object"
+ o2.num = 2
+ o2.put()
+ cls.objs.append(o2)
+
+ o3 = SubModel()
+ o3.name = "Sub Object"
+ o3.num = 3
+ o3.ref = o2
+ o3.put()
+ cls.objs.append(o3)
+
+ # Let the writes above propagate before any test queries run.
+ time.sleep(3)
+
+
+
+ def teardown_class(cls):
+ """Remove our objects"""
+ for o in cls.objs:
+ try:
+ o.delete()
+ # Bare except: best-effort cleanup; never fail the teardown.
+ except:
+ pass
+
+ def test_find(self):
+ """Test using the "Find" method"""
+ assert(SimpleModel.find(name="Simple Object").next().id == self.objs[0].id)
+ assert(SimpleModel.find(name="Referenced Object").next().id == self.objs[1].id)
+ # Querying the base class must also return SubModel instances.
+ assert(SimpleModel.find(name="Sub Object").next().id == self.objs[2].id)
+
+ def test_like_filter(self):
+ """Test a "like" filter"""
+ # "%" is the SDB wildcard: all three fixtures end in " Object".
+ query = SimpleModel.all()
+ query.filter("name like", "% Object")
+ assert(query.count() == 3)
+
+ query = SimpleModel.all()
+ query.filter("name not like", "% Object")
+ assert(query.count() == 0)
+
+ def test_equals_filter(self):
+ """Test an "=" and "!=" filter"""
+ query = SimpleModel.all()
+ query.filter("name =", "Simple Object")
+ assert(query.count() == 1)
+
+ query = SimpleModel.all()
+ query.filter("name !=", "Simple Object")
+ assert(query.count() == 2)
+
+ def test_or_filter(self):
+ """Test a filter function as an "or" """
+ # Passing a list as the value ORs the alternatives together.
+ query = SimpleModel.all()
+ query.filter("name =", ["Simple Object", "Sub Object"])
+ assert(query.count() == 2)
+
+ def test_and_filter(self):
+ """Test Multiple filters which are an "and" """
+ # Each successive filter() call narrows the result set (AND).
+ query = SimpleModel.all()
+ query.filter("name like", "% Object")
+ query.filter("name like", "Simple %")
+ assert(query.count() == 1)
+
+ def test_none_filter(self):
+ """Test filtering for a value that's not set"""
+ # Only the SubModel fixture has ref set, so the two plain
+ # SimpleModel objects match "ref = None".
+ query = SimpleModel.all()
+ query.filter("ref =", None)
+ assert(query.count() == 2)
+
+ def test_greater_filter(self):
+ """Test filtering Using >, >="""
+ query = SimpleModel.all()
+ query.filter("num >", 1)
+ assert(query.count() == 2)
+
+ query = SimpleModel.all()
+ query.filter("num >=", 1)
+ assert(query.count() == 3)
+
+ def test_less_filter(self):
+ """Test filtering Using <, <="""
+ query = SimpleModel.all()
+ query.filter("num <", 3)
+ assert(query.count() == 2)
+
+ query = SimpleModel.all()
+ query.filter("num <=", 3)
+ assert(query.count() == 3)
+
+ def test_query_on_list(self):
+ """Test querying on a list"""
+ # Equality against a list property matches membership at any index.
+ assert(SimpleModel.find(strs="A").next().id == self.objs[0].id)
+ assert(SimpleModel.find(strs="B").next().id == self.objs[0].id)
+ assert(SimpleModel.find(strs="C").next().id == self.objs[0].id)
+
+ def test_like(self):
+ """Test with a "like" expression"""
+ # Only objs[0] contains an element matching %oo% ("Foo").
+ query = SimpleModel.all()
+ query.filter("strs like", "%oo%")
+ # Python 2 print statement -- debug output of the generated query.
+ print query.get_query()
+ assert(query.count() == 1)
diff --git a/tests/db/test_sequence.py b/tests/db/test_sequence.py
new file mode 100644
index 0000000..35f4b35
--- /dev/null
+++ b/tests/db/test_sequence.py
@@ -0,0 +1,109 @@
+# Copyright (c) 2010 Chris Moyer http://coredumped.org/
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish, dis-
+# tribute, sublicense, and/or sell copies of the Software, and to permit
+# persons to whom the Software is furnished to do so, subject to the fol-
+# lowing conditions:
+#
+# The above copyright notice and this permission notice shall be included
+# in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
+# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+
+class TestDBHandler(object):
+ """Tests for boto.sdb.db.sequence: the pure-Python SequenceGenerator
+ and the SDB-backed Sequence (int counter, string incrementer, and
+ fibonacci).  NOTE(review): the class name says DBHandler but every
+ test here exercises sequences -- likely copied from another test
+ module; consider renaming."""
+
+ def setup_class(cls):
+ """Setup this class"""
+ # Collect every persisted Sequence so teardown_class can delete it.
+ cls.sequences = []
+
+ def teardown_class(cls):
+ """Remove our sequences"""
+ for s in cls.sequences:
+ try:
+ s.delete()
+ # Bare except: best-effort cleanup; never fail the teardown.
+ except:
+ pass
+
+ def test_sequence_generator_no_rollover(self):
+ """Test the sequence generator without rollover"""
+ from boto.sdb.db.sequence import SequenceGenerator
+ # Without rollover, exhausting the alphabet grows the string
+ # (odometer-style): "" -> A -> B -> C -> AA.
+ gen = SequenceGenerator("ABC")
+ assert(gen("") == "A")
+ assert(gen("A") == "B")
+ assert(gen("B") == "C")
+ assert(gen("C") == "AA")
+ # Incrementing the last char past "C" carries into the next column.
+ assert(gen("AC") == "BA")
+
+ def test_sequence_generator_with_rollover(self):
+ """Test the sequence generator with rollover"""
+ from boto.sdb.db.sequence import SequenceGenerator
+ # With rollover, the sequence wraps back to "A" instead of growing.
+ gen = SequenceGenerator("ABC", rollover=True)
+ assert(gen("") == "A")
+ assert(gen("A") == "B")
+ assert(gen("B") == "C")
+ assert(gen("C") == "A")
+
+ def test_sequence_simple_int(self):
+ """Test a simple counter sequence"""
+ from boto.sdb.db.sequence import Sequence
+ s = Sequence()
+ self.sequences.append(s)
+ assert(s.val == 0)
+ assert(s.next() == 1)
+ assert(s.next() == 2)
+ # A second handle to the same stored sequence (by id) must observe
+ # the same value -- state lives in SDB, not in the local object.
+ s2 = Sequence(s.id)
+ assert(s2.val == 2)
+ assert(s.next() == 3)
+ assert(s.val == 3)
+ assert(s2.val == 3)
+
+ def test_sequence_simple_string(self):
+ # Sequence with a string-increment function starts at "A".
+ from boto.sdb.db.sequence import Sequence,increment_string
+ s = Sequence(fnc=increment_string)
+ self.sequences.append(s)
+ assert(s.val == "A")
+ assert(s.next() == "B")
+
+ def test_fib(self):
+ """Test the fibonacci sequence generator"""
+ from boto.sdb.db.sequence import fib
+ # Just check the first few numbers in the sequence
+ # fib(v, lv) returns v + lv (next term from current and previous).
+ lv = 0
+ for v in [1,2,3,5,8,13,21,34,55,89,144]:
+ assert(fib(v,lv) == lv+v)
+ lv = fib(v,lv)
+
+ def test_sequence_fib(self):
+ """Test the fibonacci sequence"""
+ from boto.sdb.db.sequence import Sequence,fib
+ s = Sequence(fnc=fib)
+ s2 = Sequence(s.id)
+ self.sequences.append(s)
+ assert(s.val == 1)
+ # Just check the first few numbers in the sequence
+ for v in [1,2,3,5,8,13,21,34,55,89,144]:
+ assert(s.next() == v)
+ assert(s.val == v)
+ assert(s2.val == v) # it shouldn't matter which reference we use since it's guaranteed to be consistent
+
+ def test_sequence_string(self):
+ """Test the String incrementation sequence"""
+ from boto.sdb.db.sequence import Sequence,increment_string
+ s = Sequence(fnc=increment_string)
+ self.sequences.append(s)
+ assert(s.val == "A")
+ assert(s.next() == "B")
+ # Setting val directly persists it; incrementing past "Z" grows
+ # the string to "AA" (no rollover for the string sequence).
+ s.val = "Z"
+ assert(s.val == "Z")
+ assert(s.next() == "AA")