Posted to notifications@libcloud.apache.org by to...@apache.org on 2016/03/12 22:52:37 UTC
[03/10] libcloud git commit: [LIBCLOUD-802] Add drivers for Aliyun cloud
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/loadbalancer/drivers/slb.py
----------------------------------------------------------------------
diff --git a/libcloud/loadbalancer/drivers/slb.py b/libcloud/loadbalancer/drivers/slb.py
new file mode 100644
index 0000000..a7eab40
--- /dev/null
+++ b/libcloud/loadbalancer/drivers/slb.py
@@ -0,0 +1,754 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__all__ = [
+ 'SLBDriver'
+]
+
+import sys
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+from libcloud.common.aliyun import AliyunXmlResponse, SignedAliyunConnection
+from libcloud.common.types import LibcloudError
+from libcloud.loadbalancer.types import State
+from libcloud.loadbalancer.base import Algorithm, Driver, LoadBalancer, Member
+from libcloud.utils.misc import ReprMixin
+from libcloud.utils.py3 import u
+from libcloud.utils.xml import findattr, findtext, findall
+
+
+SLB_API_VERSION = '2014-05-15'
+SLB_API_HOST = 'slb.aliyuncs.com'
+DEFAULT_SIGNATURE_VERSION = '1.0'
+
+
+STATE_MAPPINGS = {
+ 'inactive': State.UNKNOWN,
+ 'active': State.RUNNING,
+ 'locked': State.PENDING
+}
+
+
+RESOURCE_EXTRA_ATTRIBUTES_MAP = {
+ 'balancer': {
+ 'create_timestamp': {
+ 'xpath': 'CreateTimeStamp',
+ 'transform_func': int
+ },
+ 'address_type': {
+ 'xpath': 'AddressType',
+ 'transform_func': u
+ },
+ 'region_id': {
+ 'xpath': 'RegionId',
+ 'transform_func': u
+ },
+ 'region_id_alias': {
+ 'xpath': 'RegionIdAlias',
+ 'transform_func': u
+ },
+ 'create_time': {
+ 'xpath': 'CreateTime',
+ 'transform_func': u
+ },
+ 'master_zone_id': {
+ 'xpath': 'MasterZoneId',
+ 'transform_func': u
+ },
+ 'slave_zone_id': {
+ 'xpath': 'SlaveZoneId',
+ 'transform_func': u
+ },
+ 'network_type': {
+ 'xpath': 'NetworkType',
+ 'transform_func': u
+ }
+ }
+}
+
+
+SLB_SCHEDULER_TO_ALGORITHM = {
+ 'wrr': Algorithm.WEIGHTED_ROUND_ROBIN,
+ 'wlc': Algorithm.WEIGHTED_LEAST_CONNECTIONS
+}
+
+
+ALGORITHM_TO_SLB_SCHEDULER = {
+ Algorithm.WEIGHTED_ROUND_ROBIN: 'wrr',
+ Algorithm.WEIGHTED_LEAST_CONNECTIONS: 'wlc'
+}
+
+
+class SLBConnection(SignedAliyunConnection):
+ version = SLB_API_VERSION
+ host = SLB_API_HOST
+ responseCls = AliyunXmlResponse
+ service_name = 'slb'
+
+
+class SLBLoadBalancerAttribute(object):
+ """
+ This class is used to get listeners and backend servers related to a
+ balancer.
+ listeners is a ``list`` of ``dict``; each element contains
+ 'ListenerPort' and 'ListenerProtocol' keys.
+ backend_servers is a ``list`` of ``dict``; each element contains
+ 'ServerId' and 'Weight' keys.
+ """
+ def __init__(self, balancer, listeners, backend_servers, extra=None):
+ self.balancer = balancer
+ self.listeners = listeners or []
+ self.backend_servers = backend_servers or []
+ self.extra = extra or {}
+
+ def is_listening(self, port):
+ for listener in self.listeners:
+ if listener.get('ListenerPort') == port:
+ return True
+ return False
+
+ def is_attached(self, member):
+ for server in self.backend_servers:
+ if server.get('ServerId') == member.id:
+ return True
+ return False
+
+ def __repr__(self):
+ return ('<SLBLoadBalancerAttribute id=%s, ports=%s, servers=%s ...>' %
+ (self.balancer.id, self.listeners, self.backend_servers))
+
+
+class SLBLoadBalancerListener(ReprMixin, object):
+ """
+ Base SLB load balancer listener class
+ """
+ _repr_attributes = ['port', 'backend_port', 'scheduler', 'bandwidth']
+ action = None
+ option_keys = []
+
+ def __init__(self, port, backend_port, algorithm, bandwidth, extra=None):
+ self.port = port
+ self.backend_port = backend_port
+ self.scheduler = ALGORITHM_TO_SLB_SCHEDULER.get(algorithm, 'wrr')
+ self.bandwidth = bandwidth
+ self.extra = extra or {}
+
+ @classmethod
+ def create(cls, port, backend_port, algorithm, bandwidth, extra=None):
+ return cls(port, backend_port, algorithm, bandwidth, extra=extra)
+
+ def get_create_params(self):
+ params = self.get_required_params()
+ options = self.get_optional_params()
+ options.update(params)
+ return options
+
+ def get_required_params(self):
+ params = {'Action': self.action,
+ 'ListenerPort': self.port,
+ 'BackendServerPort': self.backend_port,
+ 'Scheduler': self.scheduler,
+ 'Bandwidth': self.bandwidth}
+ return params
+
+ def get_optional_params(self):
+ options = {}
+ for option in self.option_keys:
+ if self.extra and option in self.extra:
+ options[option] = self.extra[option]
+ return options
+
+
+class SLBLoadBalancerHttpListener(SLBLoadBalancerListener):
+ """
+ This class represents a rule to route HTTP requests to the backends.
+ """
+ action = 'CreateLoadBalancerHTTPListener'
+ option_keys = ['XForwardedFor', 'StickySessionType', 'CookieTimeout',
+ 'Cookie', 'HealthCheckDomain', 'HealthCheckURI',
+ 'HealthCheckConnectPort', 'HealthyThreshold',
+ 'UnhealthyThreshold', 'HealthCheckTimeout',
+ 'HealthCheckInterval', 'HealthCheckHttpCode']
+
+ def __init__(self, port, backend_port, algorithm, bandwidth,
+ sticky_session, health_check, extra=None):
+ super(SLBLoadBalancerHttpListener, self).__init__(
+ port, backend_port, algorithm, bandwidth, extra=extra)
+ self.sticky_session = sticky_session
+ self.health_check = health_check
+
+ def get_required_params(self):
+ params = super(SLBLoadBalancerHttpListener,
+ self).get_required_params()
+ params['StickySession'] = self.sticky_session
+ params['HealthCheck'] = self.health_check
+ return params
+
+ @classmethod
+ def create(cls, port, backend_port, algorithm, bandwidth, extra={}):
+ if 'StickySession' not in extra:
+ raise AttributeError('StickySession is required')
+ if 'HealthCheck' not in extra:
+ raise AttributeError('HealthCheck is required')
+ sticky_session = extra['StickySession']
+ health_check = extra['HealthCheck']
+ return cls(port, backend_port, algorithm, bandwidth, sticky_session,
+ health_check, extra=extra)
+
+
+class SLBLoadBalancerHttpsListener(SLBLoadBalancerListener):
+ """
+ This class represents a rule to route HTTPS requests to the backends.
+ """
+ action = 'CreateLoadBalancerHTTPSListener'
+ option_keys = ['XForwardedFor', 'StickySessionType', 'CookieTimeout',
+ 'Cookie', 'HealthCheckDomain', 'HealthCheckURI',
+ 'HealthCheckConnectPort', 'HealthyThreshold',
+ 'UnhealthyThreshold', 'HealthCheckTimeout',
+ 'HealthCheckInterval', 'HealthCheckHttpCode']
+
+ def __init__(self, port, backend_port, algorithm, bandwidth,
+ sticky_session, health_check, certificate_id, extra=None):
+ super(SLBLoadBalancerHttpsListener, self).__init__(
+ port, backend_port, algorithm, bandwidth, extra=extra)
+ self.sticky_session = sticky_session
+ self.health_check = health_check
+ self.certificate_id = certificate_id
+
+ def get_required_params(self):
+ params = super(SLBLoadBalancerHttpsListener,
+ self).get_required_params()
+ params['StickySession'] = self.sticky_session
+ params['HealthCheck'] = self.health_check
+ params['ServerCertificateId'] = self.certificate_id
+ return params
+
+ @classmethod
+ def create(cls, port, backend_port, algorithm, bandwidth, extra={}):
+ if 'StickySession' not in extra:
+ raise AttributeError('StickySession is required')
+ if 'HealthCheck' not in extra:
+ raise AttributeError('HealthCheck is required')
+ if 'ServerCertificateId' not in extra:
+ raise AttributeError('ServerCertificateId is required')
+ sticky_session = extra['StickySession']
+ health_check = extra['HealthCheck']
+ certificate_id = extra['ServerCertificateId']
+ return cls(port, backend_port, algorithm, bandwidth, sticky_session,
+ health_check, certificate_id, extra=extra)
+
+
+class SLBLoadBalancerTcpListener(SLBLoadBalancerListener):
+ """
+ This class represents a rule to route TCP requests to the backends.
+ """
+ action = 'CreateLoadBalancerTCPListener'
+ option_keys = ['PersistenceTimeout', 'HealthCheckType',
+ 'HealthCheckDomain', 'HealthCheckURI',
+ 'HealthCheckConnectPort', 'HealthyThreshold',
+ 'UnhealthyThreshold', 'HealthCheckConnectTimeout',
+ 'HealthCheckInterval', 'HealthCheckHttpCode']
+
+
+class SLBLoadBalancerUdpListener(SLBLoadBalancerTcpListener):
+ """
+ This class represents a rule to route UDP requests to the backends.
+ """
+ action = 'CreateLoadBalancerUDPListener'
+ option_keys = ['PersistenceTimeout', 'HealthCheckConnectPort',
+ 'HealthyThreshold', 'UnhealthyThreshold',
+ 'HealthCheckConnectTimeout', 'HealthCheckInterval']
+
+
+class SLBServerCertificate(ReprMixin, object):
+ _repr_attributes = ['id', 'name', 'fingerprint']
+
+ def __init__(self, id, name, fingerprint):
+ self.id = id
+ self.name = name
+ self.fingerprint = fingerprint
+
+
+PROTOCOL_TO_LISTENER_MAP = {
+ 'http': SLBLoadBalancerHttpListener,
+ 'https': SLBLoadBalancerHttpsListener,
+ 'tcp': SLBLoadBalancerTcpListener,
+ 'udp': SLBLoadBalancerUdpListener
+}
+
+
+class SLBDriver(Driver):
+ """
+ Aliyun SLB load balancer driver.
+ """
+ name = 'Aliyun Server Load Balancer'
+ website = 'https://www.aliyun.com/product/slb'
+ connectionCls = SLBConnection
+ path = '/'
+ namespace = None
+
+ _VALUE_TO_ALGORITHM_MAP = SLB_SCHEDULER_TO_ALGORITHM
+
+ _ALGORITHM_TO_VALUE_MAP = ALGORITHM_TO_SLB_SCHEDULER
+
+ def __init__(self, access_id, secret, region):
+ super(SLBDriver, self).__init__(access_id, secret)
+ self.region = region
+
+ def list_protocols(self):
+ return list(PROTOCOL_TO_LISTENER_MAP.keys())
+
+ def list_balancers(self, ex_balancer_ids=[], ex_filters=None):
+ """
+ List all load balancers
+ @inherits :class:`Driver.list_balancers`
+ :keyword ex_balancer_ids: a list of balancer ids to filter results.
+ Only balancers whose id is in this list
+ will be returned
+ :type ex_balancer_ids: ``list``
+ :keyword ex_filters: attributes to filter results. Only balancers
+ which have all the desired attributes
+ and values will be returned
+ :type ex_filters: ``dict``
+ """
+ params = {'Action': 'DescribeLoadBalancers',
+ 'RegionId': self.region}
+ if ex_balancer_ids and isinstance(ex_balancer_ids, list):
+ params['LoadBalancerId'] = ','.join(ex_balancer_ids)
+
+ if ex_filters and isinstance(ex_filters, dict):
+ ex_filters.update(params)
+ params = ex_filters
+ resp_body = self.connection.request(self.path, params=params).object
+ return self._to_balancers(resp_body)
+
+ def create_balancer(self, name, port, protocol, algorithm, members,
+ **kwargs):
+ # 1.Create load balancer
+ params = {'Action': 'CreateLoadBalancer',
+ 'RegionId': self.region}
+ if name:
+ params['LoadBalancerName'] = name
+ if not port:
+ raise AttributeError('port is required')
+ if not protocol:
+ # NOTE(samsong8610): Use http listener as default
+ protocol = 'http'
+ if protocol not in PROTOCOL_TO_LISTENER_MAP:
+ raise AttributeError('unsupported protocol %s' % protocol)
+
+ extra_param_keys = [
+ 'AddressType',
+ 'VSwitchId',
+ 'InternetChargeType',
+ 'Bandwidth',
+ 'ClientToken',
+ 'MasterZoneId',
+ 'SlaveZoneId'
+ ]
+ extra = self._get_extra_params(extra_param_keys, kwargs)
+ # Bandwidth in range [1, 1000] Mbps
+ bandwidth = -1
+ if 'Bandwidth' in extra and extra['Bandwidth']:
+ try:
+ bandwidth = int(extra['Bandwidth'])
+ except ValueError:
+ raise AttributeError('Bandwidth should be an integer in '
+ 'range [1, 1000].')
+
+ charge_type = extra.get('InternetChargeType', None)
+ if charge_type and charge_type.lower() == 'paybybandwidth':
+ if bandwidth == -1:
+ raise AttributeError('PayByBandwidth requires Bandwidth to be set')
+ params.update(extra)
+
+ if members and isinstance(members, list):
+ backend_ports = [member.port for member in members]
+ if len(set(backend_ports)) != 1:
+ raise AttributeError('all members must use the same port')
+ # NOTE(samsong8610): If members do not provide backend port,
+ # default to listening port
+ backend_port = backend_ports[0] or port
+ else:
+ backend_port = port
+
+ balancer = None
+ try:
+ resp_body = self.connection.request(self.path, params).object
+ balancer = self._to_balancer(resp_body)
+ balancer.port = port
+
+ # 2.Add backend servers
+ if members is None:
+ members = []
+ for member in members:
+ self.balancer_attach_member(balancer, member)
+ # 3.Create listener
+ # NOTE(samsong8610): Assume only one listener is created and that
+ # it uses all of the bandwidth.
+ self.ex_create_listener(balancer, backend_port, protocol,
+ algorithm, bandwidth, **kwargs)
+ self.ex_start_listener(balancer, port)
+ return balancer
+ except Exception:
+ e = sys.exc_info()[1]
+ if balancer is not None:
+ try:
+ self.destroy_balancer(balancer)
+ except Exception:
+ pass
+ raise e
+
+ def destroy_balancer(self, balancer):
+ params = {'Action': 'DeleteLoadBalancer',
+ 'LoadBalancerId': balancer.id}
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def get_balancer(self, balancer_id):
+ balancers = self.list_balancers(ex_balancer_ids=[balancer_id])
+ if len(balancers) != 1:
+ raise LibcloudError('could not find load balancer with id %s' %
+ balancer_id)
+ return balancers[0]
+
+ def balancer_attach_compute_node(self, balancer, node):
+ if len(node.public_ips) > 0:
+ ip = node.public_ips[0]
+ else:
+ ip = node.private_ips[0]
+ member = Member(id=node.id, ip=ip, port=balancer.port)
+ return self.balancer_attach_member(balancer, member)
+
+ def balancer_attach_member(self, balancer, member):
+ params = {'Action': 'AddBackendServers',
+ 'LoadBalancerId': balancer.id}
+ if member and isinstance(member, Member):
+ params['BackendServers'] = self._to_servers_json([member])
+ self.connection.request(self.path, params)
+ return member
+
+ def balancer_detach_member(self, balancer, member):
+ params = {'Action': 'RemoveBackendServers',
+ 'LoadBalancerId': balancer.id}
+ if member and isinstance(member, Member):
+ params['BackendServers'] = self._list_to_json([member.id])
+ self.connection.request(self.path, params)
+ return member
+
+ def balancer_list_members(self, balancer):
+ attribute = self.ex_get_balancer_attribute(balancer)
+ members = [Member(server['ServerId'], None, None, balancer=balancer,
+ extra={'Weight': server['Weight']})
+ for server in attribute.backend_servers]
+ return members
+
+ def ex_get_balancer_attribute(self, balancer):
+ """
+ Get the attribute of a balancer
+ :param balancer: the balancer whose attribute to get
+ :type balancer: ``LoadBalancer``
+ :return: the balancer attribute
+ :rtype: ``SLBLoadBalancerAttribute``
+ """
+ params = {'Action': 'DescribeLoadBalancerAttribute',
+ 'LoadBalancerId': balancer.id}
+ resp_body = self.connection.request(self.path, params).object
+ attribute = self._to_balancer_attribute(resp_body)
+ return attribute
+
+ def ex_list_listeners(self, balancer):
+ """
+ Get all listeners related to the given balancer
+ :param balancer: the balancer to list listeners for
+ :type balancer: ``LoadBalancer``
+ :return: a list of listeners
+ :rtype: ``list`` of ``SLBLoadBalancerListener``
+ """
+ attribute = self.ex_get_balancer_attribute(balancer)
+ listeners = [SLBLoadBalancerListener(each['ListenerPort'], None,
+ None, None)
+ for each in attribute.listeners]
+ return listeners
+
+ def ex_create_listener(self, balancer, backend_port, protocol, algorithm,
+ bandwidth, **kwargs):
+ """
+ Create load balancer listening rule.
+ :param balancer: the balancer which the rule belongs to.
+ The listener created will listen on the port of the
+ balancer by default. 'ListenerPort' in kwargs
+ will *OVERRIDE* it.
+ :type balancer: ``LoadBalancer``
+ :param backend_port: the backend server port
+ :type backend_port: ``int``
+ :param protocol: the balancer protocol, default to http
+ :type protocol: ``str``
+ :param algorithm: the balancer routing algorithm
+ :type algorithm: ``Algorithm``
+ :param bandwidth: the listener bandwidth limit in Mbps
+ :type bandwidth: ``int``
+ :return: the created listener
+ :rtype: ``SLBLoadBalancerListener``
+ """
+ cls = PROTOCOL_TO_LISTENER_MAP.get(protocol,
+ SLBLoadBalancerHttpListener)
+ if 'ListenerPort' in kwargs:
+ port = kwargs['ListenerPort']
+ else:
+ port = balancer.port
+ listener = cls.create(port, backend_port, algorithm,
+ bandwidth, extra=kwargs)
+ params = listener.get_create_params()
+ params['LoadBalancerId'] = balancer.id
+ params['RegionId'] = self.region
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def ex_start_listener(self, balancer, port):
+ """
+ Start the balancer's listener listening on the given port.
+ :param balancer: a load balancer
+ :type balancer: ``LoadBalancer``
+ :param port: listening port
+ :type port: ``int``
+ :return: whether the operation succeeded
+ :rtype: ``bool``
+ """
+ params = {'Action': 'StartLoadBalancerListener',
+ 'LoadBalancerId': balancer.id,
+ 'ListenerPort': port}
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def ex_stop_listener(self, balancer, port):
+ """
+ Stop the balancer's listener listening on the given port.
+ :param balancer: a load balancer
+ :type balancer: ``LoadBalancer``
+ :param port: listening port
+ :type port: ``int``
+ :return: whether the operation succeeded
+ :rtype: ``bool``
+ """
+ params = {'Action': 'StopLoadBalancerListener',
+ 'LoadBalancerId': balancer.id,
+ 'ListenerPort': port}
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def ex_upload_certificate(self, name, server_certificate,
+ private_key):
+ """
+ Upload a certificate and private key for an HTTPS load balancer listener
+ :param name: the certificate name
+ :type name: ``str``
+ :param server_certificate: the content of the certificate to upload
+ in PEM format
+ :type server_certificate: ``str``
+ :param private_key: the content of the private key to upload
+ in PEM format
+ :type private_key: ``str``
+ :return: the newly created certificate info
+ :rtype: ``SLBServerCertificate``
+ """
+ params = {'Action': 'UploadServerCertificate',
+ 'RegionId': self.region,
+ 'ServerCertificate': server_certificate,
+ 'PrivateKey': private_key}
+ if name:
+ params['ServerCertificateName'] = name
+ resp_body = self.connection.request(self.path, params).object
+ return self._to_server_certificate(resp_body)
+
+ def ex_list_certificates(self, certificate_ids=[]):
+ """
+ List all server certificates
+ :param certificate_ids: certificate ids to filter results
+ :type certificate_ids: ``list`` of ``str``
+ :return: certificates
+ :rtype: ``list`` of ``SLBServerCertificate``
+ """
+ params = {'Action': 'DescribeServerCertificates',
+ 'RegionId': self.region}
+ if certificate_ids and isinstance(certificate_ids, list):
+ params['ServerCertificateId'] = ','.join(certificate_ids)
+
+ resp_body = self.connection.request(self.path, params).object
+ cert_elements = findall(resp_body,
+ 'ServerCertificates/ServerCertificate',
+ namespace=self.namespace)
+ certificates = [self._to_server_certificate(el)
+ for el in cert_elements]
+ return certificates
+
+ def ex_delete_certificate(self, certificate_id):
+ """
+ Delete the given server certificate
+ :param certificate_id: the id of the certificate to delete
+ :type certificate_id: ``str``
+ :return: whether the deletion succeeded
+ :rtype: ``bool``
+ """
+ params = {'Action': 'DeleteServerCertificate',
+ 'RegionId': self.region,
+ 'ServerCertificateId': certificate_id}
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def ex_set_certificate_name(self, certificate_id, name):
+ """
+ Set server certificate name.
+ :param certificate_id: the id of the server certificate to update
+ :type certificate_id: ``str``
+ :param name: the new name
+ :type name: ``str``
+ :return: whether the update succeeded
+ :rtype: ``bool``
+ """
+ params = {'Action': 'SetServerCertificateName',
+ 'RegionId': self.region,
+ 'ServerCertificateId': certificate_id,
+ 'ServerCertificateName': name}
+ resp = self.connection.request(self.path, params)
+ return resp.success()
+
+ def _to_balancers(self, element):
+ xpath = 'LoadBalancers/LoadBalancer'
+ return [self._to_balancer(el)
+ for el in findall(element=element, xpath=xpath,
+ namespace=self.namespace)]
+
+ def _to_balancer(self, el):
+ _id = findtext(element=el, xpath='LoadBalancerId',
+ namespace=self.namespace)
+ name = findtext(element=el, xpath='LoadBalancerName',
+ namespace=self.namespace)
+ status = findtext(element=el, xpath='LoadBalancerStatus',
+ namespace=self.namespace)
+ state = STATE_MAPPINGS.get(status, State.UNKNOWN)
+ address = findtext(element=el, xpath='Address',
+ namespace=self.namespace)
+ extra = self._get_extra_dict(
+ el, RESOURCE_EXTRA_ATTRIBUTES_MAP['balancer'])
+
+ balancer = LoadBalancer(id=_id, name=name, state=state, ip=address,
+ port=None, driver=self, extra=extra)
+ return balancer
+
+ def _create_list_params(self, params, items, label):
+ """
+ Add the items to params as indexed parameters (label % 1, label % 2, ...)
+ """
+ if isinstance(items, str):
+ items = [items]
+ for index, item in enumerate(items):
+ params[label % (index + 1)] = item
+ return params
+
+ def _get_extra_dict(self, element, mapping):
+ """
+ Extract attributes from the element based on rules provided in the
+ mapping dictionary.
+
+ :param element: Element to parse the values from.
+ :type element: xml.etree.ElementTree.Element.
+
+ :param mapping: Dictionary with the extra layout
+ :type mapping: ``dict``
+
+ :rtype: ``dict``
+ """
+ extra = {}
+ for attribute, values in mapping.items():
+ transform_func = values['transform_func']
+ value = findattr(element=element,
+ xpath=values['xpath'],
+ namespace=self.namespace)
+ if value:
+ try:
+ extra[attribute] = transform_func(value)
+ except Exception:
+ extra[attribute] = None
+ else:
+ extra[attribute] = value
+
+ return extra
+
+ def _to_servers_json(self, members):
+ servers = []
+ for each in members:
+ server = {'ServerId': each.id,
+ 'Weight': '100'}
+ if 'Weight' in each.extra:
+ server['Weight'] = each.extra['Weight']
+ servers.append(server)
+ try:
+ return json.dumps(servers)
+ except Exception:
+ raise AttributeError('could not convert member to backend server')
+
+ def _to_balancer_attribute(self, element):
+ balancer = self._to_balancer(element)
+ port_proto_elements = findall(
+ element, 'ListenerPortsAndProtocol/ListenerPortAndProtocol',
+ namespace=self.namespace)
+ if len(port_proto_elements) > 0:
+ listeners = [self._to_port_and_protocol(el)
+ for el in port_proto_elements]
+ else:
+ port_elements = findall(element, 'ListenerPorts/ListenerPort',
+ namespace=self.namespace)
+ listeners = [{'ListenerPort': el.text, 'ListenerProtocol': 'http'}
+ for el in port_elements]
+ server_elements = findall(element,
+ 'BackendServers/BackendServer',
+ namespace=self.namespace)
+ backend_servers = [self._to_server_and_weight(el)
+ for el in server_elements]
+ return SLBLoadBalancerAttribute(balancer, listeners, backend_servers)
+
+ def _to_port_and_protocol(self, el):
+ port = findtext(el, 'ListenerPort', namespace=self.namespace)
+ protocol = findtext(el, 'ListenerProtocol', namespace=self.namespace)
+ return {'ListenerPort': port, 'ListenerProtocol': protocol}
+
+ def _to_server_and_weight(self, el):
+ server_id = findtext(el, 'ServerId', namespace=self.namespace)
+ weight = findtext(el, 'Weight', namespace=self.namespace)
+ return {'ServerId': server_id, 'Weight': weight}
+
+ def _to_server_certificate(self, el):
+ _id = findtext(el, 'ServerCertificateId', namespace=self.namespace)
+ name = findtext(el, 'ServerCertificateName', namespace=self.namespace)
+ fingerprint = findtext(el, 'Fingerprint', namespace=self.namespace)
+ return SLBServerCertificate(id=_id, name=name,
+ fingerprint=fingerprint)
+
+ def _get_extra_params(self, extra_param_keys, kwargs):
+ params = {}
+ for key in extra_param_keys:
+ if key in kwargs:
+ params[key] = kwargs[key]
+ return params
+
+ def _list_to_json(self, value):
+ try:
+ return json.dumps(value)
+ except Exception:
+ return '[]'
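A minimal usage sketch of the new SLB driver follows; the credentials, region id, instance ids and the StickySession/HealthCheck values are placeholder assumptions, not values taken from this commit:

    from libcloud.loadbalancer.base import Algorithm, Member
    from libcloud.loadbalancer.drivers.slb import SLBDriver

    # Placeholder Aliyun credentials and region.
    driver = SLBDriver('my-access-key-id', 'my-access-key-secret',
                       region='cn-hangzhou')

    # All members attached at creation time must use the same backend port.
    members = [Member(id='i-instance1', ip=None, port=8080),
               Member(id='i-instance2', ip=None, port=8080)]

    # The default HTTP listener requires StickySession and HealthCheck kwargs.
    balancer = driver.create_balancer(
        name='example-lb', port=80, protocol='http',
        algorithm=Algorithm.WEIGHTED_ROUND_ROBIN, members=members,
        StickySession='off', HealthCheck='off')
    print(balancer.id, balancer.ip)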
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/loadbalancer/providers.py
----------------------------------------------------------------------
diff --git a/libcloud/loadbalancer/providers.py b/libcloud/loadbalancer/providers.py
index a4ff090..b619c9d 100644
--- a/libcloud/loadbalancer/providers.py
+++ b/libcloud/loadbalancer/providers.py
@@ -42,6 +42,9 @@ DRIVERS = {
('libcloud.loadbalancer.drivers.softlayer', 'SoftlayerLBDriver'),
Provider.DIMENSIONDATA:
('libcloud.loadbalancer.drivers.dimensiondata', 'DimensionDataLBDriver'),
+ Provider.SLB:
+ ('libcloud.loadbalancer.drivers.slb', 'SLBDriver'),
+
# Deprecated
Provider.RACKSPACE_US:
('libcloud.loadbalancer.drivers.rackspace', 'RackspaceLBDriver'),
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/loadbalancer/types.py
----------------------------------------------------------------------
diff --git a/libcloud/loadbalancer/types.py b/libcloud/loadbalancer/types.py
index c7e390b..0d5deec 100644
--- a/libcloud/loadbalancer/types.py
+++ b/libcloud/loadbalancer/types.py
@@ -32,6 +32,9 @@ class LibcloudLBImmutableError(LibcloudLBError):
class Provider(object):
+ """
+ :cvar SLB: Aliyun SLB loadbalancer driver
+ """
RACKSPACE = 'rackspace'
GOGRID = 'gogrid'
NINEFOLD = 'ninefold'
@@ -41,6 +44,7 @@ class Provider(object):
GCE = 'gce'
SOFTLAYER = 'softlayer'
DIMENSIONDATA = 'dimensiondata'
+ SLB = 'slb'
# Deprecated
RACKSPACE_US = 'rackspace_us'
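With the Provider.SLB constant and the DRIVERS entry above, the driver can also be obtained through the standard provider lookup; a short sketch with placeholder credentials and region:

    from libcloud.loadbalancer.providers import get_driver
    from libcloud.loadbalancer.types import Provider

    cls = get_driver(Provider.SLB)   # resolves to SLBDriver
    driver = cls('my-access-key-id', 'my-access-key-secret', 'cn-hangzhou')
    for balancer in driver.list_balancers():
        print(balancer.name, balancer.ip, balancer.state)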
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/storage/drivers/oss.py
----------------------------------------------------------------------
diff --git a/libcloud/storage/drivers/oss.py b/libcloud/storage/drivers/oss.py
new file mode 100644
index 0000000..84df44f
--- /dev/null
+++ b/libcloud/storage/drivers/oss.py
@@ -0,0 +1,1069 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import base64
+import codecs
+import hmac
+import os
+import time
+import sys
+from hashlib import sha1
+
+try:
+ from lxml import etree as ET
+except ImportError:
+ from xml.etree import ElementTree as ET
+
+try:
+ from lxml.etree import Element, SubElement
+except ImportError:
+ from xml.etree.ElementTree import Element, SubElement
+
+from libcloud.utils.py3 import httplib
+from libcloud.utils.py3 import urlquote
+from libcloud.utils.py3 import urlencode
+from libcloud.utils.py3 import b
+from libcloud.utils.py3 import tostring
+from libcloud.utils.py3 import PY3
+from libcloud.utils.xml import fixxpath, findtext
+from libcloud.utils.files import guess_file_mime_type, read_in_chunks, \
+ exhaust_iterator
+from libcloud.common.types import InvalidCredsError, LibcloudError
+from libcloud.common.base import ConnectionUserAndKey, RawResponse, \
+ XmlResponse
+from libcloud.common.types import MalformedResponseError
+from libcloud.storage.base import Object, Container, StorageDriver, \
+ DEFAULT_CONTENT_TYPE
+from libcloud.storage.types import ContainerError
+from libcloud.storage.types import ContainerIsNotEmptyError
+from libcloud.storage.types import InvalidContainerNameError
+from libcloud.storage.types import ContainerDoesNotExistError
+from libcloud.storage.types import ObjectDoesNotExistError
+from libcloud.storage.types import ObjectHashMismatchError
+
+__all__ = [
+ 'OSSStorageDriver',
+ 'OSSMultipartUpload',
+
+ 'EXPIRATION_SECONDS',
+ 'CHUNK_SIZE',
+ 'MAX_UPLOADS_PER_RESPONSE'
+]
+
+GMT_TIME_FORMAT = "%a, %d %b %Y %H:%M:%S GMT"
+EXPIRATION_SECONDS = 15 * 60
+
+ # OSS multi-part chunks must be greater than 100KB, except the last one
+CHUNK_SIZE = 100 * 1024
+
+# Desired number of items in each response inside a paginated request in
+# ex_iterate_multipart_uploads.
+MAX_UPLOADS_PER_RESPONSE = 1000
+
+
+class OSSResponse(XmlResponse):
+ namespace = None
+ valid_response_codes = [httplib.NOT_FOUND, httplib.CONFLICT,
+ httplib.BAD_REQUEST]
+
+ def success(self):
+ i = int(self.status)
+ return i >= 200 and i <= 299 or i in self.valid_response_codes
+
+ def parse_body(self):
+ """
+ OSSResponse body is in utf-8 encoding.
+ """
+ if len(self.body) == 0 and not self.parse_zero_length_body:
+ return self.body
+
+ try:
+ if PY3:
+ parser = ET.XMLParser(encoding='utf-8')
+ body = ET.XML(self.body.encode('utf-8'), parser=parser)
+ else:
+ body = ET.XML(self.body)
+ except Exception:
+ raise MalformedResponseError('Failed to parse XML',
+ body=self.body,
+ driver=self.connection.driver)
+ return body
+
+ def parse_error(self):
+ if self.status in [httplib.UNAUTHORIZED, httplib.FORBIDDEN]:
+ raise InvalidCredsError(self.body)
+ elif self.status == httplib.MOVED_PERMANENTLY:
+ raise LibcloudError('This bucket is located in a different ' +
+ 'region. Please use the correct driver.',
+ driver=OSSStorageDriver)
+ elif self.status == httplib.METHOD_NOT_ALLOWED:
+ raise LibcloudError('The method is not allowed. Status code: %d, '
+ 'headers: %s' % (self.status, self.headers))
+ raise LibcloudError('Unknown error. Status code: %d, body: %s' %
+ (self.status, self.body),
+ driver=OSSStorageDriver)
+
+
+class OSSRawResponse(OSSResponse, RawResponse):
+ pass
+
+
+class OSSConnection(ConnectionUserAndKey):
+ """
+ Represents a single connection to the Aliyun OSS Endpoint
+ """
+
+ _domain = 'aliyuncs.com'
+ _default_location = 'oss'
+ responseCls = OSSResponse
+ rawResponseCls = OSSRawResponse
+
+ @staticmethod
+ def _get_auth_signature(method, headers, params, expires, secret_key, path,
+ vendor_prefix):
+ """
+ Signature = base64(hmac-sha1(AccessKeySecret,
+ VERB + "\n"
+ + CONTENT-MD5 + "\n"
+ + CONTENT-TYPE + "\n"
+ + EXPIRES + "\n"
+ + CanonicalizedOSSHeaders
+ + CanonicalizedResource))
+ """
+ special_headers = {'content-md5': '',
+ 'content-type': '',
+ 'expires': ''}
+ vendor_headers = {}
+
+ for key, value in list(headers.items()):
+ key_lower = key.lower()
+ if key_lower in special_headers:
+ special_headers[key_lower] = value.strip()
+ elif key_lower.startswith(vendor_prefix):
+ vendor_headers[key_lower] = value.strip()
+
+ if expires:
+ special_headers['expires'] = str(expires)
+
+ buf = [method]
+ for _, value in sorted(special_headers.items()):
+ buf.append(value)
+ string_to_sign = '\n'.join(buf)
+
+ buf = []
+ for key, value in sorted(vendor_headers.items()):
+ buf.append('%s:%s' % (key, value))
+ header_string = '\n'.join(buf)
+
+ values_to_sign = []
+ for value in [string_to_sign, header_string, path]:
+ if value:
+ values_to_sign.append(value)
+
+ string_to_sign = '\n'.join(values_to_sign)
+ b64_hmac = base64.b64encode(
+ hmac.new(b(secret_key), b(string_to_sign), digestmod=sha1).digest()
+ )
+ return b64_hmac
+
+ @staticmethod
+ def _get_expires(params):
+ """
+ Get expires timeout seconds from parameters.
+ """
+ expires = None
+ if 'expires' in params:
+ expires = params['expires']
+ elif 'Expires' in params:
+ expires = params['Expires']
+ if expires:
+ try:
+ return int(expires)
+ except Exception:
+ pass
+ return int(time.time()) + EXPIRATION_SECONDS
+
+ def add_default_params(self, params):
+ expires_at = self._get_expires(params)
+ expires = str(expires_at)
+ params['OSSAccessKeyId'] = self.user_id
+ params['Expires'] = expires
+ return params
+
+ def add_default_headers(self, headers):
+ headers['Date'] = time.strftime(GMT_TIME_FORMAT, time.gmtime())
+ return headers
+
+ def pre_connect_hook(self, params, headers):
+ if self._container:
+ path = '/%s%s' % (self._container.name, self.action)
+ else:
+ path = self.action
+ params['Signature'] = self._get_auth_signature(
+ method=self.method, headers=headers, params=params,
+ expires=params['Expires'], secret_key=self.key, path=path,
+ vendor_prefix=self.driver.http_vendor_prefix)
+ return params, headers
+
+ def request(self, action, params=None, data=None, headers=None,
+ method='GET', raw=False, container=None):
+ self.host = '%s.%s' % (self._default_location, self._domain)
+ self._container = container
+ if container and container.name:
+ if 'location' in container.extra:
+ self.host = '%s.%s.%s' % (container.name,
+ container.extra['location'],
+ self._domain)
+ else:
+ self.host = '%s.%s' % (container.name, self.host)
+ return super(OSSConnection, self).request(action=action,
+ params=params,
+ data=data,
+ headers=headers,
+ method=method,
+ raw=raw)
+
+
+class OSSMultipartUpload(object):
+ """
+ Class representing an Aliyun OSS multipart upload
+ """
+
+ def __init__(self, key, id, initiated):
+ """
+ Class representing an Aliyun OSS multipart upload
+
+ :param key: The object/key that was being uploaded
+ :type key: ``str``
+
+ :param id: The upload id assigned by Aliyun
+ :type id: ``str``
+
+ :param initiated: The date/time at which the upload was started
+ :type initiated: ``str``
+ """
+ self.key = key
+ self.id = id
+ self.initiated = initiated
+
+ def __repr__(self):
+ return ('<OSSMultipartUpload: key=%s>' % (self.key))
+
+
+class OSSStorageDriver(StorageDriver):
+ name = 'Aliyun OSS'
+ website = 'http://www.aliyun.com/product/oss'
+ connectionCls = OSSConnection
+ hash_type = 'md5'
+ supports_chunked_encoding = False
+ supports_multipart_upload = True
+ namespace = None
+ http_vendor_prefix = 'x-oss-'
+
+ def iterate_containers(self):
+ response = self.connection.request('/')
+ if response.status == httplib.OK:
+ containers = self._to_containers(obj=response.object,
+ xpath='Buckets/Bucket')
+ return containers
+
+ raise LibcloudError('Unexpected status code: %s' % (response.status),
+ driver=self)
+
+ def list_container_objects(self, container, ex_prefix=None):
+ """
+ Return a list of objects for the given container.
+
+ :param container: Container instance.
+ :type container: :class:`Container`
+
+ :keyword ex_prefix: Only return objects starting with ex_prefix
+ :type ex_prefix: ``str``
+
+ :return: A list of Object instances.
+ :rtype: ``list`` of :class:`Object`
+ """
+ return list(self.iterate_container_objects(container,
+ ex_prefix=ex_prefix))
+
+ def iterate_container_objects(self, container, ex_prefix=None):
+ """
+ Return a generator of objects for the given container.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :keyword ex_prefix: Only return objects starting with ex_prefix
+ :type ex_prefix: ``str``
+
+ :return: A generator of Object instances.
+ :rtype: ``generator`` of :class:`Object`
+ """
+ params = {}
+ if ex_prefix:
+ params['prefix'] = ex_prefix
+
+ last_key = None
+ exhausted = False
+
+ while not exhausted:
+ if last_key:
+ params['marker'] = last_key
+
+ response = self.connection.request('/',
+ params=params,
+ container=container)
+
+ if response.status != httplib.OK:
+ raise LibcloudError('Unexpected status code: %s' %
+ (response.status), driver=self)
+
+ objects = self._to_objs(obj=response.object,
+ xpath='Contents', container=container)
+ is_truncated = response.object.findtext(fixxpath(
+ xpath='IsTruncated', namespace=self.namespace)).lower()
+ exhausted = (is_truncated == 'false')
+
+ last_key = None
+ for obj in objects:
+ last_key = obj.name
+ yield obj
+
+ def get_container(self, container_name):
+ for container in self.iterate_containers():
+ if container.name == container_name:
+ return container
+ raise ContainerDoesNotExistError(value=None,
+ driver=self,
+ container_name=container_name)
+
+ def get_object(self, container_name, object_name):
+ container = self.get_container(container_name=container_name)
+ object_path = self._get_object_path(container, object_name)
+ response = self.connection.request(object_path,
+ method='HEAD',
+ container=container)
+
+ if response.status == httplib.OK:
+ obj = self._headers_to_object(object_name=object_name,
+ container=container,
+ headers=response.headers)
+ return obj
+
+ raise ObjectDoesNotExistError(value=None, driver=self,
+ object_name=object_name)
+
+ def create_container(self, container_name, ex_location=None):
+ """
+ @inherits :class:`StorageDriver.create_container`
+
+ :keyword ex_location: The desired location in which to create the container
+ :type ex_location: ``str``
+ """
+ extra = None
+ if ex_location:
+ root = Element('CreateBucketConfiguration')
+ child = SubElement(root, 'LocationConstraint')
+ child.text = ex_location
+
+ data = tostring(root)
+ extra = {'location': ex_location}
+ else:
+ data = ''
+
+ container = Container(name=container_name, extra=extra, driver=self)
+ response = self.connection.request('/',
+ data=data,
+ method='PUT',
+ container=container)
+
+ if response.status == httplib.OK:
+ return container
+ elif response.status == httplib.CONFLICT:
+ raise InvalidContainerNameError(
+ value='Container with this name already exists. The name must '
+ 'be unique among all the containers in the system',
+ container_name=container_name, driver=self)
+ elif response.status == httplib.BAD_REQUEST:
+ raise ContainerError(
+ value='Bad request when creating container: %s' %
+ response.body,
+ container_name=container_name, driver=self)
+
+ raise LibcloudError('Unexpected status code: %s' % (response.status),
+ driver=self)
+
+ def delete_container(self, container):
+ # Note: All the objects in the container must be deleted first
+ response = self.connection.request('/',
+ method='DELETE',
+ container=container)
+ if response.status == httplib.NO_CONTENT:
+ return True
+ elif response.status == httplib.CONFLICT:
+ raise ContainerIsNotEmptyError(
+ value='Container must be empty before it can be deleted.',
+ container_name=container.name, driver=self)
+ elif response.status == httplib.NOT_FOUND:
+ raise ContainerDoesNotExistError(value=None,
+ driver=self,
+ container_name=container.name)
+
+ return False
+
+ def download_object(self, obj, destination_path, overwrite_existing=False,
+ delete_on_failure=True):
+ obj_path = self._get_object_path(obj.container, obj.name)
+
+ response = self.connection.request(obj_path,
+ method='GET',
+ raw=True,
+ container=obj.container)
+
+ return self._get_object(obj=obj, callback=self._save_object,
+ response=response,
+ callback_kwargs={
+ 'obj': obj,
+ 'response': response.response,
+ 'destination_path': destination_path,
+ 'overwrite_existing': overwrite_existing,
+ 'delete_on_failure': delete_on_failure},
+ success_status_code=httplib.OK)
+
+ def download_object_as_stream(self, obj, chunk_size=None):
+ obj_path = self._get_object_path(obj.container, obj.name)
+ response = self.connection.request(obj_path,
+ method='GET',
+ raw=True,
+ container=obj.container)
+
+ return self._get_object(obj=obj, callback=read_in_chunks,
+ response=response,
+ callback_kwargs={'iterator': response.response,
+ 'chunk_size': chunk_size},
+ success_status_code=httplib.OK)
+
+ def upload_object(self, file_path, container, object_name, extra=None,
+ verify_hash=True, headers=None):
+ upload_func = self._upload_file
+ upload_func_kwargs = {'file_path': file_path}
+
+ return self._put_object(container=container, object_name=object_name,
+ upload_func=upload_func,
+ upload_func_kwargs=upload_func_kwargs,
+ extra=extra, file_path=file_path,
+ verify_hash=verify_hash)
+
+ def upload_object_via_stream(self, iterator, container, object_name,
+ extra=None, headers=None):
+ method = 'PUT'
+ params = None
+
+ if self.supports_multipart_upload:
+ # Initiate the multipart request and get an upload id
+ upload_func = self._upload_multipart
+ upload_func_kwargs = {'iterator': iterator,
+ 'container': container,
+ 'object_name': object_name}
+ method = 'POST'
+ iterator = iter('')
+ params = 'uploads'
+
+ elif self.supports_chunked_encoding:
+ upload_func = self._stream_data
+ upload_func_kwargs = {'iterator': iterator}
+ else:
+ # In this case, we have to load the entire object to
+ # memory and send it as normal data
+ upload_func = self._upload_data
+ upload_func_kwargs = {}
+
+ return self._put_object(container=container, object_name=object_name,
+ upload_func=upload_func,
+ upload_func_kwargs=upload_func_kwargs,
+ extra=extra, method=method, query_args=params,
+ iterator=iterator, verify_hash=False)
+
+ def delete_object(self, obj):
+ object_path = self._get_object_path(obj.container, obj.name)
+ response = self.connection.request(object_path, method='DELETE',
+ container=obj.container)
+ if response.status == httplib.NO_CONTENT:
+ return True
+ elif response.status == httplib.NOT_FOUND:
+ raise ObjectDoesNotExistError(value=None, driver=self,
+ object_name=obj.name)
+
+ return False
+
+ def ex_iterate_multipart_uploads(self, container, prefix=None,
+ delimiter=None,
+ max_uploads=MAX_UPLOADS_PER_RESPONSE):
+ """
+ Extension method for listing all in-progress OSS multipart uploads.
+
+ Each multipart upload which has not been committed or aborted is
+ considered in-progress.
+
+ :param container: The container holding the uploads
+ :type container: :class:`Container`
+
+ :keyword prefix: Print only uploads of objects with this prefix
+ :type prefix: ``str``
+
+ :keyword delimiter: The object/key names are grouped based on
+ being split by this delimiter
+ :type delimiter: ``str``
+
+ :keyword max_uploads: The maximum number of upload items returned per request
+ :type max_uploads: ``int``
+
+ :return: A generator of OSSMultipartUpload instances.
+ :rtype: ``generator`` of :class:`OSSMultipartUpload`
+ """
+
+ if not self.supports_multipart_upload:
+ raise LibcloudError('Feature not supported', driver=self)
+
+ request_path = '/?uploads'
+ params = {'max-uploads': max_uploads}
+
+ if prefix:
+ params['prefix'] = prefix
+
+ if delimiter:
+ params['delimiter'] = delimiter
+
+ def finder(node, text):
+ return node.findtext(fixxpath(xpath=text,
+ namespace=self.namespace))
+
+ while True:
+ response = self.connection.request(request_path, params=params,
+ container=container)
+
+ if response.status != httplib.OK:
+ raise LibcloudError('Error fetching multipart uploads. '
+ 'Got code: %s' % response.status,
+ driver=self)
+
+ body = response.parse_body()
+ # pylint: disable=maybe-no-member
+ for node in body.findall(fixxpath(xpath='Upload',
+ namespace=self.namespace)):
+
+ key = finder(node, 'Key')
+ upload_id = finder(node, 'UploadId')
+ initiated = finder(node, 'Initiated')
+
+ yield OSSMultipartUpload(key, upload_id, initiated)
+
+ # Check if this is the last entry in the listing
+ # pylint: disable=maybe-no-member
+ is_truncated = body.findtext(fixxpath(xpath='IsTruncated',
+ namespace=self.namespace))
+
+ if is_truncated.lower() == 'false':
+ break
+
+ # Provide params for the next request
+ upload_marker = body.findtext(fixxpath(xpath='NextUploadIdMarker',
+ namespace=self.namespace))
+ key_marker = body.findtext(fixxpath(xpath='NextKeyMarker',
+ namespace=self.namespace))
+
+ params['key-marker'] = key_marker
+ params['upload-id-marker'] = upload_marker
+
+ def ex_abort_all_multipart_uploads(self, container, prefix=None):
+ """
+ Extension method for removing all partially completed OSS multipart
+ uploads.
+
+ :param container: The container holding the uploads
+ :type container: :class:`Container`
+
+ :keyword prefix: Delete only uploads of objects with this prefix
+ :type prefix: ``str``
+ """
+
+ # Iterate through the container and delete the upload ids
+ for upload in self.ex_iterate_multipart_uploads(container, prefix,
+ delimiter=None):
+ object_path = self._get_object_path(container, upload.key)
+ self._abort_multipart(object_path, upload.id, container=container)
+
+ def _clean_object_name(self, name):
+ name = urlquote(name)
+ return name
+
+ def _put_object(self, container, object_name, upload_func,
+ upload_func_kwargs, method='PUT', query_args=None,
+ extra=None, file_path=None, iterator=None,
+ verify_hash=False):
+ """
+ Create an object and upload data using the given function.
+ """
+ headers = {}
+ extra = extra or {}
+
+ content_type = extra.get('content_type', None)
+ meta_data = extra.get('meta_data', None)
+ acl = extra.get('acl', None)
+
+ if meta_data:
+ for key, value in list(meta_data.items()):
+ key = self.http_vendor_prefix + 'meta-%s' % (key)
+ headers[key] = value
+
+ if acl:
+ if acl not in ['public-read', 'private', 'public-read-write']:
+ raise AttributeError('invalid acl value: %s' % acl)
+ headers[self.http_vendor_prefix + 'object-acl'] = acl
+
+ request_path = self._get_object_path(container, object_name)
+
+ if query_args:
+ request_path = '?'.join((request_path, query_args))
+
+ # TODO: Let the underlying exceptions bubble up and capture the SIGPIPE
+ # here.
+ # SIGPIPE is thrown if the provided container does not exist or the
+ # user does not have correct permission
+ result_dict = self._upload_object(
+ object_name=object_name, content_type=content_type,
+ upload_func=upload_func, upload_func_kwargs=upload_func_kwargs,
+ request_path=request_path, request_method=method,
+ headers=headers, file_path=file_path, iterator=iterator,
+ container=container)
+
+ response = result_dict['response']
+ bytes_transferred = result_dict['bytes_transferred']
+ headers = response.headers
+ response = response.response
+ server_hash = headers['etag'].replace('"', '')
+
+ if (verify_hash and result_dict['data_hash'].upper() != server_hash):
+ raise ObjectHashMismatchError(
+ value='MD5 hash checksum does not match',
+ object_name=object_name, driver=self)
+ elif response.status == httplib.OK:
+ obj = Object(
+ name=object_name, size=bytes_transferred, hash=server_hash,
+ extra={'acl': acl}, meta_data=meta_data, container=container,
+ driver=self)
+
+ return obj
+ else:
+ raise LibcloudError(
+ 'Unexpected status code, status_code=%s' % (response.status),
+ driver=self)
+
+ def _upload_multipart(self, response, data, iterator, container,
+ object_name, calculate_hash=True):
+ """
+ Callback invoked for uploading data to OSS using Aliyun's
+ multipart upload mechanism
+
+ :param response: Response object from the initial POST request
+ :type response: :class:`OSSRawResponse`
+
+ :param data: Any data from the initial POST request
+ :type data: ``str``
+
+ :param iterator: The generator for fetching the upload data
+ :type iterator: ``generator``
+
+ :param container: The container owning the object to which data is
+ being uploaded
+ :type container: :class:`Container`
+
+ :param object_name: The name of the object to which we are uploading
+ :type object_name: ``str``
+
+ :keyword calculate_hash: Indicates if we must calculate the data hash
+ :type calculate_hash: ``bool``
+
+ :return: A tuple of (status, checksum, bytes transferred)
+ :rtype: ``tuple``
+ """
+
+ object_path = self._get_object_path(container, object_name)
+
+ # Get the upload id from the response xml
+ response.body = response.response.read()
+ body = response.parse_body()
+ upload_id = body.find(fixxpath(xpath='UploadId',
+ namespace=self.namespace)).text
+
+ try:
+ # Upload the data through the iterator
+ result = self._upload_from_iterator(iterator, object_path,
+ upload_id, calculate_hash,
+ container=container)
+ (chunks, data_hash, bytes_transferred) = result
+
+ # Commit the chunk info and complete the upload
+ etag = self._commit_multipart(object_path, upload_id, chunks,
+ container=container)
+ except Exception:
+ exc = sys.exc_info()[1]
+ # OSS provides a mechanism for aborting an upload.
+ self._abort_multipart(object_path, upload_id, container=container)
+ raise exc
+
+ # Modify the response header of the first request. This is used
+ # by other functions once the callback is done
+ response.headers['etag'] = etag
+
+ return (True, data_hash, bytes_transferred)
+
+ def _upload_from_iterator(self, iterator, object_path, upload_id,
+ calculate_hash=True, container=None):
+ """
+ Uploads data from an iterator in fixed-size chunks to OSS
+
+ :param iterator: The generator for fetching the upload data
+ :type iterator: ``generator``
+
+ :param object_path: The path of the object to which we are uploading
+ :type object_path: ``str``
+
+ :param upload_id: The upload id allocated for this multipart upload
+ :type upload_id: ``str``
+
+ :keyword calculate_hash: Indicates if we must calculate the data hash
+ :type calculate_hash: ``bool``
+
+ :keyword container: the container object to upload object to
+ :type container: :class:`Container`
+
+ :return: A tuple of (chunk info, checksum, bytes transferred)
+ :rtype: ``tuple``
+ """
+
+ data_hash = None
+ if calculate_hash:
+ data_hash = self._get_hash_function()
+
+ bytes_transferred = 0
+ count = 1
+ chunks = []
+ params = {'uploadId': upload_id}
+
+ # Read the input data in chunk sizes suitable for OSS
+ for data in read_in_chunks(iterator, chunk_size=CHUNK_SIZE,
+ fill_size=True, yield_empty=True):
+ bytes_transferred += len(data)
+
+ if calculate_hash:
+ data_hash.update(data)
+
+ chunk_hash = self._get_hash_function()
+ chunk_hash.update(data)
+ chunk_hash = base64.b64encode(chunk_hash.digest()).decode('utf-8')
+
+ # OSS will calculate hash of the uploaded data and
+ # check this header.
+ headers = {'Content-MD5': chunk_hash}
+ params['partNumber'] = count
+
+ request_path = '?'.join((object_path, urlencode(params)))
+
+ resp = self.connection.request(request_path, method='PUT',
+ data=data, headers=headers,
+ container=container)
+
+ if resp.status != httplib.OK:
+ raise LibcloudError('Error uploading chunk', driver=self)
+
+ server_hash = resp.headers['etag']
+
+ # Keep this data for a later commit
+ chunks.append((count, server_hash))
+ count += 1
+
+ if calculate_hash:
+ data_hash = data_hash.hexdigest()
+
+ return (chunks, data_hash, bytes_transferred)
+
+ def _commit_multipart(self, object_path, upload_id, chunks,
+ container=None):
+ """
+ Makes a final commit of the data.
+
+ :param object_path: Server side object path.
+ :type object_path: ``str``
+
+ :param upload_id: ID of the multipart upload.
+ :type upload_id: ``str``
+
+ :param chunks: A list of (chunk_number, chunk_hash) tuples.
+ :type chunks: ``list``
+
+ :keyword container: The container owning the object to which data is
+ being uploaded
+ :type container: :class:`Container`
+ """
+
+ root = Element('CompleteMultipartUpload')
+
+ for (count, etag) in chunks:
+ part = SubElement(root, 'Part')
+ part_no = SubElement(part, 'PartNumber')
+ part_no.text = str(count)
+
+ etag_id = SubElement(part, 'ETag')
+ etag_id.text = str(etag)
+
+ data = tostring(root)
+
+ params = {'uploadId': upload_id}
+ request_path = '?'.join((object_path, urlencode(params)))
+ response = self.connection.request(request_path, data=data,
+ method='POST', container=container)
+
+ if response.status != httplib.OK:
+ element = response.object
+ # pylint: disable=maybe-no-member
+ code, message = response._parse_error_details(element=element)
+ msg = 'Error in multipart commit: %s (%s)' % (message, code)
+ raise LibcloudError(msg, driver=self)
+
+ # Get the server's etag to be passed back to the caller
+ body = response.parse_body()
+ server_hash = body.find(fixxpath(xpath='ETag',
+ namespace=self.namespace)).text
+ return server_hash
+
+ def _abort_multipart(self, object_path, upload_id, container=None):
+ """
+ Aborts an already initiated multipart upload
+
+ :param object_path: Server side object path.
+ :type object_path: ``str``
+
+ :param upload_id: ID of the multipart upload.
+ :type upload_id: ``str``
+
+ :keyword container: The container owning the object to which data is
+ being uploaded
+ :type container: :class:`Container`
+ """
+
+ params = {'uploadId': upload_id}
+ request_path = '?'.join((object_path, urlencode(params)))
+ resp = self.connection.request(request_path, method='DELETE',
+ container=container)
+
+ if resp.status != httplib.NO_CONTENT:
+ raise LibcloudError('Error in multipart abort. status_code=%d' %
+ (resp.status), driver=self)
+
+ def _upload_object(self, object_name, content_type, upload_func,
+ upload_func_kwargs, request_path, request_method='PUT',
+ headers=None, file_path=None, iterator=None,
+ container=None):
+ """
+ Helper function for setting common request headers and calling the
+ passed in callback which uploads an object.
+ """
+ headers = headers or {}
+
+ if file_path and not os.path.exists(file_path):
+ raise OSError('File %s does not exist' % (file_path))
+
+ if iterator is not None and not hasattr(iterator, 'next') and not \
+ hasattr(iterator, '__next__'):
+ raise AttributeError('iterator object must implement next() ' +
+ 'method.')
+
+ if not content_type:
+ if file_path:
+ name = file_path
+ else:
+ name = object_name
+ content_type, _ = guess_file_mime_type(name)
+
+ if not content_type:
+ if self.strict_mode:
+ raise AttributeError('File content-type could not be '
+ 'guessed and no content_type value '
+ 'is provided')
+ else:
+ # Fallback to a content-type
+ content_type = DEFAULT_CONTENT_TYPE
+
+ file_size = None
+
+ if iterator:
+ if self.supports_chunked_encoding:
+ headers['Transfer-Encoding'] = 'chunked'
+ upload_func_kwargs['chunked'] = True
+ else:
+ # Chunked transfer encoding is not supported. Need to buffer
+ # all the data in memory so we can determine file size.
+ iterator = read_in_chunks(
+ iterator=iterator)
+ data = exhaust_iterator(iterator=iterator)
+
+ file_size = len(data)
+ upload_func_kwargs['data'] = data
+ else:
+ file_size = os.path.getsize(file_path)
+ upload_func_kwargs['chunked'] = False
+
+ if file_size is not None and 'Content-Length' not in headers:
+ headers['Content-Length'] = file_size
+
+ headers['Content-Type'] = content_type
+ response = self.connection.request(request_path,
+ method=request_method, data=None,
+ headers=headers, raw=True,
+ container=container)
+
+ upload_func_kwargs['response'] = response
+ success, data_hash, bytes_transferred = upload_func(
+ **upload_func_kwargs)
+
+ if not success:
+ raise LibcloudError(
+ value='Object upload failed, Perhaps a timeout?', driver=self)
+
+ result_dict = {'response': response, 'data_hash': data_hash,
+ 'bytes_transferred': bytes_transferred}
+ return result_dict
+
+ def _to_containers(self, obj, xpath):
+ for element in obj.findall(fixxpath(xpath=xpath,
+ namespace=self.namespace)):
+ yield self._to_container(element)
+
+ def _to_container(self, element):
+ extra = {
+ 'creation_date': findtext(element=element, xpath='CreationDate',
+ namespace=self.namespace),
+ 'location': findtext(element=element, xpath='Location',
+ namespace=self.namespace)
+ }
+
+ container = Container(name=findtext(element=element, xpath='Name',
+ namespace=self.namespace),
+ extra=extra,
+ driver=self
+ )
+
+ return container
+
+ def _to_objs(self, obj, xpath, container):
+ return [self._to_obj(element, container) for element in
+ obj.findall(fixxpath(xpath=xpath, namespace=self.namespace))]
+
+ def _to_obj(self, element, container):
+ owner_id = findtext(element=element, xpath='Owner/ID',
+ namespace=self.namespace)
+ owner_display_name = findtext(element=element,
+ xpath='Owner/DisplayName',
+ namespace=self.namespace)
+ meta_data = {'owner': {'id': owner_id,
+ 'display_name': self._safe_decode(
+ owner_display_name)}}
+ last_modified = findtext(element=element,
+ xpath='LastModified',
+ namespace=self.namespace)
+ extra = {'last_modified': last_modified}
+
+ name = self._safe_decode(findtext(element=element, xpath='Key',
+ namespace=self.namespace))
+ obj = Object(name=name,
+ size=int(findtext(element=element, xpath='Size',
+ namespace=self.namespace)),
+ hash=findtext(element=element, xpath='ETag',
+ namespace=self.namespace).replace('"', ''),
+ extra=extra,
+ meta_data=meta_data,
+ container=container,
+ driver=self
+ )
+
+ return obj
+
+ def _safe_decode(self, encoded):
+ """
+ Decode it as an escaped string and then treat the content as
+ UTF-8 encoded.
+ """
+ try:
+ if encoded:
+ unescaped, _ign = codecs.escape_decode(encoded)
+ return unescaped.decode('utf-8')
+ return encoded
+ except Exception:
+ return encoded
+
+ def _get_container_path(self, container):
+ """
+ Return a container path
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :return: A path for this container.
+ :rtype: ``str``
+ """
+ return '/%s' % (container.name)
+
+ def _get_object_path(self, container, object_name):
+ """
+ Return an object's path.
+        The Aliyun OSS API puts the container name in the host name,
+        so the container argument is ignored here.
+
+ :param container: Container instance
+ :type container: :class:`Container`
+
+ :param object_name: Object name
+ :type object_name: :class:`str`
+
+ :return: A path for this object.
+ :rtype: ``str``
+ """
+ object_name_cleaned = self._clean_object_name(object_name)
+ object_path = '/%s' % object_name_cleaned
+ return object_path
+
+ def _headers_to_object(self, object_name, container, headers):
+ hash = headers['etag'].replace('"', '')
+ extra = {'content_type': headers['content-type'],
+ 'etag': headers['etag']}
+ meta_data = {}
+
+ if 'last-modified' in headers:
+ extra['last_modified'] = headers['last-modified']
+
+ for key, value in headers.items():
+ if not key.lower().startswith(self.http_vendor_prefix + 'meta-'):
+ continue
+
+ key = key.replace(self.http_vendor_prefix + 'meta-', '')
+ meta_data[key] = value
+
+ obj = Object(name=object_name, size=int(headers['content-length']),
+ hash=hash, extra=extra,
+ meta_data=meta_data,
+ container=container,
+ driver=self)
+ return obj
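
The branch in _put_object() above is the main subtlety of the upload path: when the connection does not support chunked transfer encoding, the supplied iterator is exhausted into memory so that a Content-Length header can be sent. A minimal stand-alone sketch of that fallback, using the same helpers from libcloud.utils.files (illustrative only, not part of this patch; the payload and content type are placeholders):

    from io import BytesIO

    from libcloud.utils.files import read_in_chunks, exhaust_iterator

    # Caller-supplied iterator for an object upload.
    iterator = BytesIO(b'example payload')

    # Without chunked encoding the driver buffers everything in memory and
    # derives Content-Length from the buffered data.
    data = exhaust_iterator(iterator=read_in_chunks(iterator=iterator))

    headers = {'Content-Length': len(data),
               'Content-Type': 'application/octet-stream'}
    print(headers)
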
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/storage/providers.py
----------------------------------------------------------------------
diff --git a/libcloud/storage/providers.py b/libcloud/storage/providers.py
index f02588c..0bf1e41 100644
--- a/libcloud/storage/providers.py
+++ b/libcloud/storage/providers.py
@@ -58,6 +58,8 @@ DRIVERS = {
('libcloud.storage.drivers.auroraobjects', 'AuroraObjectsStorageDriver'),
Provider.BACKBLAZE_B2:
('libcloud.storage.drivers.backblaze_b2', 'BackblazeB2StorageDriver'),
+ Provider.OSS:
+ ('libcloud.storage.drivers.oss', 'OSSStorageDriver'),
# Deprecated
Provider.CLOUDFILES_US:
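
With Provider.OSS registered above, the new driver is resolved through the standard lazy lookup. A short usage sketch; the credentials, container name and object name are placeholders, and the default OSS endpoint is assumed:

    from io import BytesIO

    from libcloud.storage.providers import get_driver
    from libcloud.storage.types import Provider

    # get_driver() imports libcloud.storage.drivers.oss on first use and
    # returns the OSSStorageDriver class mapped above.
    cls = get_driver(Provider.OSS)
    driver = cls('my-access-key-id', 'my-access-key-secret')

    container = driver.get_container(container_name='my-container')

    # upload_object_via_stream() ends up in the _put_object() logic shown
    # earlier: chunked when supported, buffered otherwise.
    obj = driver.upload_object_via_stream(iterator=BytesIO(b'hello oss'),
                                          container=container,
                                          object_name='hello.txt')
    print(obj.name, obj.size)
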
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/storage/types.py
----------------------------------------------------------------------
diff --git a/libcloud/storage/types.py b/libcloud/storage/types.py
index 837897f..9c536e3 100644
--- a/libcloud/storage/types.py
+++ b/libcloud/storage/types.py
@@ -43,6 +43,7 @@ class Provider(object):
:cvar NIMBUS: Nimbus.io driver
:cvar LOCAL: Local storage driver
:cvar AURORAOBJECTS: AuroraObjects storage driver
+ :cvar OSS: Aliyun OSS storage driver
"""
DUMMY = 'dummy'
S3 = 's3'
@@ -64,6 +65,7 @@ class Provider(object):
KTUCLOUD = 'ktucloud'
AURORAOBJECTS = 'auroraobjects'
BACKBLAZE_B2 = 'backblaze_b2'
+ OSS = 'oss'
     # Deprecated
CLOUDFILES_US = 'cloudfiles_us'
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/common/test_aliyun.py
----------------------------------------------------------------------
diff --git a/libcloud/test/common/test_aliyun.py b/libcloud/test/common/test_aliyun.py
new file mode 100644
index 0000000..da071ba
--- /dev/null
+++ b/libcloud/test/common/test_aliyun.py
@@ -0,0 +1,45 @@
+import sys
+import unittest
+
+from libcloud.common import aliyun
+from libcloud.common.aliyun import AliyunRequestSignerAlgorithmV1_0
+from libcloud.test import LibcloudTestCase
+
+
+class AliyunRequestSignerAlgorithmV1_0TestCase(LibcloudTestCase):
+
+ def setUp(self):
+ self.signer = AliyunRequestSignerAlgorithmV1_0('testid', 'testsecret',
+ '1.0')
+
+ def test_sign_request(self):
+ params = {'TimeStamp': '2012-12-26T10:33:56Z',
+ 'Format': 'XML',
+ 'AccessKeyId': 'testid',
+ 'Action': 'DescribeRegions',
+ 'SignatureMethod': 'HMAC-SHA1',
+ 'RegionId': 'region1',
+ 'SignatureNonce': 'NwDAxvLU6tFE0DVb',
+ 'Version': '2014-05-26',
+ 'SignatureVersion': '1.0'}
+ method = 'GET'
+ path = '/'
+
+ expected = 'K9fCVP6Jrklpd3rLYKh1pfrrFNo='
+ self.assertEqual(expected,
+ self.signer._sign_request(params, method, path))
+
+
+class AliyunCommonTestCase(LibcloudTestCase):
+
+ def test_percent_encode(self):
+ data = {
+ 'abc': 'abc',
+ ' *~': '%20%2A~'
+ }
+ for key in data:
+ self.assertEqual(data[key], aliyun._percent_encode(key))
+
+
+if __name__ == '__main__':
+ sys.exit(unittest.main())
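
The expected value in test_sign_request follows Aliyun's signature version 1.0 scheme: percent-encode each key and value, sort the pairs, join them into a canonical query string, build METHOD&%2F&<encoded query string>, and HMAC-SHA1 the result with the secret key plus a trailing '&'. A stand-alone Python 3 sketch of that documented scheme (illustrative, not the patch's own implementation):

    import base64
    import hashlib
    import hmac
    from urllib.parse import quote


    def percent_encode(value):
        # Match the encoding the test data above expects: spaces as %20,
        # '*' as %2A, '~' left unencoded.
        encoded = quote(str(value), safe='')
        return encoded.replace('+', '%20').replace('*', '%2A').replace('%7E', '~')


    def sign_request(params, method, path, secret):
        pairs = ['%s=%s' % (percent_encode(k), percent_encode(v))
                 for k, v in sorted(params.items())]
        canonicalized = '&'.join(pairs)
        string_to_sign = '&'.join([method, percent_encode(path),
                                   percent_encode(canonicalized)])
        digest = hmac.new((secret + '&').encode('utf-8'),
                          string_to_sign.encode('utf-8'),
                          hashlib.sha1).digest()
        return base64.b64encode(digest).decode('utf-8')

Run against the parameters and secret from the test case above, this should reproduce 'K9fCVP6Jrklpd3rLYKh1pfrrFNo=' as long as the encoding rules match the driver's.
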
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/attach_disk.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/attach_disk.xml b/libcloud/test/compute/fixtures/ecs/attach_disk.xml
new file mode 100644
index 0000000..cfce115
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/attach_disk.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<AttachDiskResponse>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</AttachDiskResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/copy_image.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/copy_image.xml b/libcloud/test/compute/fixtures/ecs/copy_image.xml
new file mode 100644
index 0000000..a25cba5
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/copy_image.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<CopyImageResponse>
+ <ImageId>i-28n7dkvov</ImageId>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</CopyImageResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_disk.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_disk.xml b/libcloud/test/compute/fixtures/ecs/create_disk.xml
new file mode 100644
index 0000000..9ba554e
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_disk.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<CreateDiskResponse>
+ <DiskId>i-28n7dkvov</DiskId>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</CreateDiskResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_image.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_image.xml b/libcloud/test/compute/fixtures/ecs/create_image.xml
new file mode 100644
index 0000000..f25bc69
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_image.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<CreateImageResponse>
+ <ImageId>i-28n7dkvov</ImageId>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</CreateImageResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_instance.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_instance.xml b/libcloud/test/compute/fixtures/ecs/create_instance.xml
new file mode 100644
index 0000000..b9699bd
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_instance.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<CreateInstanceResponse>
+ <InstanceId>i-28n7dkvov</InstanceId>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</CreateInstanceResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml b/libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml
new file mode 100644
index 0000000..24eae7e
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_node_describe_instances.xml
@@ -0,0 +1,56 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DescribeInstancesResponse>
+ <PageNumber>1</PageNumber>
+ <TotalCount>1</TotalCount>
+ <PageSize>10</PageSize>
+ <RequestId>CA75EE06-D5F7-433C-870B-5042EED6C1DC</RequestId>
+ <Instances>
+ <Instance>
+ <ImageId>ubuntu1404_64_20G_aliaegis_20150325.vhd</ImageId>
+ <InnerIpAddress>
+ <IpAddress>10.163.197.74</IpAddress>
+ </InnerIpAddress>
+ <InstanceTypeFamily>ecs.t1</InstanceTypeFamily>
+ <VlanId></VlanId>
+ <InstanceId>i-28n7dkvov</InstanceId>
+ <EipAddress>
+ <IpAddress></IpAddress>
+ <AllocationId></AllocationId>
+ <InternetChargeType></InternetChargeType>
+ </EipAddress>
+ <InternetMaxBandwidthIn>-1</InternetMaxBandwidthIn>
+ <ZoneId>cn-qingdao-b</ZoneId>
+ <InternetChargeType>PayByTraffic</InternetChargeType>
+ <SerialNumber>ca0122d9-374d-4fce-9fc0-71f7c3eaf1c3</SerialNumber>
+ <IoOptimized>false</IoOptimized>
+ <Memory>1024</Memory>
+ <Cpu>1</Cpu>
+ <VpcAttributes>
+ <NatIpAddress></NatIpAddress>
+ <PrivateIpAddress />
+ <VSwitchId></VSwitchId>
+ <VpcId></VpcId>
+ </VpcAttributes>
+ <InternetMaxBandwidthOut>1</InternetMaxBandwidthOut>
+ <DeviceAvailable>true</DeviceAvailable>
+ <SecurityGroupIds>
+ <SecurityGroupId>sg-28ou0f3xa</SecurityGroupId>
+ </SecurityGroupIds>
+ <InstanceName>iZ28n7dkvovZ</InstanceName>
+ <Description></Description>
+ <InstanceNetworkType>classic</InstanceNetworkType>
+ <PublicIpAddress>
+ <IpAddress>114.215.124.73</IpAddress>
+ </PublicIpAddress>
+ <HostName>iZ28n7dkvovZ</HostName>
+ <InstanceType>ecs.t1.small</InstanceType>
+ <CreationTime>2015-12-27T07:35Z</CreationTime>
+ <Status>Running</Status>
+ <ClusterId></ClusterId>
+ <RegionId>cn-qingdao</RegionId>
+ <OperationLocks />
+ <InstanceChargeType>PostPaid</InstanceChargeType>
+ <ExpiredTime>2999-09-08T16:00Z</ExpiredTime>
+ </Instance>
+ </Instances>
+</DescribeInstancesResponse>
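
This fixture mirrors the ECS DescribeInstances response that the compute driver turns into Node objects. A small stand-alone parsing sketch with ElementTree (the file name is the fixture added above; the resulting dict layout is illustrative, not the driver's actual mapping):

    import xml.etree.ElementTree as ET

    tree = ET.parse('create_node_describe_instances.xml')
    for instance in tree.findall('Instances/Instance'):
        node = {
            'id': instance.findtext('InstanceId'),
            'name': instance.findtext('InstanceName'),
            'state': instance.findtext('Status'),
            'public_ips': [ip.text for ip in
                           instance.findall('PublicIpAddress/IpAddress')],
            'private_ips': [ip.text for ip in
                            instance.findall('InnerIpAddress/IpAddress')],
        }
        print(node)
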
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_snapshot.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_snapshot.xml b/libcloud/test/compute/fixtures/ecs/create_snapshot.xml
new file mode 100644
index 0000000..54793b1
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_snapshot.xml
@@ -0,0 +1,5 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<CreateSnapshotResponse>
+ <SnapshotId>i-28n7dkvov</SnapshotId>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</CreateSnapshotResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml b/libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml
new file mode 100644
index 0000000..d25c9a1
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/create_volume_describe_disks.xml
@@ -0,0 +1,36 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DescribeDisksResponse>
+ <Disks>
+ <Disk>
+ <DiskChargeType>PostPaid</DiskChargeType>
+ <DeleteAutoSnapshot>true</DeleteAutoSnapshot>
+ <DeleteWithInstance>true</DeleteWithInstance>
+ <EnableAutoSnapshot>true</EnableAutoSnapshot>
+ <Category>cloud</Category>
+ <Description>Description</Description>
+ <DiskName>ubuntu1404sys</DiskName>
+ <Size>5</Size>
+ <Type>system</Type>
+ <InstanceId>i-28whl2nj2</InstanceId>
+ <CreationTime>2014-07-23T02:44:06Z</CreationTime>
+ <ImageId>ubuntu1404_64_20G_aliaegis_20150325.vhd</ImageId>
+ <ZoneId>cn-qingdao-b</ZoneId>
+ <AttachedTime>2016-01-04T15:02:17Z</AttachedTime>
+ <DetachedTime></DetachedTime>
+ <Device>/dev/xvda</Device>
+ <OperationLocks></OperationLocks>
+ <Portable>false</Portable>
+ <ProductCode></ProductCode>
+ <RegionId>cn-qingdao</RegionId>
+ <DiskId>d-28zfrmo13</DiskId>
+ <SourceSnapshotId></SourceSnapshotId>
+ <Status>In_use</Status>
+ <OperationLocks />
+ <ExpiredTime>2999-09-08T16:00Z</ExpiredTime>
+ </Disk>
+ </Disks>
+ <PageNumber>1</PageNumber>
+ <PageSize>10</PageSize>
+ <TotalCount>1</TotalCount>
+ <RequestId>ED5CF6DD-71CA-462C-9C94-A61A78A01479</RequestId>
+</DescribeDisksResponse>
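
The same approach reads the DescribeDisks fixture above, which backs the volume-related calls; again an illustrative sketch rather than the driver's own code:

    import xml.etree.ElementTree as ET

    tree = ET.parse('create_volume_describe_disks.xml')
    for disk in tree.findall('Disks/Disk'):
        volume = {
            'id': disk.findtext('DiskId'),
            'name': disk.findtext('DiskName'),
            'size_gb': int(disk.findtext('Size')),
            'state': disk.findtext('Status'),
            'attached_to': disk.findtext('InstanceId'),
        }
        print(volume)
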
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/delete_disk.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/delete_disk.xml b/libcloud/test/compute/fixtures/ecs/delete_disk.xml
new file mode 100644
index 0000000..d83b677
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/delete_disk.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DeleteDiskResponse>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</DeleteDiskResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/delete_image.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/delete_image.xml b/libcloud/test/compute/fixtures/ecs/delete_image.xml
new file mode 100644
index 0000000..9c0382f
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/delete_image.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DeleteImageResponse>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</DeleteImageResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/delete_instance.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/delete_instance.xml b/libcloud/test/compute/fixtures/ecs/delete_instance.xml
new file mode 100644
index 0000000..b1f017c
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/delete_instance.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DeleteInstanceResponse>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</DeleteInstanceResponse>
http://git-wip-us.apache.org/repos/asf/libcloud/blob/e6002e0a/libcloud/test/compute/fixtures/ecs/delete_snapshot.xml
----------------------------------------------------------------------
diff --git a/libcloud/test/compute/fixtures/ecs/delete_snapshot.xml b/libcloud/test/compute/fixtures/ecs/delete_snapshot.xml
new file mode 100644
index 0000000..7fac47b
--- /dev/null
+++ b/libcloud/test/compute/fixtures/ecs/delete_snapshot.xml
@@ -0,0 +1,4 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<DeleteSnapshotResponse>
+ <RequestId>DA38B11A-9D6D-420B-942F-95D68606C4FC</RequestId>
+</DeleteSnapshotResponse>