diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..2db0d00
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,14 @@
+Copyright (c) 2016 netease.com
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..d8b5f8a
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,6 @@
+include LICENSE
+include README.rst
+include MANIFEST.in
+include setup.py
+
+recursive-include test_nos *.py
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..5a4b0f9
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,1137 @@
+NOS Python SDK
+==============
+
+The NOS Python SDK implements the NOS object-operation interfaces. With this SDK you can quickly and easily build Python applications that use the NOS object storage service.
+
+Supported features
+------------------
+
+Object operations
+^^^^^^^^^^^^^^^^^
+
+* Delete Object -- delete a single object
+* Delete Multiple Objects -- delete multiple objects in the same bucket with a single HTTP request
+* Get Object -- read the content of an object
+* Head Object -- retrieve the metadata of an object
+* List Objects -- list the objects in a bucket
+* Put Object -- upload an object
+* Put Object - Copy -- copy an object
+* Put Object - Move -- move an object within a bucket
+
+Multipart upload operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+* Initiate Multipart Upload -- initiate a multipart upload
+* Upload Part -- upload a single part
+* Complete Multipart Upload -- complete a multipart upload
+* Abort Multipart Upload -- abort a multipart upload and delete the parts already uploaded
+* List Parts -- list the parts that have been uploaded
+* List Multipart Uploads -- list all in-progress multipart uploads
+
+Using the interfaces
+--------------------
+
+Before calling any object-operation interface you need to create an instance of the ``nos.Client`` class. Any call may raise an exception: use ``nos.exceptions.ServiceException`` to catch errors returned by the NOS server and ``nos.exceptions.ClientException`` to catch errors raised by the NOS client.
+
+Instantiating nos.Client
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Example
+
+::
+
+ client = nos.Client(
+ access_key_id="string",
+ access_key_secret="string",
+ transport_class=nos.transport.Transport,
+ **kwargs
+ )
+
+Parameters
+
+* access_key_id(string) -- The access key ID. It can be set to None when the target bucket is public-read. Default: None.
+* access_key_secret(string) -- The access key secret. It can be set to None when the target bucket is public-read. Default: None.
+* transport_class(class) -- The class used to transfer data to and from the NOS server; it must provide at least a ``perform_request`` member function. Default: nos.transport.Transport.
+* kwargs -- Other optional parameters:
+  * end_point(string) -- The domain name of the server that data is transferred to and from. Default: `nos-eastchina1.126.net`.
+  * num_pools(integer) -- The size of the HTTP connection pool. Default: 16.
+  * timeout(integer) -- The connection timeout, in seconds.
+  * max_retries(integer) -- The number of retries performed when the server responds with an HTTP 5XX error. Default: 2.
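+
+For instance, a client for a private bucket can be created as follows; the key values are placeholders and all keyword arguments are optional:
+
+::
+
+    import nos
+
+    client = nos.Client(
+        access_key_id="your-access-key-id",
+        access_key_secret="your-access-key-secret",
+        end_point="nos-eastchina1.126.net",
+        timeout=10,
+        max_retries=2
+    )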
+
+Exceptions raised by nos.Client
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If an error occurs at runtime, the Python SDK raises a corresponding exception. All exceptions derive from the NOSException class, which has two subclasses: ClientException and ServiceException. Catching these exceptions and logging the relevant information when calling the Python SDK helps with troubleshooting.
+
+ClientException
+:::::::::::::::
+
+ClientException covers errors raised on the SDK client side, for example uploading an object whose name is empty.
+ClientException has the following subclasses, which identify specific client-side errors:
+
+.. list-table::
+ :widths: 5 10
+ :header-rows: 1
+
+  * - Class
+    - Raised when
+  * - InvalidBucketName
+    - The given bucket name is empty
+  * - InvalidObjectName
+    - The given object name is empty
+  * - FileOpenModeError
+    - The object passed in is a file that was not opened in binary mode
+  * - XmlParseError
+    - Parsing the XML content of the server response failed
+  * - SerializationError
+    - Serializing the object to upload failed
+  * - ConnectionError
+    - Connecting to the server failed
+  * - ConnectionTimeout
+    - The connection to the server timed out
+
+ServiceException
+::::::::::::::::
+
+ServiceException covers errors returned by the NOS server. When the NOS server responds with a 4xx or 5xx HTTP status code, the Python SDK converts the response into a ServiceException.
+ServiceException has the following subclasses, which identify specific server-side errors:
+
+.. list-table::
+ :widths: 5 10
+ :header-rows: 1
+
+  * - Class
+    - Raised when
+  * - MultiObjectDeleteException
+    - Some objects could not be deleted during a batch delete
+  * - BadRequestError
+    - The server returned an HTTP 400 response
+  * - ForbiddenError
+    - The server returned an HTTP 403 response
+  * - NotFoundError
+    - The server returned an HTTP 404 response
+  * - MethodNotAllowedError
+    - The server returned an HTTP 405 response
+  * - ConflictError
+    - The server returned an HTTP 409 response
+  * - LengthRequiredError
+    - The server returned an HTTP 411 response
+  * - RequestedRangeNotSatisfiableError
+    - The server returned an HTTP 416 response
+  * - InternalServerErrorError
+    - The server returned an HTTP 500 response
+  * - NotImplementedError
+    - The server returned an HTTP 501 response
+  * - ServiceUnavailableError
+    - The server returned an HTTP 503 response
+
+Example of using nos.Client and handling exceptions
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+::
+
+ try:
+ resp = client.XXX(
+ bucket=bucket,
+ key=key
+ )
+ except nos.exceptions.ServiceException as e:
+ print (
+ 'ServiceException: %s\n'
+ 'status_code: %s\n'
+ 'error_type: %s\n'
+ 'error_code: %s\n'
+ 'request_id: %s\n'
+ 'message: %s\n'
+ ) % (
+ e,
+        e.status_code, # HTTP status code of the error
+        e.error_type, # error type defined by the NOS server
+        e.error_code, # error code defined by the NOS server
+        e.request_id, # request ID, which helps NOS engineers trace the failed request
+        e.message # error message
+ )
+ except nos.exceptions.ClientException as e:
+ print (
+ 'ClientException: %s\n'
+ 'message: %s\n'
+ ) % (
+ e,
+        e.message # client-side error message
+ )
+
+Object operations
+^^^^^^^^^^^^^^^^^
+
+Delete Object
+:::::::::::::
+
+Example
+
+::
+
+ resp = client.delete_object(
+ bucket="string",
+ key="string"
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+
+Example return value
+
+::
+
+    {
+        "x_nos_request_id": "17b21e42ac11000001390ab891440240"
+    }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+
+
+Delete Multiple Objects
+:::::::::::::::::::::::
+
+Example
+
+::
+
+ resp = client.delete_objects(
+ bucket="string",
+ keys=[
+ "string1",
+ "string2",
+ ...
+ ],
+ quiet=True|False
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* keys(list) -- The list of names of the objects to delete.
+* quiet(boolean) -- Whether quiet mode is enabled (in quiet mode the response does not list the individual deleted objects).
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <DeleteResult>
+        <Deleted>
+            <Key>1.jpg</Key>
+        </Deleted>
+        <Error>
+            <Key>2.jpg</Key>
+            <Code>AccessDenied</Code>
+            <Message>Access Denied</Message>
+        </Error>
+        <Error>
+            <Key>3.jpg</Key>
+            <Code>NoSuchKey</Code>
+            <Message>No Such Key</Message>
+        </Error>
+    </DeleteResult>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 30
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - DeleteResult
+    - | Container element of the multi-delete response
+      | Type: container
+  * - Deleted
+    - | Container element for an object that was deleted successfully
+      | Type: container
+      | Parent: DeleteResult
+  * - Key
+    - | Key of the deleted object
+      | Type: string
+      | Parent: Deleted, Error
+  * - VersionId
+    - | Version ID of the deleted object
+      | Type: number
+      | Parent: Deleted, Error
+  * - Error
+    - | Container element for an object that could not be deleted
+      | Type: container
+      | Parent: DeleteResult
+  * - Code
+    - | Error code returned for the failed deletion
+      | Type: string
+      | Parent: Error
+  * - Message
+    - | Detailed error message for the failed deletion
+      | Type: string
+      | Parent: Error
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
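+
+If some of the objects cannot be deleted, ``delete_objects`` raises ``nos.exceptions.MultiObjectDeleteException``, whose ``errors`` attribute lists the failed keys. A minimal sketch (bucket and key names are placeholders):
+
+::
+
+    try:
+        resp = client.delete_objects(
+            bucket="mybucket",
+            keys=["1.jpg", "2.jpg", "3.jpg"]
+        )
+    except nos.exceptions.MultiObjectDeleteException as e:
+        for err in e.errors:
+            # each entry is a dict with "key", "code" and "message"
+            print("%s: %s (%s)" % (err["key"], err["code"], err["message"]))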
+
+
+Get Object
+::::::::::
+
+Example
+
+::
+
+ resp = client.get_object(
+ bucket="string",
+ key="string",
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* kwargs -- Other optional parameters:
+  * range(string) -- Download only the specified byte range; see the Range header in RFC 2616.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+ "content_length": 1024,
+ "content_range": "0-1024/234564",
+ "content_type": "application/octet-stream;charset=UTF-8",
+ "etag": "3adbbad1791fbae3ec908894c4963870",
+ "body": StreamingBody()
+ }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* content_length(integer) -- The number of bytes returned.
+* content_range(string) -- The range of the returned data.
+* content_type(string) -- The content type of the returned data.
+* etag(string) -- The hash of the object, which reflects changes to the object content.
+* body(StreamingBody) -- The object data.
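+
+The ``body`` field is a stream that can be read in chunks. A minimal sketch of saving an object to a local file (bucket, key and file name are placeholders):
+
+::
+
+    resp = client.get_object(bucket="mybucket", key="photo.jpg")
+    with open("local-photo.jpg", "wb") as f:
+        while True:
+            chunk = resp["body"].read(65536)
+            if not chunk:
+                break
+            f.write(chunk)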
+
+
+Head Object
+:::::::::::
+
+Example
+
+::
+
+ resp = client.head_object(
+ bucket="string",
+ key="string"
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+ "content_length": 1024,
+ "content_type": "application/octet-stream;charset=UTF-8",
+ "etag": "3adbbad1791fbae3ec908894c4963870",
+ "last_modified": "Mon, 23 May 2016 16:07:15 Asia/Shanghai"
+ }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* content_length(integer) -- The size of the object in bytes.
+* content_type(string) -- The content type of the object.
+* etag(string) -- The hash of the object, which reflects changes to the object content.
+* last_modified(string) -- The time the object was last modified.
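+
+``head_object`` is also a convenient way to check whether an object exists: if the key is missing the server responds with HTTP 404, which the SDK raises as ``NotFoundError``. A minimal sketch (names are placeholders):
+
+::
+
+    try:
+        resp = client.head_object(bucket="mybucket", key="photo.jpg")
+        print("size: %s, etag: %s" % (resp["content_length"], resp["etag"]))
+    except nos.exceptions.NotFoundError:
+        print("object does not exist")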
+
+
+List Objects
+::::::::::::
+
+Example
+
+::
+
+ resp = client.list_objects(
+ bucket="string",
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* kwargs -- Other optional parameters:
+  * delimiter(string) -- Delimiter used to group keys.
+  * marker(string) -- Lexicographic starting marker; only keys after this marker are listed.
+  * limit(integer) -- Maximum number of keys to return; the result contains at most this many entries. Range: 0-1000, default: 100.
+  * prefix(string) -- Only return objects whose keys start with this prefix. Prefixes can be used to split the objects in a bucket into groups, similar to directories in a file system.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <ListBucketObjects>
+        <Name>dream</Name>
+        <Prefix>user</Prefix>
+        <MaxKeys>2</MaxKeys>
+        <NextMark>user/yao</NextMark>
+        <IsTruncated>true</IsTruncated>
+        <Contents>
+            <Key>user/lin</Key>
+            <LastModified>2012-01-01T12:00:00.000Z</LastModified>
+            <Etag>258ef3fdfa96f00ad9f27c383fc9acce</Etag>
+            <Size>143663</Size>
+            <StorageClass>Standard</StorageClass>
+        </Contents>
+        <Contents>
+            <Key>user/yao</Key>
+            <LastModified>2012-01-01T12:00:00.000Z</LastModified>
+            <Etag>828ef3fdfa96f00ad9f27c383fc9ac7f</Etag>
+            <Size>423983</Size>
+            <StorageClass>Standard</StorageClass>
+        </Contents>
+    </ListBucketObjects>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 35
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - Contents
+    - | Object metadata, describing one object
+      | Type: container
+      | Parent: ListBucketObjects
+      | Children: Key, LastModified, Size, Etag
+  * - CommonPrefixes
+    - | Only present when a delimiter is specified
+      | Type: string
+      | Parent: ListBucketObjects
+  * - delimiter
+    - | The delimiter
+      | Type: string
+      | Parent: ListBucketObjects
+  * - DisplayName
+    - | The owner of the object
+      | Type: string
+      | Parent: ListBucketObjects.Contents.Owner
+  * - Etag
+    - | The hash of the object
+      | Type: string
+      | Parent: ListBucketObjects.Contents
+  * - ID
+    - | The ID of the object owner
+      | Type: string
+      | Parent: ListBucketObjects.Contents.Owner
+  * - IsTruncated
+    - | Whether the listing is truncated; set to true if not all results are returned because limit was set
+      | Type: boolean
+      | Parent: ListBucketObjects
+  * - Key
+    - | The name of the object
+      | Type: string
+      | Parent: ListBucketObjects.Contents
+  * - LastModified
+    - | The date and time the object was last modified
+      | Type: date, format: yyyy-MM-dd"T"HH:mm:ss.SSSZ
+      | Parent: ListBucketObjects.Contents
+  * - Marker
+    - | The starting position of the listing, equal to the Marker request parameter
+      | Type: string
+      | Parent: ListBucketObjects
+  * - NextMark
+    - | The starting point of the next page
+      | Type: string
+      | Parent: ListBucketObjects
+  * - MaxKeys
+    - | The requested limit on the number of objects
+      | Type: number
+      | Parent: ListBucketObjects
+  * - Name
+    - | The name of the requested bucket
+      | Type: string
+      | Parent: ListBucketObjects
+  * - Owner
+    - | The bucket owner
+      | Type: container
+      | Parent: ListBucketObjects.Contents | CommonPrefixes
+      | Children: DisplayName | ID
+  * - Prefix
+    - | The requested key prefix
+      | Type: string
+      | Parent: ListBucketObjects
+  * - Size
+    - | The size of the object in bytes
+      | Type: number
+      | Parent: ListBucketObjects.Contents
+  * - StorageClass
+    - | The storage class
+      | Type: string
+      | Parent: ListBucketObjects.Contents
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
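+
+The keys in the listing can be extracted from the ``response`` tree with the standard ``xml.etree.ElementTree`` API. A minimal sketch (bucket name and prefix are placeholders):
+
+::
+
+    resp = client.list_objects(bucket="mybucket", prefix="user/", limit=100)
+    for contents in resp["response"].findall("Contents"):
+        print("%s %s" % (contents.findtext("Key"), contents.findtext("Size")))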
+
+
+Put Object
+::::::::::
+
+Example
+
+::
+
+ resp = client.put_object(
+ bucket="string",
+ key="string",
+ body=serializable_object,
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* body(serializable_object) -- The object content, which can be a file handle, string, dict or any other serializable object.
+* kwargs -- Other optional parameters:
+  * meta_data(dict) -- User-defined metadata, given as key-value pairs; keys and values are strings, and keys must start with \`x-nos-meta-\`.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+ "etag": "fbacf535f27731c9771645a39863328"
+ }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* etag(string) -- The hash of the object, which reflects changes to the object content.
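+
+When uploading from a file, open it in binary mode, otherwise the client raises ``FileOpenModeError``. A minimal sketch with custom metadata (names and the metadata value are placeholders):
+
+::
+
+    with open("photo.jpg", "rb") as f:
+        resp = client.put_object(
+            bucket="mybucket",
+            key="photo.jpg",
+            body=f,
+            meta_data={"x-nos-meta-author": "someone"}
+        )
+    print(resp["etag"])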
+
+
+Put Object - Copy
+:::::::::::::::::
+
+Example
+
+::
+
+ resp = client.copy_object(
+ src_bucket="string",
+ src_key="string",
+ dest_bucket="string",
+ dest_key="string"
+ )
+
+Parameters
+
+* src_bucket(string) -- The bucket name of the source object.
+* src_key(string) -- The name of the source object.
+* dest_bucket(string) -- The bucket name of the destination object.
+* dest_key(string) -- The name of the destination object.
+
+Example return value
+
+::
+
+    {
+        "x_nos_request_id": "17b21e42ac11000001390ab891440240"
+    }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+
+
+Move Object
+:::::::::::
+
+Example
+
+::
+
+ resp = client.move_object(
+ src_bucket="string",
+ src_key="string",
+ dest_bucket="string",
+ dest_key="string"
+ )
+
+Parameters
+
+* src_bucket(string) -- The bucket name of the source object.
+* src_key(string) -- The name of the source object.
+* dest_bucket(string) -- The bucket name of the destination object.
+* dest_key(string) -- The name of the destination object.
+
+Example return value
+
+::
+
+    {
+        "x_nos_request_id": "17b21e42ac11000001390ab891440240"
+    }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+
+
+Initiate Multipart Upload
+:::::::::::::::::::::::::
+
+Example
+
+::
+
+ resp = client.create_multipart_upload(
+ bucket="string",
+ key="string",
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* kwargs -- Other optional parameters:
+  * meta_data(dict) -- User-defined metadata, given as key-value pairs; keys and values are strings, and keys must start with \`x-nos-meta-\`.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <InitiateMultipartUploadResult>
+        <Bucket>filestation</Bucket>
+        <Key>movie.avi</Key>
+        <UploadId>VXBsb2FkIElEIGZvciA2aWWpbmcncyBteS1tb3S5tMnRzIHVwbG9hZA</UploadId>
+    </InitiateMultipartUploadResult>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 30
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - InitiateMultipartUploadResult
+    - | Container element of the response
+      | Type: container
+      | Children: Key, Bucket
+  * - Key
+    - | The key of the object
+      | Type: string
+      | Parent: InitiateMultipartUploadResult
+  * - Bucket
+    - | The bucket of the object
+      | Type: string
+      | Parent: InitiateMultipartUploadResult
+  * - UploadId
+    - | The ID of the multipart upload, used to mark the parts that belong to this object
+      | Type: string
+      | Parent: InitiateMultipartUploadResult
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
+
+
+Upload Part
+:::::::::::
+
+Example
+
+::
+
+ resp = client.upload_part(
+ bucket="string",
+ key="string",
+ part_num=2,
+ upload_id="string",
+ body=serializable_object
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* part_num(integer) -- The part number (1-10000).
+* upload_id(string) -- The upload ID of the multipart upload.
+* body(serializable_object) -- The part content, which can be a file handle, string, dict or any other serializable object.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+ "etag": "fbacf535f27731c9771645a39863328"
+ }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* etag(string) -- The hash of the uploaded part, which reflects changes to its content.
+
+
+Complete Multipart Upload
+:::::::::::::::::::::::::
+
+After all parts have been uploaded, you must call the Complete Multipart Upload API to finish the multipart upload of the file. When performing this operation you must provide the list of all valid parts (including each part number and ETag); after receiving the part list, NOS validates each part one by one. Once all parts pass validation, NOS assembles them into a complete object.
+The x-nos-Object-md5 extension header can be used to send the MD5 of the whole object, which is used to build the deduplication library (Put Object uses Content-MD5 for this purpose).
+
+Example
+
+::
+
+ resp = client.complete_multipart_upload(
+ bucket="string",
+ key="string",
+ upload_id="string",
+ info=[
+ {
+ "part_num": 1,
+ "etag": "string"
+ },
+ {
+ "part_num": 2,
+ "etag": "string"
+ },
+ ...
+ ],
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* upload_id(string) -- The upload ID of the multipart upload.
+* info(list) -- The list of all valid parts (including each part number and etag).
+* kwargs -- Other optional parameters:
+  * object_md5(string) -- The MD5 of the whole object, used for deduplication.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <CompleteMultipartUploadResult>
+        <Location>filestation.nos.netease.com/movie.avi</Location>
+        <Bucket>filestation</Bucket>
+        <Key>movie.avi</Key>
+        <ETag>"3858f62230ac3c915f300c664312c11f-9"</ETag>
+    </CompleteMultipartUploadResult>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 30
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - Bucket
+    - | The bucket that holds the newly created object
+      | Type: string
+      | Parent: CompleteMultipartUploadResult
+  * - CompleteMultipartUploadResult
+    - | Container element of the response
+      | Type: container
+      | Children: Location, Bucket, Key, ETag
+  * - ETag
+    - | The entity tag of the newly created object
+      | Type: string
+      | Parent: CompleteMultipartUploadResult
+  * - Key
+    - | The key of the newly created object
+      | Type: string
+      | Parent: CompleteMultipartUploadResult
+  * - Location
+    - | The resource URL of the newly created object
+      | Type: string
+      | Parent: CompleteMultipartUploadResult
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
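+
+Putting the pieces together, a full multipart upload chains ``create_multipart_upload``, ``upload_part`` and ``complete_multipart_upload``. A minimal sketch that uploads a local file in fixed-size parts (bucket, key, file name and part size are placeholders):
+
+::
+
+    init = client.create_multipart_upload(bucket="mybucket", key="movie.avi")
+    upload_id = init["response"].findtext("UploadId")
+
+    info = []
+    part_num = 1
+    with open("movie.avi", "rb") as f:
+        while True:
+            data = f.read(10 * 1024 * 1024)
+            if not data:
+                break
+            part = client.upload_part(
+                bucket="mybucket",
+                key="movie.avi",
+                part_num=part_num,
+                upload_id=upload_id,
+                body=data
+            )
+            info.append({"part_num": part_num, "etag": part["etag"]})
+            part_num += 1
+
+    client.complete_multipart_upload(
+        bucket="mybucket",
+        key="movie.avi",
+        upload_id=upload_id,
+        info=info
+    )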
+
+
+Abort Multipart Upload
+::::::::::::::::::::::
+
+Example
+
+::
+
+ resp = client.abort_multipart_upload(
+ bucket="string",
+ key="string",
+ upload_id="string"
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* upload_id(string) -- The upload ID of the multipart upload.
+
+Example return value
+
+::
+
+    {
+        "x_nos_request_id": "17b21e42ac11000001390ab891440240"
+    }
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+
+
+List Parts
+::::::::::
+
+Example
+
+::
+
+ resp = client.list_parts(
+ bucket="string",
+ key="string",
+ upload_id="string",
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* key(string) -- The object name.
+* upload_id(string) -- The upload ID of the multipart upload.
+* kwargs -- Other optional parameters:
+  * limit(integer) -- Maximum number of records returned in the response. Range: 0-1000, default: 1000.
+  * part_number_marker(string) -- Part-number marker; only parts with larger part numbers are listed.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <ListPartsResult>
+        <Bucket>example-Bucket</Bucket>
+        <Key>example-Object</Key>
+        <UploadId>23r54i252358235332523f23</UploadId>
+        <Owner>
+            <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
+            <DisplayName>someName</DisplayName>
+        </Owner>
+        <StorageClass>STANDARD</StorageClass>
+        <PartNumberMarker>1</PartNumberMarker>
+        <NextPartNumberMarker>3</NextPartNumberMarker>
+        <MaxParts>2</MaxParts>
+        <IsTruncated>true</IsTruncated>
+        <Part>
+            <PartNumber>2</PartNumber>
+            <LastModified>2010-11-10T20:48:34.000Z</LastModified>
+            <ETag>"7778aef83f66abc1fa1e8477f296d394"</ETag>
+            <Size>10485760</Size>
+        </Part>
+        <Part>
+            <PartNumber>3</PartNumber>
+            <LastModified>2010-11-10T20:48:33.000Z</LastModified>
+            <ETag>"aaaa18db4cc2f85cedef654fccc4a4x8"</ETag>
+            <Size>10485760</Size>
+        </Part>
+    </ListPartsResult>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 30
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - ListPartsResult
+    - | Lists the information of the uploaded parts
+      | Type: container
+      | Children: Bucket, Key, UploadId, Owner, StorageClass, PartNumberMarker, NextPartNumberMarker, MaxParts, IsTruncated, Part
+  * - Bucket
+    - | The name of the bucket
+      | Type: String
+      | Parent: ListPartsResult
+  * - Key
+    - | The key of the object
+      | Type: String
+      | Parent: ListPartsResult
+  * - UploadId
+    - | The ID of the multipart upload
+      | Type: String
+      | Parent: ListPartsResult
+  * - ID
+    - | The ID of the object owner
+      | Type: String
+      | Parent: Owner
+  * - DisplayName
+    - | The owner of the object
+      | Type: String
+      | Parent: Owner
+  * - Owner
+    - | Information about the bucket owner
+      | Children: ID, DisplayName
+      | Type: container
+      | Parent: ListPartsResult
+  * - StorageClass
+    - | The storage class
+      | Type: String
+      | Parent: ListPartsResult
+  * - PartNumberMarker
+    - | The part number after which this listing begins
+      | Type: Integer
+      | Parent: ListPartsResult
+  * - NextPartNumberMarker
+    - | The part-number-marker to use in the next List operation
+      | Type: Integer
+      | Parent: ListPartsResult
+  * - MaxParts
+    - | The maximum number of parts allowed in the response
+      | Type: Integer
+      | Parent: ListPartsResult
+  * - IsTruncated
+    - | Whether the listing is truncated; set to true if not all results are returned because limit was set
+      | Type: Boolean
+      | Parent: ListPartsResult
+  * - Part
+    - | Information about one uploaded part
+      | Children: PartNumber, LastModified, ETag, Size
+      | Type: container
+      | Parent: ListPartsResult
+  * - PartNumber
+    - | The number identifying this part
+      | Type: Integer
+      | Parent: Part
+  * - LastModified
+    - | The time the part was uploaded
+      | Type: Date
+      | Parent: Part
+  * - ETag
+    - | The ETag returned when the part was uploaded
+      | Type: String
+      | Parent: Part
+  * - Size
+    - | The size of the uploaded part data
+      | Type: Integer
+      | Parent: Part
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
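+
+The ``Part`` entries in the response can be used to rebuild the ``info`` list expected by ``complete_multipart_upload``, for example when resuming an interrupted upload. A minimal sketch (names are placeholders; ``upload_id`` is the ID returned by ``create_multipart_upload``):
+
+::
+
+    resp = client.list_parts(bucket="mybucket", key="movie.avi", upload_id=upload_id)
+    info = [
+        {"part_num": int(p.findtext("PartNumber")), "etag": p.findtext("ETag")}
+        for p in resp["response"].findall("Part")
+    ]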
+
+
+List Multipart Uploads
+::::::::::::::::::::::
+
+Example
+
+::
+
+ resp = client.list_multipart_uploads(
+ bucket="string",
+ **kwargs
+ )
+
+Parameters
+
+* bucket(string) -- The bucket name.
+* kwargs -- Other optional parameters:
+  * limit(integer) -- Maximum number of records returned in the response. Range: 0-1000, default: 1000.
+  * key_marker(string) -- An upload key; only uploads whose keys are greater than this key-marker are listed.
+
+Example return value
+
+::
+
+ {
+ "x_nos_request_id": "17b21e42ac11000001390ab891440240",
+        "response": xml.etree.ElementTree() # an xml.etree.ElementTree object
+ }
+
+The ``response`` element of the return value, serialized as a string, may look like:
+
+::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <ListMultipartUploadsResult>
+        <Bucket>Bucket</Bucket>
+        <NextKeyMarker>my-movie.m2ts</NextKeyMarker>
+        <Upload>
+            <Key>my-divisor</Key>
+            <UploadId>XMgbGlrZSBlbHZpbmcncyBub3QgaGF2aW5nIG11Y2ggbHVjaw</UploadId>
+            <Owner>
+                <ID>75aa57f09aa0c8caeab4f8c24e99d10f8e7faeebf76c078efc7c6caea54ba06a</ID>
+                <DisplayName>OwnerDisplayName</DisplayName>
+            </Owner>
+            <StorageClass>STANDARD</StorageClass>
+        </Upload>
+        <Upload>
+            <Key>my-movie.m2ts</Key>
+            <UploadId>VXBsb2FkIElEIGZvciBlbHZpbcyBteS1tb3ZpZS5tMnRzIHVwbG9hZA</UploadId>
+            <Owner>
+                <ID>b1d16700c70b0b05597d7acd6a3f92be</ID>
+                <DisplayName>OwnerDisplayName</DisplayName>
+            </Owner>
+            <StorageClass>STANDARD</StorageClass>
+        </Upload>
+    </ListMultipartUploadsResult>
+
+*Note: values retrieved from the elements below via the xml.etree.ElementTree member functions are always returned as strings; the types listed are the original types and must be converted by the caller as needed.*
+
+.. list-table::
+ :widths: 10 30
+ :header-rows: 1
+
+  * - Element
+    - Description
+  * - ListMultipartUploadsResult
+    - | Container element of the response
+      | Type: container
+      | Children: Bucket, KeyMarker, Upload, NextKeyMarker, Owner
+  * - Bucket
+    - | The bucket of the objects
+      | Type: string
+      | Parent: ListMultipartUploadsResult
+  * - NextKeyMarker
+    - | The key-marker to use in the next listing
+      | Type: String
+      | Parent: ListMultipartUploadsResult
+  * - IsTruncated
+    - | Whether the listing is truncated; set to true if not all results are returned because limit was set
+      | Type: Boolean
+      | Parent: ListMultipartUploadsResult
+  * - Upload
+    - | Type: container
+      | Children: Key, UploadId
+      | Parent: ListMultipartUploadsResult
+  * - Key
+    - | The key of the object
+      | Type: string
+      | Parent: Upload
+  * - UploadId
+    - | The ID of the multipart upload
+      | Type: String
+      | Parent: Upload
+  * - ID
+    - | The ID of the object owner
+      | Type: String
+      | Parent: Owner
+  * - DisplayName
+    - | The owner of the object
+      | Type: String
+      | Parent: Owner
+  * - Owner
+    - | Information about the bucket owner
+      | Type: container
+      | Children: DisplayName | ID
+      | Parent: Upload
+  * - StorageClass
+    - | The storage class
+      | Type: String
+      | Parent: Upload
+  * - Initiated
+    - | The time the multipart upload was initiated
+      | Type: Date
+      | Parent: Upload
+  * - ListMultipartUploadsResult.Prefix
+    - | When the request included a prefix parameter, the response echoes that prefix
+      | Type: String
+      | Parent: ListMultipartUploadsResult
+
+Return value
+The return value is a dict:
+
+* x_nos_request_id(string) -- The ID that uniquely identifies the request.
+* response(xml.etree.ElementTree) -- The XML object containing the response information.
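+
+The in-progress uploads can be read from the ``response`` tree, for example to find upload IDs that should be aborted. A minimal sketch (bucket name is a placeholder):
+
+::
+
+    resp = client.list_multipart_uploads(bucket="mybucket")
+    for upload in resp["response"].findall("Upload"):
+        print("%s %s" % (upload.findtext("Key"), upload.findtext("UploadId")))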
diff --git a/nos/__init__.py b/nos/__init__.py
new file mode 100644
index 0000000..20101ec
--- /dev/null
+++ b/nos/__init__.py
@@ -0,0 +1,19 @@
+# -*- coding:utf8 -*-
+
+from __future__ import absolute_import
+
+from .client import Client
+
+import sys
+
+__all__ = ["Client", "transport", "serializer", "connection", "exceptions"]
+__version__ = "1.0.0"
+
+
+if (2, 7) <= sys.version_info < (3, 2):
+ # On Python 2.7 and Python3 < 3.2, install no-op handler to silence
+ # `No handlers could be found for logger "nos"` message per
+ #
+ import logging
+ logger = logging.getLogger('nos')
+ logger.addHandler(logging.NullHandler())
diff --git a/nos/client/__init__.py b/nos/client/__init__.py
new file mode 100644
index 0000000..b9fe4b9
--- /dev/null
+++ b/nos/client/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding:utf8 -*-
+
+from __future__ import unicode_literals
+
+from .nos_client import Client
+
+__all__ = ['Client']
diff --git a/nos/client/auth.py b/nos/client/auth.py
new file mode 100644
index 0000000..2d57f83
--- /dev/null
+++ b/nos/client/auth.py
@@ -0,0 +1,166 @@
+# -*- coding:utf8 -*-
+
+import base64
+import hashlib
+import hmac
+import time
+import urllib2
+import copy
+from .utils import (HTTP_HEADER, NOS_HEADER_PREFIX, TIME_CST_FORMAT,
+ CHUNK_SIZE, SUB_RESOURCE)
+
+
+class RequestMetaData(object):
+ """
+ Used to generate authorization header and request url
+ """
+ def __init__(self, access_key_id, access_key_secret, method,
+ bucket=None, key=None, end_point='nos.netease.com',
+ params={}, body=None, headers={}):
+ self.access_key_id = access_key_id
+ self.access_key_secret = access_key_secret
+ self.method = method
+ self.bucket = bucket
+ self.key = key
+ self.end_point = end_point
+ self.params = params
+ self.headers = copy.deepcopy(headers)
+ self.body = body
+ self.url = ''
+
+ self._complete_headers()
+ self._complete_url()
+
+ def get_url(self):
+ return self.url
+
+ def get_headers(self):
+ return self.headers
+
+ def _complete_headers(self):
+ # init date header
+ self.headers[HTTP_HEADER.DATE] = time.strftime(
+ TIME_CST_FORMAT, time.gmtime(time.time() + 8 * 3600)
+ )
+
+ # init content-md5 header
+ if self.body is not None:
+ md5 = hashlib.md5()
+ if isinstance(self.body, file):
+ offset = self.body.tell()
+ while True:
+ data = self.body.read(CHUNK_SIZE)
+ if not data:
+ break
+ md5.update(data)
+ md5sum = md5.hexdigest()
+ self.headers[HTTP_HEADER.CONTENT_MD5] = md5sum
+ self.body.seek(offset, 0)
+ else:
+ md5.update(self.body)
+ md5sum = md5.hexdigest()
+ self.headers[HTTP_HEADER.CONTENT_MD5] = md5sum
+
+ # init authorization header
+ if None not in (self.access_key_id, self.access_key_secret):
+ str_to_sign = self._get_string_to_sign()
+ hmac_sha1 = hmac.new(str(self.access_key_secret),
+ str_to_sign, hashlib.sha256)
+ b64_hmac_sha1 = base64.encodestring(hmac_sha1.digest()).strip()
+ authorization_string = b64_hmac_sha1.rstrip('\n')
+
+ self.headers[HTTP_HEADER.AUTHORIZATION] = 'NOS %s:%s' % (
+ self.access_key_id, authorization_string
+ )
+
+ def _complete_url(self):
+ """
+ build the url with query string
+ :return: url with query string
+ """
+ if self.bucket is None:
+ self.url = 'http://%s/' % self.end_point
+ else:
+ self.url = 'http://%s.%s/' % (self.bucket, self.end_point)
+
+ if self.key is not None:
+ self.url += urllib2.quote(self.key.strip('/'), '*')
+
+ if not self.params:
+ return
+
+ pairs = []
+ for k, v in self.params.iteritems():
+ piece = k
+ if v is not None:
+ piece += "=%s" % urllib2.quote(str(v), '*')
+ pairs.append(piece)
+ query_string = '&'.join(pairs)
+ self.url += ("?" + query_string)
+
+ def _get_string_to_sign(self):
+ """
+ Generate string which should be signed and setted in header while
+ sending request
+ @rtype: string
+ @return: canonical string for netease storage service
+ """
+ headers = dict([(k.lower(), str(v).strip())
+ for k, v in self.headers.iteritems()])
+
+ meta_headers = dict([(k, v) for k, v in headers.iteritems()
+ if k.startswith(NOS_HEADER_PREFIX)])
+
+ content_type = headers.get(HTTP_HEADER.CONTENT_TYPE.lower(), '')
+ content_md5 = headers.get(HTTP_HEADER.CONTENT_MD5.lower(), '')
+ date = headers.get(HTTP_HEADER.DATE.lower(), '')
+ expires = headers.get(HTTP_HEADER.EXPIRES.lower(), '')
+
+ # compute string to sign
+ str_to_sign = '%s\n%s\n%s\n%s\n' % (
+ self.method,
+ content_md5,
+ content_type,
+ expires or date
+ )
+
+ sorted_meta_headers = meta_headers.keys()
+ sorted_meta_headers.sort()
+
+ for meta_header in sorted_meta_headers:
+ str_to_sign += '%s:%s\n' % (meta_header, meta_headers[meta_header])
+
+ str_to_sign += "%s" % (self._get_canonicalized_resource())
+ return str_to_sign
+
+ def _get_canonicalized_resource(self):
+ """
+ get canoicalized resource /bucket/obj?upload
+ """
+ # append the root path
+ buf = '/'
+ # append the bucket if it exists
+ if self.bucket is not None:
+ buf += "%s/" % self.bucket
+
+ # add the key. even if it doesn't exist, add the slash
+ if self.key is not None:
+ buf += urllib2.quote(self.key.strip('/'), '*')
+
+ # handle sub source in special query string arguments
+ if self.params:
+ buf += "?"
+ pairs = []
+ for k, v in self.params.iteritems():
+ if k not in SUB_RESOURCE:
+ continue
+ piece = k
+ if v is not None:
+ piece += "=%s" % urllib2.quote(str(v), '*')
+ pairs.append(piece)
+
+ buf += '&'.join(pairs)
+ if len(pairs) == 0:
+ return buf.rstrip('?')
+
+ return buf
diff --git a/nos/client/nos_client.py b/nos/client/nos_client.py
new file mode 100644
index 0000000..8f98c1f
--- /dev/null
+++ b/nos/client/nos_client.py
@@ -0,0 +1,636 @@
+# -*- coding:utf8 -*-
+
+from .utils import (HTTP_METHOD, HTTP_HEADER, RETURN_KEY)
+from ..exceptions import (XmlParseError, MultiObjectDeleteException,
+ InvalidBucketName, InvalidObjectName)
+from ..transport import Transport
+from ..compat import ET
+
+import cgi
+import urllib2
+
+
+def parse_xml(status, headers, body):
+    data = ''
+    try:
+        data = body.read()
+ return ET.fromstring(data)
+ except Exception as e:
+ raise XmlParseError(
+ '\n%s\nstatus: %s\nheaders: %s\nbody: \n%s\n' % (
+ str(e), status, headers, data
+ ),
+ e
+ )
+
+
+class Client(object):
+ """
+ The client for accessing the Netease NOS web service.
+
+ You can use it as follows:
+
+
+ import nos
+
+ access_key_id = 'xxxxxxxxx'
+ access_key_secret = 'xxxxxxxxx'
+ bucket = 'xxxx'
+ key = 'xxxx'
+
+ client = nos.Client(
+ access_key_id=access_key_id,
+ access_key_secret=access_key_secret
+ )
+ try:
+ resp = client.get_object(
+ bucket=bucket,
+ key=key
+ )
+ except nos.exceptions.ServiceException as e:
+ print (
+ 'ServiceException: %s\n'
+ 'status_code: %s\n'
+ 'error_type: %s\n'
+ 'error_code: %s\n'
+ 'request_id: %s\n'
+ 'message: %s\n'
+ ) % (
+ e,
+ e.status_code,
+ e.error_type,
+ e.error_code,
+ e.request_id,
+ e.message
+ )
+ except nos.exceptions.ClientException as e:
+ print (
+ 'ClientException: %s\n'
+ 'message: %s\n'
+ ) % (
+ e,
+ e.message
+ )
+
+ """
+ def __init__(self, access_key_id=None, access_key_secret=None,
+ transport_class=Transport, **kwargs):
+ """
+        If the bucket is public-read, `access_key_id` and `access_key_secret`
+        can be set to `None`; otherwise they must be given as strings.
+
+ :arg access_key_id(string): The access key ID. `None` is set by default.
+ :arg access_key_secret(string): The secret access key. `None` is set by
+ default.
+ :arg transport_class(class): The class will be used for
+ transport. `nos.transport.Transport` is set by default.
+ :arg kwargs: Other optional parameters.
+            :opt_arg end_point(string): The endpoint that requests are sent
+                to. `nos-eastchina1.126.net` is set by default.
+ :opt_arg num_pools(integer): Number of connection pools to cache
+                before discarding the least recently used pool. `16` is set by
+ default.
+ :opt_arg timeout(integer): Timeout while connecting to server.
+            :opt_arg max_retries(integer): The number of retries performed when
+                the server responds with an HTTP 5XX error. `2` is set by default.
+ """
+ self.transport = transport_class(
+ access_key_id=access_key_id,
+ access_key_secret=access_key_secret,
+ **kwargs
+ )
+
+ def delete_object(self, bucket, key):
+ """
+ Delete the specified object in the specified bucket.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.DELETE, bucket, key
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ )
+ }
+
+ def delete_objects(self, bucket, keys, quiet=False):
+ """
+ Delete the objects in the specified bucket.
+
+ :arg bucket(string): The name of the Nos bucket.
+        :arg keys(list): The list of names of the Nos objects to delete.
+        :arg quiet(boolean): Whether quiet mode is enabled; false by default.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ body = self.__get_delete_objects_body(keys, quiet)
+ params = {'delete': None}
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.POST, bucket, params=params, body=body
+ )
+
+ ret_xml = parse_xml(status, headers, body)
+ errors = [
+ {
+ 'key': i.findtext('Key', ''),
+ 'code': i.findtext('Code', ''),
+ 'message': i.findtext('Message', '')
+ }
+ for i in ret_xml.findall('Error')
+ ]
+ if errors:
+ raise MultiObjectDeleteException(errors)
+
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: ret_xml
+ }
+
+ def get_object(self, bucket, key, **kwargs):
+ """
+ Get the object stored in NOS under the specified bucket and key.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :arg kwargs: Other optional parameters.
+ :opt_arg range(string): The Range header of request.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element content_length(integer): The Content-Length header of
+ response.
+ :element content_range(string): The Content-Range header of
+ response.
+ :element content_type(string): The Content-Type header of response.
+ :element etag(string): The ETag header of response.
+ :element body(StreamingBody): The response body of NOS server, which
+ can use functions such as read(), readline().
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ headers = {}
+ if 'range' in kwargs:
+ headers[HTTP_HEADER.RANGE] = kwargs['range']
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.GET, bucket, key, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.CONTENT_LENGTH: int(
+ headers.get(HTTP_HEADER.CONTENT_LENGTH, 0)
+ ),
+ RETURN_KEY.CONTENT_RANGE: headers.get(
+ HTTP_HEADER.CONTENT_RANGE, ''
+ ),
+ RETURN_KEY.CONTENT_TYPE: headers.get(HTTP_HEADER.CONTENT_TYPE, ''),
+ RETURN_KEY.ETAG: headers.get(HTTP_HEADER.ETAG, ''),
+ RETURN_KEY.BODY: body
+ }
+
+ def head_object(self, bucket, key):
+ """
+ Get info of the object stored in NOS under the specified bucket and key.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element content_length(integer): The Content-Length header of
+ response.
+ :element last_modified(string): The Last-Modified header of
+ response.
+ :element content_type(string): The Content-Type header of response.
+ :element etag(string): The ETag header of response.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.HEAD, bucket, key
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.CONTENT_LENGTH: int(
+ headers.get(HTTP_HEADER.CONTENT_LENGTH, 0)
+ ),
+ RETURN_KEY.LAST_MODIFIED: headers.get(
+ HTTP_HEADER.LAST_MODIFIED, ''
+ ),
+ RETURN_KEY.CONTENT_TYPE: headers.get(HTTP_HEADER.CONTENT_TYPE, ''),
+ RETURN_KEY.ETAG: headers.get(HTTP_HEADER.ETAG, '')
+ }
+
+ def list_objects(self, bucket, **kwargs):
+ """
+ Return a list of summary information about the objects in the specified
+ buckets.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg kwargs: Other optional parameters.
+ :opt_arg delimiter(string): Optional parameter that causes keys
+ that contain the same string between the prefix and the first
+ occurrence of the delimiter to be rolled up into a single result
+ element. These rolled-up keys are not returned elsewhere in the
+ response. The most commonly used delimiter is "/", which
+ simulates a hierarchical organization similar to a file system
+ directory structure.
+ :opt_arg marker(string): Optional parameter indicating where in the
+ bucket to begin listing. The list will only include keys that
+ occur lexicographically after the marker.
+ :opt_arg limit(integer): Optional parameter indicating the maximum
+ number of keys to include in the response. Nos might return fewer
+                than this, but will not return more. Even if limit is not
+ specified, Nos will limit the number of results in the response.
+ :opt_arg prefix(string): Optional parameter restricting the response
+ to keys which begin with the specified prefix. You can use
+ prefixes to separate a bucket into different sets of keys in a way
+ similar to how a file system uses folders.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ keys = set(['delimiter', 'marker', 'limit', 'prefix'])
+ params = {}
+ for k, v in kwargs.iteritems():
+ if k in keys:
+ params[k] = v
+
+ limit = params.pop('limit', None)
+ if limit is not None:
+ params['max-keys'] = str(limit)
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.GET, bucket, params=params
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: parse_xml(status, headers, body)
+ }
+
+ def put_object(self, bucket, key, body, **kwargs):
+ """
+ Upload the specified object to NOS under the specified bucket and key
+ name.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :arg body(serializable_object): The content of the Nos object, which can
+ be file, dict, list, string or any other serializable object.
+ :arg kwargs: Other optional parameters.
+ :opt_arg meta_data(dict): Represents the object metadata that is
+ stored with Nos. This includes custom user-supplied metadata and
+ the key should start with 'x-nos-meta-'.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element etag(string): The ETag header of response.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ headers = {}
+ for k, v in kwargs.get('meta_data', {}).iteritems():
+ headers[k] = v
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.PUT, bucket, key, body=body, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.ETAG: headers.get(HTTP_HEADER.ETAG, '')
+ }
+
+ def copy_object(self, src_bucket, src_key, dest_bucket, dest_key):
+ """
+ Copy a source object to a new destination in NOS.
+
+ :arg src_bucket(string): The name of the source bucket.
+ :arg src_key(string): The name of the source object.
+ :arg dest_bucket(string): The name of the destination bucket.
+ :arg dest_key(string): The name of the destination object.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ src_bucket = src_bucket.encode('utf-8') \
+ if isinstance(src_bucket, unicode) else src_bucket
+ src_key = src_key.encode('utf-8') \
+ if isinstance(src_key, unicode) else src_key
+ if src_bucket is not None and src_bucket == '':
+ raise InvalidBucketName()
+ if src_key is not None and src_key == '':
+ raise InvalidObjectName()
+
+ headers = {}
+ headers[HTTP_HEADER.X_NOS_COPY_SOURCE] = '/%s/%s' % (
+ src_bucket, urllib2.quote(src_key.strip('/'), '*')
+ )
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.PUT, dest_bucket, dest_key, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ )
+ }
+
+ def move_object(self, src_bucket, src_key, dest_bucket, dest_key):
+ """
+ Move a source object to a new destination in NOS.
+
+ :arg src_bucket(string): The name of the source bucket.
+ :arg src_key(string): The name of the source object.
+ :arg dest_bucket(string): The name of the destination bucket.
+ :arg dest_key(string): The name of the destination object.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ src_bucket = src_bucket.encode('utf-8') \
+ if isinstance(src_bucket, unicode) else src_bucket
+ src_key = src_key.encode('utf-8') \
+ if isinstance(src_key, unicode) else src_key
+ if src_bucket is not None and src_bucket == '':
+ raise InvalidBucketName()
+ if src_key is not None and src_key == '':
+ raise InvalidObjectName()
+
+ headers = {}
+ headers[HTTP_HEADER.X_NOS_MOVE_SOURCE] = '/%s/%s' % (
+ src_bucket, urllib2.quote(src_key.strip('/'), '*')
+ )
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.PUT, dest_bucket, dest_key, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ )
+ }
+
+ def create_multipart_upload(self, bucket, key, **kwargs):
+ """
+        Initiate a multipart upload and return a response which contains an
+ upload ID. This upload ID associates all the parts in the specific
+ upload and is used in each of your subsequent requests. You also include
+ this upload ID in the final request to either complete, or abort the
+ multipart upload request.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :arg kwargs: Other optional parameters.
+ :opt_arg meta_data(dict): Represents the object metadata that is
+ stored with Nos. This includes custom user-supplied metadata and
+ the key should start with 'x-nos-meta-'.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ headers = {}
+ for k, v in kwargs.get('meta_data', {}).iteritems():
+ headers[k] = v
+
+ params = {'uploads': None}
+ body = ''
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.POST, bucket, key, body=body,
+ params=params, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: parse_xml(status, headers, body)
+ }
+
+ def upload_part(self, bucket, key, part_num, upload_id, body):
+ """
+ Upload a part in a multipart upload. You must initiate a multipart
+ upload before you can upload any part.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+ :arg part_num(integer): The part number describing this part's position
+ relative to the other parts in the multipart upload. Part number must
+ be between 1 and 10,000 (inclusive).
+ :arg upload_id(string): The ID of an existing, initiated multipart
+ upload, with which this new part will be associated.
+ :arg body(serializable_object): The content of the Nos object, which can
+ be file, dict, list, string or any other serializable object.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element etag(string): The ETag header of response.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ params = {
+ 'partNumber': str(part_num),
+ 'uploadId': upload_id
+ }
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.PUT, bucket, key, body=body, params=params
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.ETAG: headers.get(HTTP_HEADER.ETAG, '')
+ }
+
+ def complete_multipart_upload(self, bucket, key, upload_id, info, **kwargs):
+ """
+ Complete a multipart upload by assembling previously uploaded parts.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+        :arg upload_id(string): The ID of the existing, initiated multipart
+            upload to complete.
+ :arg info(list): The list of part numbers and ETags to use when
+ completing the multipart upload.
+ :arg kwargs: Other optional parameters.
+ :opt_arg object_md5(string): MD5 of the whole object which is
+ multipart uploaded.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ params = {'uploadId': upload_id}
+ headers = {}
+ if 'object_md5' in kwargs:
+ headers[HTTP_HEADER.X_NOS_OBJECT_MD5] = kwargs['object_md5']
+
+ parts_xml = []
+        part_xml = '<Part><PartNumber>%s</PartNumber><ETag>%s</ETag></Part>'
+        for i in info:
+            parts_xml.append(part_xml % (i['part_num'], i['etag']))
+        body = ('<CompleteMultipartUpload>%s</CompleteMultipartUpload>' %
+                (''.join(parts_xml)))
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.POST, bucket, key, body=body,
+ params=params, headers=headers
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: parse_xml(status, headers, body)
+ }
+
+ def abort_multipart_upload(self, bucket, key, upload_id):
+ """
+ Abort a multipart upload. After a multipart upload is aborted, no
+ additional parts can be uploaded using that upload ID. The storage
+ consumed by any previously uploaded parts will be freed. However, if any
+ part uploads are currently in progress, those part uploads may or may
+ not succeed. As a result, it may be necessary to abort a given multipart
+ upload multiple times in order to completely free all storage consumed
+ by all parts.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+        :arg upload_id(string): The ID of the existing, initiated multipart
+            upload to abort.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ params = {'uploadId': upload_id}
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.DELETE, bucket, key, params=params
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ )
+ }
+
+ def list_parts(self, bucket, key, upload_id, **kwargs):
+ """
+ List the parts that have been uploaded for a specific multipart upload.
+
+ This method must include the upload ID, returned by the
+ `create_multipart_upload` operation. This request returns a maximum of
+ 1000 uploaded parts by default. You can restrict the number of parts
+ returned by specifying the limit parameter.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg key(string): The name of the Nos object.
+        :arg upload_id(string): The ID of the existing, initiated multipart
+            upload whose parts are listed.
+ :arg kwargs: Other optional parameters.
+ :opt_arg limit(integer): The optional maximum number of parts to be
+ returned in the part listing.
+ :opt_arg part_number_marker(string): The optional part number marker
+                indicating where in the results to begin listing parts.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ params = {'uploadId': upload_id}
+ if 'limit' in kwargs:
+ params['max-parts'] = str(kwargs['limit'])
+ if 'part_number_marker' in kwargs:
+ params['part-number-marker'] = kwargs['part_number_marker']
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.GET, bucket, key, params=params
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: parse_xml(status, headers, body)
+ }
+
+ def list_multipart_uploads(self, bucket, **kwargs):
+ """
+ List in-progress multipart uploads. An in-progress multipart upload is
+ a multipart upload that has been initiated, using the
+ `create_multipart_upload` request, but has not yet been completed or
+ aborted.
+
+ This operation returns at most 1,000 multipart uploads in the response
+ by default. The number of multipart uploads can be further limited using
+ the limit parameter.
+
+ :arg bucket(string): The name of the Nos bucket.
+ :arg kwargs: Other optional parameters.
+ :opt_arg limit(integer): The optional maximum number of uploads to
+ return.
+ :opt_arg key_marker(string): The optional key marker indicating
+ where in the results to begin listing.
+ :ret return_value(dict): The response of NOS server.
+ :element x_nos_request_id(string): ID which can point out the
+ request.
+ :element response(ElementTree): The response body of NOS server.
+        :raise ClientException: If any error occurs on the client side.
+        :raise ServiceException: If any error occurs on the NOS server side.
+ """
+ params = {'uploads': None}
+ if 'limit' in kwargs:
+ params['max-uploads'] = str(kwargs['limit'])
+ if 'key_marker' in kwargs:
+ params['key-marker'] = kwargs['key_marker']
+
+ status, headers, body = self.transport.perform_request(
+ HTTP_METHOD.GET, bucket, params=params
+ )
+ return {
+ RETURN_KEY.X_NOS_REQUEST_ID: headers.get(
+ HTTP_HEADER.X_NOS_REQUEST_ID, ''
+ ),
+ RETURN_KEY.RESPONSE: parse_xml(status, headers, body)
+ }
+
+ def __get_delete_objects_body(self, objects, quiet):
+        objs = ['<Object><Key>%s</Key></Object>' % (cgi.escape(i))
+                for i in objects]
+        if not objs:
+            objs = ['<Object><Key></Key></Object>']
+        return '<Delete><Quiet>%s</Quiet>%s</Delete>' % (
+            str(quiet).lower(), ''.join(objs)
+ )
diff --git a/nos/client/utils.py b/nos/client/utils.py
new file mode 100644
index 0000000..464ec80
--- /dev/null
+++ b/nos/client/utils.py
@@ -0,0 +1,62 @@
+# -*- coding:utf8 -*-
+
+
+def enum(**enums):
+ return type('Enum', (), enums)
+
+HTTP_METHOD = enum(
+ HEAD='HEAD',
+ GET='GET',
+ POST='POST',
+ PUT='PUT',
+ DELETE='DELETE'
+)
+
+HTTP_HEADER = enum(
+ AUTHORIZATION='Authorization',
+ CONTENT_LENGTH='Content-Length',
+ CONTENT_TYPE='Content-Type',
+ CONTENT_MD5='Content-MD5',
+ CONTENT_RANGE='Content-Range',
+ RANGE='Range',
+ LAST_MODIFIED='Last-Modified',
+ ETAG='ETag',
+ DATE='Date',
+ EXPIRES='Expires',
+ X_NOS_REQUEST_ID='x-nos-request-id',
+ X_NOS_COPY_SOURCE='x-nos-copy-source',
+ X_NOS_MOVE_SOURCE='x-nos-move-source',
+ X_NOS_OBJECT_MD5='x-nos-Object-md5'
+)
+
+RETURN_KEY = enum(
+ X_NOS_REQUEST_ID='x_nos_request_id',
+ RESPONSE='response',
+ ETAG='etag',
+ CONTENT_LENGTH='content_length',
+ CONTENT_RANGE='content_range',
+ CONTENT_TYPE='content_type',
+ LAST_MODIFIED='last_modified',
+ BODY='body'
+)
+
+SUB_RESOURCE = set([
+ 'acl',
+ 'location',
+ 'versioning',
+ 'versions',
+ 'versionId',
+ 'uploadId',
+ 'uploads',
+ 'partNumber',
+ 'delete',
+ 'deduplication',
+ 'crop',
+ 'resize',
+])
+
+CHUNK_SIZE = 65536
+MAX_OBJECT_SIZE = 100 * 1024 * 1024
+TIME_CST_FORMAT = '%a, %d %b %Y %H:%M:%S Asia/Shanghai'
+METADATA_PREFIX = 'x-nos-meta-'
+NOS_HEADER_PREFIX = 'x-nos-'
diff --git a/nos/compat.py b/nos/compat.py
new file mode 100644
index 0000000..c1adcee
--- /dev/null
+++ b/nos/compat.py
@@ -0,0 +1,20 @@
+# -*- coding:utf8 -*-
+
+import sys
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ string_types = basestring,
+ from urllib import quote_plus, urlencode
+ from urlparse import urlparse
+ from itertools import imap as map
+else:
+ string_types = str, bytes
+ from urllib.parse import quote_plus, urlencode, urlparse
+ map = map
+
+try:
+ import xml.etree.cElementTree as ET
+except ImportError:
+ import xml.etree.ElementTree as ET
diff --git a/nos/connection.py b/nos/connection.py
new file mode 100644
index 0000000..26bb778
--- /dev/null
+++ b/nos/connection.py
@@ -0,0 +1,60 @@
+# -*- coding:utf8 -*-
+
+import urllib3
+from urllib3.exceptions import ReadTimeoutError
+from .exceptions import (ConnectionError, ConnectionTimeout,
+ ServiceException, HTTP_EXCEPTIONS)
+from .compat import ET
+
+__all__ = ["Urllib3HttpConnection"]
+
+
+class Urllib3HttpConnection(object):
+ def __init__(self, num_pools=16, **kwargs):
+ self.pool = urllib3.PoolManager(num_pools=num_pools)
+
+ def perform_request(self, method, url, body=None, headers={}, timeout=None,
+ preload_content=False):
+ try:
+ kw = {'preload_content': preload_content}
+ if timeout:
+ kw['timeout'] = timeout
+
+ # in python2 we need to make sure the url and method are not
+ # unicode. Otherwise the body will be decoded into unicode too and
+ # that will fail.
+ if not isinstance(url, str):
+ url = url.encode('utf-8')
+ if not isinstance(method, str):
+ method = method.encode('utf-8')
+
+ response = self.pool.urlopen(method, url, body=body, retries=False,
+ headers=headers, **kw)
+ except ReadTimeoutError as e:
+ raise ConnectionTimeout(str(e), e)
+ except Exception as e:
+ raise ConnectionError(str(e), e)
+
+ if not (200 <= response.status < 300):
+ self._raise_error(response)
+ return response.status, response.getheaders(), response
+
+ def _raise_error(self, response):
+ """ Locate appropriate exception and raise it. """
+ status_code = response.status
+ error_type = response.reason
+ request_id = response.getheader('x-nos-request-id', '')
+ raw_data = response.read()
+ error_code = ''
+ message = ''
+ try:
+ resp_info = ET.fromstring(raw_data)
+ error_code = resp_info.findtext('Code', '')
+ message = resp_info.findtext('Message', '')
+ except:
+ # we don't care what went wrong
+ pass
+
+ raise HTTP_EXCEPTIONS.get(status_code, ServiceException)(
+ status_code, error_type, error_code, request_id, message
+ )
diff --git a/nos/exceptions.py b/nos/exceptions.py
new file mode 100644
index 0000000..d1c443a
--- /dev/null
+++ b/nos/exceptions.py
@@ -0,0 +1,266 @@
+# -*- coding:utf8 -*-
+
+__all__ = [
+ "NOSException",
+ "ClientException",
+ "ServiceException",
+ "InvalidBucketName",
+ "InvalidObjectName",
+ "XmlParseError",
+ "SerializationError",
+ "ConnectionError",
+ "ConnectionTimeout",
+ "MultiObjectDeleteException",
+ "BadRequestError",
+ "ForbiddenError",
+ "NotFoundError",
+ "MethodNotAllowedError",
+ "ConflictError",
+ "LengthRequiredError",
+ "RequestedRangeNotSatisfiableError",
+ "InternalServerErrorError",
+ "NotImplementedError",
+ "ServiceUnavailableError"
+]
+
+
+class NOSException(Exception):
+ """
+ Base class for all exceptions raised by this package's operations.
+ """
+
+
+class ClientException(NOSException):
+ """
+    Exception raised when an error occurs while the SDK client is working.
+ """
+ @property
+ def error(self):
+ return self.args[0]
+
+ @property
+ def info(self):
+ return self.args[1]
+
+ @property
+ def message(self):
+ return '%s(%s) caused by: %s(%s)' % (
+ self.__class__.__name__,
+ self.error,
+ self.info.__class__.__name__,
+ self.info
+ )
+
+ def __str__(self):
+ return self.message
+
+
+class ServiceException(NOSException):
+ """
+ Exception raised when NOS returns a non-OK (>=400) HTTP status code.
+ """
+ @property
+ def status_code(self):
+ return self.args[0]
+
+ @property
+ def error_type(self):
+ return self.args[1]
+
+ @property
+ def error_code(self):
+ return self.args[2]
+
+ @property
+ def request_id(self):
+ return self.args[3]
+
+ @property
+ def message(self):
+ return self.args[4]
+
+ def __str__(self):
+ return '%s(%s, %s, %s, %s, %s)' % (
+ self.__class__.__name__,
+ self.status_code,
+ self.error_type,
+ self.error_code,
+ self.request_id,
+ self.message
+ )
+
+
+class InvalidBucketName(ClientException):
+ """
+ Exception raised when bucket name is invalid.
+ """
+ @property
+ def error(self):
+ pass
+
+ @property
+ def info(self):
+ pass
+
+ @property
+ def message(self):
+ return 'InvalidBucketName caused by: bucket name is empty.'
+
+ def __str__(self):
+ return self.message
+
+
+class InvalidObjectName(ClientException):
+ """
+ Exception raised when object name is invalid.
+ """
+ @property
+ def error(self):
+ pass
+
+ @property
+ def info(self):
+ pass
+
+ @property
+ def message(self):
+ return 'InvalidObjectName caused by: object name is empty.'
+
+ def __str__(self):
+ return self.message
+
+
+class FileOpenModeError(ClientException):
+ """
+ Exception raised when upload object is a file that opened without the mode
+ for binary files.
+ """
+ @property
+ def error(self):
+ pass
+
+ @property
+ def info(self):
+ pass
+
+ @property
+ def message(self):
+ return ('FileOpenModeError caused by: object is a file that opened '
+ 'without the mode for binary files.')
+
+ def __str__(self):
+ return self.message
+
+
+class XmlParseError(ClientException):
+ """
+    Error raised when there was an exception while parsing XML.
+ """
+
+
+class SerializationError(ClientException):
+ """
+ Data passed in failed to serialize properly in the Serializer being
+ used.
+ """
+
+
+class ConnectionError(ClientException):
+ """
+ Error raised when there was an exception while talking to NOS server.
+ """
+
+
+class ConnectionTimeout(ConnectionError):
+ """ A network timeout. """
+
+
+class MultiObjectDeleteException(ServiceException):
+ """
+    Exception raised when there was an exception while deleting objects.
+ """
+ @property
+ def status_code(self):
+ pass
+
+ @property
+ def error_type(self):
+ pass
+
+ @property
+ def error_code(self):
+ pass
+
+ @property
+ def request_id(self):
+ pass
+
+ @property
+ def message(self):
+ return ('MultiObjectDeleteException caused by: some objects delete '
+ 'unsuccessfully.')
+
+ @property
+ def errors(self):
+ return self.args[0]
+
+ def __str__(self):
+ return '%s %s' % (
+ self.message,
+ self.errors
+ )
+
+
+class BadRequestError(ServiceException):
+ """ Exception representing a 400 status code. """
+
+
+class ForbiddenError(ServiceException):
+ """ Exception representing a 403 status code. """
+
+
+class NotFoundError(ServiceException):
+ """ Exception representing a 404 status code. """
+
+
+class MethodNotAllowedError(ServiceException):
+ """ Exception representing a 405 status code. """
+
+
+class ConflictError(ServiceException):
+ """ Exception representing a 409 status code. """
+
+
+class LengthRequiredError(ServiceException):
+ """ Exception representing a 411 status code. """
+
+
+class RequestedRangeNotSatisfiableError(ServiceException):
+ """ Exception representing a 416 status code. """
+
+
+class InternalServerErrorError(ServiceException):
+ """ Exception representing a 500 status code. """
+
+
+class NotImplementedError(ServiceException):
+ """ Exception representing a 501 status code. """
+
+
+class ServiceUnavailableError(ServiceException):
+ """ Exception representing a 503 status code. """
+
+
+# Mapping from HTTP status code to the corresponding exception class.
+HTTP_EXCEPTIONS = {
+ 400: BadRequestError,
+ 403: ForbiddenError,
+ 404: NotFoundError,
+ 405: MethodNotAllowedError,
+ 409: ConflictError,
+ 411: LengthRequiredError,
+ 416: RequestedRangeNotSatisfiableError,
+ 500: InternalServerErrorError,
+ 501: NotImplementedError,
+ 503: ServiceUnavailableError,
+}
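+
+# Illustrative lookup sketch (not part of the SDK): choose the exception
+# class for a response status, falling back to ServiceException for codes
+# that are not mapped. The status, error_type, error_code, request_id and
+# message names are placeholders:
+#
+#     exc_class = HTTP_EXCEPTIONS.get(status, ServiceException)
+#     raise exc_class(status, error_type, error_code, request_id, message)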
diff --git a/nos/serializer.py b/nos/serializer.py
new file mode 100644
index 0000000..030ff40
--- /dev/null
+++ b/nos/serializer.py
@@ -0,0 +1,42 @@
+# -*- coding:utf8 -*-
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+import uuid
+from datetime import date, datetime
+from decimal import Decimal
+
+from .exceptions import SerializationError
+from .compat import string_types
+
+__all__ = ["JSONSerializer"]
+
+
+class JSONSerializer(object):
+ def default(self, data):
+ if isinstance(data, (date, datetime)):
+ return data.isoformat()
+ elif isinstance(data, Decimal):
+ return float(data)
+ elif isinstance(data, uuid.UUID):
+ return str(data)
+ raise TypeError("Unable to serialize %r (type: %s)" % (data, type(data)))
+
+ def dumps(self, data):
+ # don't serialize strings
+ if isinstance(data, string_types):
+ if isinstance(data, unicode):
+ return data.encode("utf-8")
+ else:
+ return data
+
+ # don't serialize file
+ if isinstance(data, file):
+ return data
+
+ try:
+ return json.dumps(data, default=self.default, ensure_ascii=False)
+ except (ValueError, TypeError) as e:
+ raise SerializationError(data, e)
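+
+# Illustrative usage sketch (not part of the SDK):
+#
+#     serializer = JSONSerializer()
+#     serializer.dumps({'a': 'b'})   # -> '{"a": "b"}'
+#     serializer.dumps(u'54321')     # unicode is returned UTF-8 encoded
+#     serializer.dumps('12345')      # plain strings are returned unchanged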
diff --git a/nos/transport.py b/nos/transport.py
new file mode 100644
index 0000000..34da806
--- /dev/null
+++ b/nos/transport.py
@@ -0,0 +1,116 @@
+# -*- coding:utf8 -*-
+
+from .connection import Urllib3HttpConnection
+from .serializer import JSONSerializer
+from .exceptions import (NOSException, ServiceException, ConnectionError,
+ ConnectionTimeout, InvalidObjectName,
+ InvalidBucketName, FileOpenModeError,
+ BadRequestError)
+from .client.auth import RequestMetaData
+from .client.utils import MAX_OBJECT_SIZE
+
+__all__ = ["Transport"]
+
+
+class Transport(object):
+ """
+    Encapsulation of transport-related logic. Handles instantiation of the
+    HTTP connection (and its underlying connection pool) used to talk to NOS.
+
+ Main interface is the `perform_request` method.
+ """
+ def __init__(self, access_key_id=None, access_key_secret=None,
+ connection_class=Urllib3HttpConnection,
+ serializer=JSONSerializer(), end_point='nos-eastchina1.126.net',
+ max_retries=2, retry_on_status=(500, 501, 503, ),
+ retry_on_timeout=False, timeout=None, **kwargs):
+ self.access_key_id = access_key_id
+ self.access_key_secret = access_key_secret
+ self.max_retries = max_retries
+ self.retry_on_timeout = retry_on_timeout
+ self.retry_on_status = retry_on_status
+ self.timeout = timeout
+ self.end_point = end_point
+
+ # data serializer
+ self.serializer = serializer
+        # the HTTP connection used to talk to the NOS server
+ self.connection = connection_class(**kwargs)
+
+ def perform_request(self, method, bucket=None, key=None, params={},
+ body=None, headers={}, timeout=None):
+ method = method.encode('utf-8') \
+ if isinstance(method, unicode) else method
+ bucket = bucket.encode('utf-8') \
+ if isinstance(bucket, unicode) else bucket
+ key = key.encode('utf-8') if isinstance(key, unicode) else key
+
+ if bucket is not None and bucket == '':
+ raise InvalidBucketName()
+
+ if key is not None and key == '':
+ raise InvalidObjectName()
+
+ if body is not None:
+ body = self.serializer.dumps(body)
+ length = 0
+ if isinstance(body, file):
+ if 'b' not in body.mode.lower():
+ raise FileOpenModeError()
+ offset = body.tell()
+ body.seek(0, 2)
+ end = body.tell()
+ body.seek(offset, 0)
+ length = end - offset
+ else:
+ length = len(body)
+
+ if length > MAX_OBJECT_SIZE:
+ raise BadRequestError(
+ 400,
+ 'Bad Request',
+ 'EntityTooLarge',
+ '',
+ 'Request Entity Too Large'
+ )
+
+ meta_data = RequestMetaData(
+ access_key_id=self.access_key_id,
+ access_key_secret=self.access_key_secret,
+ method=method,
+ bucket=bucket,
+ end_point=self.end_point,
+ key=key,
+ params=params,
+ body=body,
+ headers=headers
+ )
+ url = meta_data.get_url()
+ headers = meta_data.get_headers()
+
+ for attempt in xrange(self.max_retries + 1):
+ try:
+ status, headers, body = self.connection.perform_request(
+ method, url, body, headers,
+ timeout=(timeout or self.timeout)
+ )
+
+ except NOSException as e:
+ retry = False
+ if isinstance(e, ConnectionTimeout):
+ retry = self.retry_on_timeout
+ elif isinstance(e, ConnectionError):
+ retry = True
+ elif (isinstance(e, ServiceException) and
+ e.status_code in self.retry_on_status):
+ retry = True
+
+ if retry:
+ # raise exception on last retry
+ if attempt >= self.max_retries:
+ raise
+ else:
+ raise
+
+ else:
+ return status, headers, body
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..4a7f5f2
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+
+from os.path import join, dirname
+from setuptools import setup, find_packages
+import sys
+import re
+
+version = ""
+with open("nos/__init__.py", "r") as fd:
+ version = re.search(r"^__version__\s*=\s*[\"']([^\"']*)[\"']", fd.read(),
+ re.MULTILINE).group(1)
+
+long_description = ""
+with open(join(dirname(__file__), "README.rst")) as fd:
+ long_description = fd.read().strip()
+
+install_requires = [
+ "urllib3>=1.8, <2.0",
+]
+tests_require = [
+ "nose",
+ "coverage",
+ "mock",
+ "pyaml",
+ "nosexcover"
+]
+
+# use external unittest for 2.6
+if sys.version_info[:2] == (2, 6):
+ install_requires.append("unittest2")
+
+setup(
+ name="nos-python-sdk",
+ description="NetEase Object Storage SDK",
+ license="MIT License",
+ url="https://c.163.com/",
+ long_description=long_description,
+ version=version,
+ author="NOS Developer",
+ author_email="hzsunjianliang@corp.netease.com",
+ packages=find_packages(
+ where=".",
+ exclude=("test_nos*", )
+ ),
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: MIT License",
+ "Intended Audience :: Developers",
+ "Operating System :: OS Independent",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.6",
+ "Programming Language :: Python :: 2.7"
+ ],
+ install_requires=install_requires,
+
+ test_suite="test_nos.run_tests.run_all",
+ tests_require=tests_require,
+)
diff --git a/test_nos/__init__.py b/test_nos/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/test_nos/run_tests.py b/test_nos/run_tests.py
new file mode 100644
index 0000000..f986f0d
--- /dev/null
+++ b/test_nos/run_tests.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+# -*- coding:utf8 -*-
+
+from __future__ import print_function
+
+import sys
+from os.path import dirname, abspath
+
+import nose
+
+
+def run_all(argv=None):
+ sys.exitfunc = lambda: sys.stderr.write('Shutting down....\n')
+
+ # always insert coverage when running tests
+ if argv is None:
+ argv = [
+ 'nosetests', '--with-xunit',
+ '--with-xcoverage', '--cover-package=nos',
+ '--cover-erase', '--cover-branches',
+ '--logging-filter=nos', '--logging-level=DEBUG',
+ '--verbose'
+ ]
+
+ nose.run_exit(
+ argv=argv,
+ defaultTest=abspath(dirname(__file__))
+ )
+
+if __name__ == '__main__':
+ run_all(sys.argv)
diff --git a/test_nos/test_cases.py b/test_nos/test_cases.py
new file mode 100644
index 0000000..368059a
--- /dev/null
+++ b/test_nos/test_cases.py
@@ -0,0 +1,68 @@
+# -*- coding:utf8 -*-
+
+from mock import Mock
+from collections import defaultdict
+try:
+ # python 2.6
+ from unittest2 import TestCase, SkipTest
+except ImportError:
+ from unittest import TestCase, SkipTest
+
+from nos import Client
+
+
+class DummyTransport(object):
+ def __init__(self, responses=None, **kwargs):
+ self.responses = responses
+ self.call_count = 0
+ self.calls = defaultdict(list)
+
+ def perform_request(self, method, bucket='', key='', params={}, body=None,
+ headers={}, timeout=None):
+ resp = Mock()
+ resp.read = Mock(return_value='')
+ h = {
+ 'Last-Modified': 'Fri, 10 Feb 2012 21:34:55 GMT',
+            'Content-Length': 1,
+ 'Content-Range': '0-50'
+ }
+ resp = 200, h, resp
+ if self.responses:
+ resp = self.responses[self.call_count]
+ self.call_count += 1
+ self.calls[(method, bucket, key)].append((params, body, headers, timeout))
+ return resp
+
+
+class ClientTestCase(TestCase):
+    def setUp(self):
+        super(ClientTestCase, self).setUp()
+ self.client = Client(transport_class=DummyTransport)
+
+ def assert_call_count_equals(self, count):
+ self.assertEquals(count, self.client.transport.call_count)
+
+ def assert_url_called(self, method, bucket, key, count=1):
+ self.assertIn((method, bucket, key), self.client.transport.calls)
+ calls = self.client.transport.calls[(method, bucket, key)]
+ self.assertEquals(count, len(calls))
+ return calls
+
+
+class TestClientTestCase(ClientTestCase):
+ def test_our_transport_used(self):
+ self.assertIsInstance(self.client.transport, DummyTransport)
+
+ def test_start_with_0_call(self):
+ self.assert_call_count_equals(0)
+
+ def test_each_call_is_recorded(self):
+ self.client.transport.perform_request('GET')
+ self.client.transport.perform_request(
+ 'DELETE', 'test', 'object', params={},
+ body='body', headers={}, timeout=None
+ )
+ self.assert_call_count_equals(2)
+ self.assertEquals([({}, 'body', {}, None)], self.assert_url_called(
+ 'DELETE', 'test', 'object', 1
+ ))
diff --git a/test_nos/test_client/__init__.py b/test_nos/test_client/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/test_nos/test_client/test_auth.py b/test_nos/test_client/test_auth.py
new file mode 100644
index 0000000..43546ad
--- /dev/null
+++ b/test_nos/test_client/test_auth.py
@@ -0,0 +1,65 @@
+# -*- coding:utf8 -*-
+
+from mock import Mock
+from nos.client.auth import RequestMetaData
+
+from ..test_cases import TestCase
+
+
+class TestRequestMetaData(TestCase):
+ def test_get_url(self):
+ meta_data = RequestMetaData('', '', 'GET')
+ meta_data.url = 'http://www.1.com'
+ self.assertEquals('http://www.1.com', meta_data.get_url())
+
+ def test_get_headers(self):
+ meta_data = RequestMetaData('', '', 'GET')
+ meta_data.headers = {'a': 'b'}
+ self.assertEquals({'a': 'b'}, meta_data.get_headers())
+
+ def test_complete_headers(self):
+ meta_data = RequestMetaData('', '', 'GET', body='1234567')
+ self.assertEquals('fcea920f7412b5da7be0cf42b8c93759',
+ meta_data.headers['Content-MD5'])
+
+ meta_data = RequestMetaData('', '', 'GET', body='1234567')
+ meta_data._get_string_to_sign = Mock(return_value='12345')
+ meta_data._complete_headers()
+ self.assertEquals('NOS :riPI9XPTbHodbLyLC+vlLgZm3PFPoEQHMo+5RLj3qC0=',
+ meta_data.headers['Authorization'])
+
+ meta_data = RequestMetaData('test', 'object', 'GET', 'aaa', 'bbb',
+ params={'upload': None, 'a': 12345})
+ self.assertEquals('http://aaa.nos.netease.com/bbb?a=12345&upload',
+ meta_data.url)
+
+ def test_get_string_to_sign(self):
+ meta_data = RequestMetaData('', '', 'GET')
+ meta_data.headers = {
+ 'Date': 'Fri, 10 Feb 2012 21:34:55 GMT',
+ 'Expires': 'Fri, 10 Feb 2016 21:34:55 GMT',
+ 'x-nos-x': 'hello'
+ }
+ meta_data._get_canonicalized_resource = Mock(return_value='12345')
+ self.assertEquals(
+ 'GET\n\n\nFri, 10 Feb 2016 21:34:55 GMT\nx-nos-x:hello\n12345',
+ meta_data._get_string_to_sign()
+ )
+
+ def test_get_canonicalized_resource(self):
+ meta_data = RequestMetaData('', '', 'GET')
+ meta_data.bucket = 'test'
+ meta_data.key = 'ob&2!'
+ meta_data.params = {
+ 'test': '1',
+ 'a': 2,
+ 'upload': None,
+ 'logging': 3,
+ 'lifecycle': None,
+ 'delete': None,
+ 'uploadId': '1221334'
+ }
+ self.assertEquals(
+ '/test/ob%262%21?uploadId=1221334&delete',
+ meta_data._get_canonicalized_resource()
+ )
diff --git a/test_nos/test_client/test_nos_client.py b/test_nos/test_client/test_nos_client.py
new file mode 100644
index 0000000..7adc604
--- /dev/null
+++ b/test_nos/test_client/test_nos_client.py
@@ -0,0 +1,91 @@
+# -*- coding:utf8 -*-
+
+from datetime import datetime
+from StringIO import StringIO
+from ..test_cases import ClientTestCase
+from nos.client.nos_client import parse_xml
+from nos.exceptions import XmlParseError, InvalidBucketName, InvalidObjectName
+
+
+class TestClient(ClientTestCase):
+ def test_parse_xml(self):
+ status, headers, body = 200, {}, StringIO('')
+ self.assertRaises(XmlParseError, parse_xml, status, headers, body)
+
+ def test_delete_object(self):
+ self.client.delete_object('bucket', 'key')
+ self.assert_url_called('DELETE', 'bucket', 'key')
+
+ def test_delete_objects(self):
+ self.client.delete_objects('bucket', ['key1', 'key2'])
+ self.assert_url_called('POST', 'bucket', '')
+
+ def test_get_object(self):
+ self.client.get_object(
+ 'bucket', 'key', range='0-100',
+ if_modified_since=datetime(2015, 1, 1)
+ )
+ self.assert_url_called('GET', 'bucket', 'key')
+
+ def test_head_object(self):
+ self.client.head_object('bucket', 'key')
+ self.assert_url_called('HEAD', 'bucket', 'key')
+
+ def test_list_objects(self):
+ self.client.list_objects('bucket', delimiter='', marker='',
+ limit=10, prefix='')
+ self.assert_url_called('GET', 'bucket', '')
+
+ def test_put_object(self):
+ self.client.put_object('bucket', 'key', 'hello', storage_class='cheap')
+ self.assert_url_called('PUT', 'bucket', 'key')
+
+ def test_copy_object(self):
+ self.client.copy_object('bucket', 'key2', 'bucket', 'key')
+ self.assert_url_called('PUT', 'bucket', 'key', 1)
+ self.client.copy_object(u'bucket', u'key2', u'bucket', u'key')
+ self.assert_url_called('PUT', 'bucket', 'key', 2)
+ self.assertRaises(InvalidBucketName, self.client.copy_object,
+ '', 'key2', 'bucket', 'key')
+ self.assertRaises(InvalidObjectName, self.client.copy_object,
+ 'bucket', '', 'bucket', 'key')
+
+ def test_move_object(self):
+ self.client.move_object('bucket', 'key2', 'bucket', 'key')
+ self.assert_url_called('PUT', 'bucket', 'key', 1)
+ self.client.move_object(u'bucket', u'key2', u'bucket', u'key')
+ self.assert_url_called('PUT', 'bucket', 'key', 2)
+ self.assertRaises(InvalidBucketName, self.client.move_object,
+ '', 'key2', 'bucket', 'key')
+ self.assertRaises(InvalidObjectName, self.client.move_object,
+ 'bucket', '', 'bucket', 'key')
+
+ def test_create_multipart_upload(self):
+ self.client.create_multipart_upload('bucket', 'key',
+ storage_class='cheap')
+ self.assert_url_called('POST', 'bucket', 'key')
+
+ def test_upload_part(self):
+ self.client.upload_part('bucket', 'key', 1, '21', 'hello')
+ self.assert_url_called('PUT', 'bucket', 'key')
+
+ def test_complete_multipart_upload(self):
+ self.client.complete_multipart_upload(
+ 'bucket', 'key', '11212sd',
+ [{'part_num': 1, 'etag': 'ab1234'},
+ {'part_num': 2, 'etag': 'da4513'}],
+ object_md5='3425b2123')
+ self.assert_url_called('POST', 'bucket', 'key')
+
+ def test_abort_multipart_upload(self):
+ self.client.abort_multipart_upload('bucket', 'key', 'asdsa')
+ self.assert_url_called('DELETE', 'bucket', 'key')
+
+ def test_list_parts(self):
+ self.client.list_parts('bucket', 'key', 'asdsa',
+ limit=10, part_number_marker='')
+ self.assert_url_called('GET', 'bucket', 'key')
+
+ def test_list_multipart_uploads(self):
+ self.client.list_multipart_uploads('bucket', limit=10, key_marker='')
+ self.assert_url_called('GET', 'bucket', '')
diff --git a/test_nos/test_connection.py b/test_nos/test_connection.py
new file mode 100644
index 0000000..7a1fcf7
--- /dev/null
+++ b/test_nos/test_connection.py
@@ -0,0 +1,93 @@
+# -*- coding:utf8 -*-
+
+from mock import Mock, patch
+import urllib3
+from urllib3.exceptions import ReadTimeoutError
+from nos.exceptions import ConnectionTimeout, ConnectionError, BadRequestError
+from nos.connection import Urllib3HttpConnection
+
+from .test_cases import TestCase
+
+
+class TestUrllib3Connection(TestCase):
+ def _get_mock_connection(self, connection_params={},
+ status_code=200, response_body='{}'):
+ con = Urllib3HttpConnection(**connection_params)
+
+ def _dummy_send(*args, **kwargs):
+ dummy_response = Mock()
+ dummy_response.headers = {}
+ dummy_response.getheaders = Mock(return_value={})
+ dummy_response.status = status_code
+ dummy_response.read = Mock(return_value=response_body)
+ dummy_response.request = args[0]
+ dummy_response.cookies = {}
+ _dummy_send.call_args = (args, kwargs)
+ return dummy_response
+ con.pool.urlopen = _dummy_send
+ return con
+
+ @patch('urllib3.PoolManager')
+ def test_default_num_pools(self, mock_pool_manager):
+ Urllib3HttpConnection()
+ mock_pool_manager.assert_called_once_with(num_pools=16)
+
+ @patch('urllib3.PoolManager')
+ def test_num_pools(self, mock_pool_manager):
+ Urllib3HttpConnection(num_pools=1)
+ mock_pool_manager.assert_called_once_with(num_pools=1)
+
+ def test_pool(self):
+ con = Urllib3HttpConnection()
+ self.assertIsInstance(con.pool, urllib3.poolmanager.PoolManager)
+
+ def test_read_timeout_error(self):
+ con = Urllib3HttpConnection()
+ con.pool.urlopen = Mock(side_effect=ReadTimeoutError('', '', ''))
+ self.assertRaises(ConnectionTimeout, con.perform_request,
+ u'GET', u'/', timeout=10)
+
+ def test_other_error(self):
+ con = Urllib3HttpConnection()
+ con.pool.urlopen = Mock(side_effect=KeyError())
+ self.assertRaises(ConnectionError, con.perform_request,
+ u'GET', u'/', timeout=10)
+
+ def test_perform_request_default(self):
+ con = self._get_mock_connection(status_code=200, response_body='ok!')
+ status, headers, response = con.perform_request('GET', '/')
+ self.assertEquals(200, status)
+ self.assertEquals({}, headers)
+ self.assertEquals('ok!', response.read())
+
+ @patch('nos.connection.Urllib3HttpConnection._raise_error')
+ def test_request_error(self, mock_raise_error):
+ con = self._get_mock_connection(status_code=400)
+ con.perform_request('GET', '/')
+        self.assertEquals(1, mock_raise_error.call_count)
+
+ def test_raise_error_with_no_data(self):
+ response = Mock()
+ response.status = 400
+ response.reason = 'Bad Request'
+ response.getheader = Mock(return_value='')
+ response.read = Mock(return_value='')
+
+ con = Urllib3HttpConnection()
+ self.assertRaises(BadRequestError, con._raise_error, response)
+
+ def test_raise_error(self):
+ response = Mock()
+ response.status = 400
+ response.reason = 'Bad Request'
+ response.getheader = Mock(return_value='')
+        response.read = Mock(return_value='''
+<Error>
+    <Code>InvalidArgument</Code>
+    <Message>值“uploadId=23r54i252358235-3253222”非法,必须为整数</Message>
+    <RequestId>c081a3ec0aa000000154d125d733840f</RequestId>
+</Error>
+''')
+ con = Urllib3HttpConnection()
+ self.assertRaises(BadRequestError, con._raise_error, response)
diff --git a/test_nos/test_exceptions.py b/test_nos/test_exceptions.py
new file mode 100644
index 0000000..0c723cb
--- /dev/null
+++ b/test_nos/test_exceptions.py
@@ -0,0 +1,92 @@
+# -*- coding:utf8 -*-
+
+from nos.exceptions import (ClientException, ServiceException,
+ InvalidObjectName, InvalidBucketName,
+ FileOpenModeError, MultiObjectDeleteException)
+
+from .test_cases import TestCase
+
+
+class TestClientException(TestCase):
+ def test_default(self):
+ message = "ClientException(hello) caused by: KeyError('key error')"
+ err = KeyError("key error")
+ exception = ClientException('hello', err)
+ self.assertEquals('hello', exception.error)
+ self.assertEquals(err, exception.info)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(message, str(exception))
+
+
+class TestServiceException(TestCase):
+ def test_default(self):
+ status_code = 400
+ error_type = 'Bad Request'
+ error_code = 'InvalidArgument'
+ request_id = '9b8932d70aa000000154d729c6b0840e'
+ message = 'uploadId=23r54i252358235-3253222'
+ exception = ServiceException(status_code, error_type, error_code,
+ request_id, message)
+ self.assertEquals(status_code, exception.status_code)
+ self.assertEquals(error_type, exception.error_type)
+ self.assertEquals(error_code, exception.error_code)
+ self.assertEquals(request_id, exception.request_id)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(
+ 'ServiceException(400, Bad Request, InvalidArgument,'
+ ' 9b8932d70aa000000154d729c6b0840e, '
+ 'uploadId=23r54i252358235-3253222)',
+ str(exception))
+
+
+class TestInvalidObjectName(TestCase):
+ def test_default(self):
+ message = 'InvalidObjectName caused by: object name is empty.'
+ exception = InvalidObjectName()
+ self.assertEquals(None, exception.error)
+ self.assertEquals(None, exception.info)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(message, str(exception))
+
+
+class TestInvalidBucketName(TestCase):
+ def test_default(self):
+ message = 'InvalidBucketName caused by: bucket name is empty.'
+ exception = InvalidBucketName()
+ self.assertEquals(None, exception.error)
+ self.assertEquals(None, exception.info)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(message, str(exception))
+
+
+class TestFileOpenModeError(TestCase):
+ def test_default(self):
+ message = ('FileOpenModeError caused by: object is a file that opened '
+ 'without the mode for binary files.')
+ exception = FileOpenModeError()
+ self.assertEquals(None, exception.error)
+ self.assertEquals(None, exception.info)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(message, str(exception))
+
+
+class TestMultiObjectDeleteException(TestCase):
+ def test_default(self):
+ info = [{
+ 'key': '2.jpg',
+ 'code': 'NoSuchKey',
+ 'message': 'No Such Key'
+ }]
+ message = ('MultiObjectDeleteException caused by: some objects delete '
+ 'unsuccessfully.')
+ exception = MultiObjectDeleteException(info)
+ self.assertEquals(None, exception.status_code)
+ self.assertEquals(None, exception.error_type)
+ self.assertEquals(None, exception.error_code)
+ self.assertEquals(None, exception.request_id)
+ self.assertEquals(message, exception.message)
+ self.assertEquals(info, exception.errors)
+ self.assertEquals(
+ "%s %s" % (message, info),
+ str(exception)
+ )
diff --git a/test_nos/test_serializer.py b/test_nos/test_serializer.py
new file mode 100644
index 0000000..99a129a
--- /dev/null
+++ b/test_nos/test_serializer.py
@@ -0,0 +1,33 @@
+# -*- coding:utf8 -*-
+
+import uuid
+from nos.serializer import JSONSerializer
+from nos.exceptions import SerializationError
+from datetime import date, datetime
+from decimal import Decimal
+from mock import Mock
+
+from .test_cases import TestCase
+
+
+class TestJSONSerializer(TestCase):
+ def test_default(self):
+ serializer = JSONSerializer()
+ self.assertEquals('2016-05-01', serializer.default(date(2016, 5, 1)))
+ self.assertEquals('2016-05-01T00:00:00',
+ serializer.default(datetime(2016, 5, 1)))
+ self.assertEquals(12345.0, serializer.default(Decimal(12345)))
+ self.assertEquals(
+ '12345678-1234-5678-1234-567812345678',
+ serializer.default(uuid.UUID('12345678123456781234567812345678'))
+ )
+ self.assertRaises(TypeError, serializer.default, 'sads')
+
+ def test_dumps(self):
+ serializer = JSONSerializer()
+ self.assertEquals('12345', serializer.dumps('12345'))
+ self.assertEquals('54321', serializer.dumps(u'54321'))
+ s = Mock(spec=file)
+ self.assertEquals(s, serializer.dumps(s))
+ self.assertEquals('{"a": "b"}', serializer.dumps({'a': 'b'}))
+ self.assertRaises(SerializationError, serializer.dumps, set(['sadsa']))
diff --git a/test_nos/test_smoke_test.py b/test_nos/test_smoke_test.py
new file mode 100644
index 0000000..a72d4e5
--- /dev/null
+++ b/test_nos/test_smoke_test.py
@@ -0,0 +1,422 @@
+# -*- coding:utf8 -*-
+
+from .test_cases import TestCase
+import hashlib
+import json
+import nos
+
+
+class TestSmokeTest(TestCase):
+ ACCESS_KEY_ID = 'xxxxxxxxxx'
+ ACCESS_KEY_SECRET = 'xxxxxxxxxx'
+ BUCKET = 'xxxx'
+ KEYS = [
+ u'1特殊 字符`-=[]\\;\',./ ~!@#$%^&*()_+{}|:<>?"',
+ u'2特殊 字符`-=[]\\;\',./ ~!@#$%^&*()_+{}|:<>?"',
+ u'3特殊 字符`-=[]\\;\',./ ~!@#$%^&*()_+{}|:<>?"',
+ u'4特殊 字符`-=[]\\;\',./ ~!@#$%^&*()_+{}|:<>?"',
+ ]
+ BODY_STR = 'This is a test string!\r\n\r\n'
+ BODY_DICT = {"a": 1, "b": 2, "c": 3}
+ BODY_LIST = ["a", "b", "c"]
+ BODY_FILE = open('/etc/passwd', 'rb')
+
+ def clean_objects(self):
+ client = nos.Client(
+ access_key_id=self.ACCESS_KEY_ID,
+ access_key_secret=self.ACCESS_KEY_SECRET
+ )
+
+ # delete objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ keys = [i.findtext('Key') for i in resp.findall('Contents')]
+ if keys:
+ client.delete_objects(self.BUCKET, keys)
+
+ # head object to check
+ for i in keys:
+ self.assertRaises(nos.exceptions.NotFoundError,
+ client.head_object, self.BUCKET, i)
+
+ # list objects to check
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ keys = [i.findtext('Key') for i in resp.findall('Contents')]
+ self.assertEquals([], keys)
+
+ # abort multipart upload
+ r = client.list_multipart_uploads(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Upload'):
+ client.abort_multipart_upload(
+ bucket=self.BUCKET,
+ key=i.findtext('Key'),
+ upload_id=i.findtext('UploadId')
+ )
+
+ # list multipart uploads to check
+ r = client.list_multipart_uploads(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ self.assertEquals([], [i for i in resp.findall('Upload')])
+
+ def test_client(self):
+ self.assertEquals(self.ACCESS_KEY_ID.startswith('xxxx'), False)
+ self.assertEquals(self.ACCESS_KEY_SECRET.startswith('xxxx'), False)
+ self.assertEquals(self.BUCKET.startswith('xxxx'), False)
+
+ self.clean_objects()
+
+ # create client
+ client = nos.Client(
+ access_key_id=self.ACCESS_KEY_ID,
+ access_key_secret=self.ACCESS_KEY_SECRET
+ )
+
+ # put object
+ r = client.put_object(self.BUCKET, self.KEYS[0], self.BODY_STR)
+ p_md5 = r['etag']
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[0])
+ g_md5 = r['etag']
+
+ body = r['body'].read()
+ self.assertEquals(p_md5, g_md5)
+ self.assertEquals(self.BODY_STR, body)
+
+ md5sum = hashlib.md5(self.BODY_STR).hexdigest()
+ self.assertEquals(md5sum, p_md5)
+
+ def test_multipart_upload(self):
+ self.clean_objects()
+ info = {}
+ body_part1 = self.BODY_STR * 1024 * 1024 * 2
+ body_part2 = self.BODY_STR * 1024 * 1024 * 1
+ body_all = body_part1 + body_part2
+ md5sum_part1 = hashlib.md5(body_part1).hexdigest()
+ md5sum_part2 = hashlib.md5(body_part2).hexdigest()
+ md5sum = hashlib.md5("%s-%s-" % (md5sum_part1, md5sum_part2)).hexdigest()
+
+ # create client
+ client = nos.Client(
+ access_key_id=self.ACCESS_KEY_ID,
+ access_key_secret=self.ACCESS_KEY_SECRET
+ )
+
+ # create multipart upload
+ r = client.create_multipart_upload(
+ self.BUCKET, self.KEYS[0],
+ meta_data={'x-nos-meta-hello': 'world'}
+ )
+ resp = r['response']
+ self.assertEquals(self.BUCKET, resp.findtext('Bucket'))
+ self.assertEquals(self.KEYS[0], resp.findtext('Key'))
+ upload_id = resp.findtext('UploadId')
+
+ # upload part
+ r = client.upload_part(
+ bucket=self.BUCKET,
+ key=self.KEYS[0],
+ part_num=1,
+ upload_id=upload_id,
+ body=body_part1
+ )
+ info['1'] = r['etag']
+ self.assertEquals(md5sum_part1, r['etag'])
+
+ # upload part
+ r = client.upload_part(
+ bucket=self.BUCKET,
+ key=self.KEYS[0],
+ part_num=2,
+ upload_id=upload_id,
+ body=body_part2
+ )
+ info['2'] = r['etag']
+ self.assertEquals(md5sum_part2, r['etag'])
+
+ # list parts
+ r = client.list_parts(
+ bucket=self.BUCKET,
+ key=self.KEYS[0],
+ upload_id=upload_id
+ )
+ resp = r['response']
+ for i in resp.findall('Part'):
+ self.assertEquals(
+                i.findtext('ETag').strip('"'),
+ info[i.findtext('PartNumber')]
+ )
+
+ # list multipart uploads
+ r = client.list_multipart_uploads(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Upload'):
+ if upload_id == i.findtext('UploadId'):
+ self.assertEquals(self.KEYS[0], i.findtext('Key'))
+
+ # complete multipart upload
+ r = client.complete_multipart_upload(
+ bucket=self.BUCKET,
+ key=self.KEYS[0],
+ upload_id=upload_id,
+ info=[{'part_num': x, 'etag': y} for x, y in info.iteritems()]
+ )
+ resp = r['response']
+ self.assertEquals(self.BUCKET, resp.findtext('Bucket'))
+ self.assertEquals(self.KEYS[0], resp.findtext('Key'))
+ self.assertEquals(md5sum, resp.findtext('ETag').strip())
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[0])
+ g_md5 = r['etag'].split('-')[0]
+
+ body = r['body'].read()
+ self.assertEquals(body_all, body)
+ self.assertEquals(md5sum, g_md5)
+
+ # create multipart upload
+ r = client.create_multipart_upload(self.BUCKET, self.KEYS[1])
+ resp = r['response']
+ self.assertEquals(self.BUCKET, resp.findtext('Bucket'))
+ self.assertEquals(self.KEYS[1], resp.findtext('Key'))
+ upload_id = resp.findtext('UploadId')
+
+ # abort multipart upload
+ r = client.abort_multipart_upload(
+ bucket=self.BUCKET,
+ key=self.KEYS[1],
+ upload_id=upload_id
+ )
+
+ # list multipart uploads
+ r = client.list_multipart_uploads(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+        for i in resp.findall('Upload'):
+            if upload_id == i.findtext('UploadId'):
+                self.fail('multipart upload %s was not aborted' % upload_id)
+
+ def test_nos_client(self):
+ self.clean_objects()
+ info = {}
+
+ # create client
+ client = nos.Client(
+ access_key_id=self.ACCESS_KEY_ID,
+ access_key_secret=self.ACCESS_KEY_SECRET
+ )
+
+ # put invalid object name
+ self.assertRaises(nos.exceptions.InvalidObjectName,
+ client.put_object, 'aa', '', '')
+
+ # put invalid bucket name
+ self.assertRaises(nos.exceptions.InvalidBucketName,
+ client.put_object, '', 'bb', '')
+
+ # put object
+ r = client.put_object(self.BUCKET, self.KEYS[0], self.BODY_STR,
+ meta_data={'x-nos-meta-hello': 'world'})
+ p_md5 = r['etag']
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[0])
+ g_md5 = r['etag']
+ body = r['body'].read()
+
+ # head object
+ r = client.head_object(self.BUCKET, self.KEYS[0])
+ h_md5 = r['etag']
+
+ b_str = self.BODY_STR
+ md5_str = hashlib.md5(b_str).hexdigest()
+ info[self.KEYS[0]] = md5_str
+ self.assertEquals(h_md5, md5_str)
+ self.assertEquals(g_md5, md5_str)
+ self.assertEquals(p_md5, md5_str)
+ self.assertEquals(b_str, body)
+
+ # put object
+ r = client.put_object(self.BUCKET, self.KEYS[1], self.BODY_DICT)
+ p_md5 = r['etag']
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[1])
+ g_md5 = r['etag']
+ body = r['body'].read()
+
+ # head object
+ r = client.head_object(self.BUCKET, self.KEYS[1])
+ h_md5 = r['etag']
+
+ b_dict = json.dumps(self.BODY_DICT)
+ md5_dict = hashlib.md5(b_dict).hexdigest()
+ info[self.KEYS[1]] = md5_dict
+ self.assertEquals(h_md5, md5_dict)
+ self.assertEquals(g_md5, md5_dict)
+ self.assertEquals(p_md5, md5_dict)
+ self.assertEquals(b_dict, body)
+
+ # put object
+ r = client.put_object(self.BUCKET, self.KEYS[2], self.BODY_LIST)
+ p_md5 = r['etag']
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[2])
+ g_md5 = r['etag']
+ body = r['body'].read()
+
+ # head object
+ r = client.head_object(self.BUCKET, self.KEYS[2])
+ h_md5 = r['etag']
+
+ b_list = json.dumps(self.BODY_LIST)
+ md5_list = hashlib.md5(b_list).hexdigest()
+ info[self.KEYS[2]] = md5_list
+ self.assertEquals(h_md5, md5_list)
+ self.assertEquals(g_md5, md5_list)
+ self.assertEquals(p_md5, md5_list)
+ self.assertEquals(b_list, body)
+
+ # put object
+ self.BODY_FILE.seek(0)
+ r = client.put_object(self.BUCKET, self.KEYS[3], self.BODY_FILE)
+ p_md5 = r['etag']
+
+ # get object
+ r = client.get_object(self.BUCKET, self.KEYS[3])
+ g_md5 = r['etag']
+ body = r['body'].read()
+
+ # head object
+ r = client.head_object(self.BUCKET, self.KEYS[3])
+ h_md5 = r['etag']
+
+ self.BODY_FILE.seek(0)
+ b_file = self.BODY_FILE.read()
+ md5_file = hashlib.md5(b_file).hexdigest()
+ info[self.KEYS[3]] = md5_file
+ self.assertEquals(h_md5, md5_file)
+ self.assertEquals(g_md5, md5_file)
+ self.assertEquals(p_md5, md5_file)
+ self.assertEquals(b_file, body)
+
+ # list objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Contents'):
+ self.assertEquals(
+ info[i.findtext('Key')],
+ i.findtext('ETag').strip('"')
+ )
+
+ # move object
+ r = client.move_object(
+ self.BUCKET,
+ self.KEYS[0],
+ self.BUCKET,
+ self.KEYS[3]
+ )
+ info[self.KEYS[3]] = info.pop(self.KEYS[0])
+
+ # list objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Contents'):
+ self.assertEquals(
+ info[i.findtext('Key')],
+ i.findtext('ETag').strip('"')
+ )
+
+ # copy object
+ r = client.copy_object(
+ self.BUCKET,
+ self.KEYS[1],
+ self.BUCKET,
+ self.KEYS[0]
+ )
+ info[self.KEYS[0]] = info[self.KEYS[1]]
+
+ # list objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Contents'):
+ self.assertEquals(
+ info[i.findtext('Key')],
+ i.findtext('ETag').strip('"')
+ )
+
+ # delete object
+ r = client.delete_object(
+ self.BUCKET,
+ self.KEYS[3]
+ )
+ info.pop(self.KEYS[3], '')
+ self.assertRaises(nos.exceptions.NotFoundError, client.head_object,
+ self.BUCKET, self.KEYS[3])
+
+ # list objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Contents'):
+ self.assertEquals(
+ info[i.findtext('Key')],
+ i.findtext('ETag').strip('"')
+ )
+
+ # delete objects
+ r = client.delete_objects(
+ self.BUCKET,
+ [self.KEYS[1], self.KEYS[2]]
+ )
+ self.assertRaises(nos.exceptions.BadRequestError,
+ client.delete_objects, self.BUCKET, [])
+ self.assertRaises(nos.exceptions.MultiObjectDeleteException,
+ client.delete_objects, self.BUCKET, [self.KEYS[3]])
+ info.pop(self.KEYS[1], '')
+ info.pop(self.KEYS[2], '')
+ info.pop(self.KEYS[3], '')
+ self.assertRaises(nos.exceptions.NotFoundError, client.head_object,
+ self.BUCKET, self.KEYS[1])
+ self.assertRaises(nos.exceptions.NotFoundError, client.head_object,
+ self.BUCKET, self.KEYS[2])
+ self.assertRaises(nos.exceptions.NotFoundError, client.head_object,
+ self.BUCKET, self.KEYS[3])
+
+ # list objects
+ r = client.list_objects(
+ bucket=self.BUCKET
+ )
+ resp = r['response']
+ for i in resp.findall('Contents'):
+ self.assertEquals(
+ info[i.findtext('Key')],
+ i.findtext('ETag').strip('"')
+ )
+
+ # head object
+ client.head_object(
+ bucket=self.BUCKET,
+ key=self.KEYS[0]
+ )
diff --git a/test_nos/test_transport.py b/test_nos/test_transport.py
new file mode 100644
index 0000000..abc04b3
--- /dev/null
+++ b/test_nos/test_transport.py
@@ -0,0 +1,161 @@
+# -*- coding:utf8 -*-
+
+from mock import Mock, patch
+from nos.exceptions import (ConnectionTimeout, ConnectionError,
+ ServiceException, FileOpenModeError,
+ BadRequestError)
+from nos.transport import Transport
+from nos.serializer import JSONSerializer
+from nos.connection import Urllib3HttpConnection
+from nos.client.utils import MAX_OBJECT_SIZE
+
+from .test_cases import TestCase
+
+
+class TestTransport(TestCase):
+ def test_init_default_args(self):
+ transport = Transport()
+ self.assertEquals(None, transport.access_key_id)
+ self.assertEquals(None, transport.access_key_secret)
+ self.assertEquals(2, transport.max_retries)
+ self.assertEquals(False, transport.retry_on_timeout)
+        self.assertEquals('nos-eastchina1.126.net', transport.end_point)
+ self.assertEquals((500, 501, 503, ), transport.retry_on_status)
+ self.assertEquals(None, transport.timeout)
+ self.assertIsInstance(transport.serializer, JSONSerializer)
+ self.assertIsInstance(transport.connection, Urllib3HttpConnection)
+
+ def test_init_args(self):
+ access_key_id = '12345'
+ access_key_secret = '54321'
+ retry_on_timeout = True
+ retry_on_status = (500, 502, )
+ serializer = JSONSerializer()
+ kwargs = {
+ 'max_retries': 10,
+ 'end_point': 'nos110.netease.com',
+ 'timeout': 1
+ }
+ transport = Transport(
+ access_key_id=access_key_id,
+ access_key_secret=access_key_secret,
+ retry_on_timeout=retry_on_timeout,
+ retry_on_status=retry_on_status,
+ serializer=serializer,
+ **kwargs
+ )
+ self.assertEquals(access_key_id, transport.access_key_id)
+ self.assertEquals(access_key_secret, transport.access_key_secret)
+ self.assertEquals(retry_on_timeout, transport.retry_on_timeout)
+ self.assertEquals(retry_on_status, transport.retry_on_status)
+ self.assertEquals(kwargs['max_retries'], transport.max_retries)
+ self.assertEquals(kwargs['end_point'], transport.end_point)
+ self.assertEquals(kwargs['timeout'], transport.timeout)
+ self.assertEquals(serializer, transport.serializer)
+ self.assertIsInstance(transport.connection, Urllib3HttpConnection)
+
+ @patch('nos.transport.RequestMetaData')
+ def test_perform_request_default_args(self, mock_meta_data):
+ url = 'http://nos.neteast.com'
+ headers = {'a': 'b'}
+ d = Mock()
+ d.get_url.return_value = url
+ d.get_headers.return_value = headers
+ mock_meta_data.return_value = d
+
+ transport = Transport()
+ transport.connection.perform_request = Mock(return_value=(200, {}, ''))
+ self.assertEquals((200, {}, ''), transport.perform_request('GET'))
+ mock_meta_data.assert_called_once_with(
+ access_key_id=transport.access_key_id,
+ access_key_secret=transport.access_key_secret,
+ method='GET',
+ bucket=None,
+ key=None,
+            end_point='nos-eastchina1.126.net',
+ params={},
+ body=None,
+ headers={}
+ )
+ transport.connection.perform_request.assert_called_once_with(
+ 'GET', url, None, headers, timeout=None
+ )
+
+ @patch('nos.transport.RequestMetaData')
+ def test_perform_request_with_timeout(self, mock_meta_data):
+ url = 'http://nos.neteast.com'
+ headers = {'a': 'b'}
+ d = Mock()
+ d.get_url.return_value = url
+ d.get_headers.return_value = headers
+ mock_meta_data.return_value = d
+
+ transport = Transport()
+ transport.serializer.dumps = Mock(return_value='54321')
+ transport.connection.perform_request = Mock(
+ side_effect=ConnectionTimeout
+ )
+ self.assertRaises(ConnectionTimeout, transport.perform_request,
+ 'GET', body='12345')
+ transport.serializer.dumps.assert_called_once_with('12345')
+ transport.connection.perform_request.assert_called_once_with(
+ 'GET', url, '54321', headers, timeout=None
+ )
+
+ @patch('nos.transport.RequestMetaData')
+ def test_perform_request_with_connectionerror(self, mock_meta_data):
+ url = 'http://nos.neteast.com'
+ headers = {'a': 'b'}
+ d = Mock()
+ d.get_url.return_value = url
+ d.get_headers.return_value = headers
+ mock_meta_data.return_value = d
+
+ transport = Transport()
+ transport.serializer.dumps = Mock(return_value='54321')
+ transport.connection.perform_request = Mock(
+ side_effect=ConnectionError('', '')
+ )
+ self.assertRaises(ConnectionError, transport.perform_request,
+ 'GET', body='12345')
+ transport.serializer.dumps.assert_called_once_with('12345')
+ transport.connection.perform_request.assert_called_with(
+ 'GET', url, '54321', headers, timeout=None
+ )
+
+ @patch('nos.transport.RequestMetaData')
+ def test_perform_request_with_serviceexception(self, mock_meta_data):
+ url = 'http://nos.neteast.com'
+ headers = {'a': 'b'}
+ d = Mock()
+ d.get_url.return_value = url
+ d.get_headers.return_value = headers
+ mock_meta_data.return_value = d
+
+ transport = Transport()
+ transport.serializer.dumps = Mock(return_value='54321')
+ transport.connection.perform_request = Mock(
+ side_effect=ServiceException(503)
+ )
+ self.assertRaises(ServiceException, transport.perform_request,
+ 'GET', body='12345')
+ transport.serializer.dumps.assert_called_once_with('12345')
+ transport.connection.perform_request.assert_called_with(
+ 'GET', url, '54321', headers, timeout=None
+ )
+
+ def test_perform_request_with_fileopenmodeerror(self):
+ transport = Transport()
+ f = open('/etc/passwd', 'r')
+ transport.serializer.dumps = Mock(return_value=f)
+ self.assertRaises(
+ FileOpenModeError, transport.perform_request, 'GET', body=f
+ )
+
+ def test_perform_request_with_badrequesterror(self):
+ d = '12' * MAX_OBJECT_SIZE
+ transport = Transport()
+ transport.serializer.dumps = Mock(return_value=d)
+ self.assertRaises(
+ BadRequestError, transport.perform_request, 'GET', body=d
+ )