From 548c0ad8dba42149a5cf7551220a48c8a06013d3 Mon Sep 17 00:00:00 2001
From: Danny Hermes
Date: Mon, 25 Jan 2016 17:30:37 -0800
Subject: [PATCH] Implementing storage usage doc.

Updating auto-generated docs in the process, including
`inherited-members` for `Blob` and `Bucket` so `reload` and `patch`
show up.
---
 docs/index.rst                |   1 +
 docs/storage-blobs.rst        |   1 +
 docs/storage-buckets.rst      |   1 +
 docs/storage-usage.rst        | 845 ++++++++++++++++++++++++++++++++++
 gcloud/storage/bucket.py      |  16 +-
 gcloud/storage/client.py      |   8 +-
 gcloud/storage/test_bucket.py |   6 +-
 7 files changed, 863 insertions(+), 15 deletions(-)
 create mode 100644 docs/storage-usage.rst

diff --git a/docs/index.rst b/docs/index.rst
index e97e005076d6..feb80ebc5fdb 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -23,6 +23,7 @@
   :hidden:
   :caption: Storage
 
+  storage-usage
   Client <storage-client>
   storage-blobs
   storage-buckets
diff --git a/docs/storage-blobs.rst b/docs/storage-blobs.rst
index 384806d6e3d8..f3e6a64638eb 100644
--- a/docs/storage-blobs.rst
+++ b/docs/storage-blobs.rst
@@ -5,3 +5,4 @@ Blobs / Objects
    :members:
    :undoc-members:
    :show-inheritance:
+   :inherited-members:
diff --git a/docs/storage-buckets.rst b/docs/storage-buckets.rst
index 55c19a461b93..d4972449fca9 100644
--- a/docs/storage-buckets.rst
+++ b/docs/storage-buckets.rst
@@ -5,3 +5,4 @@ Buckets
    :members:
    :undoc-members:
    :show-inheritance:
+   :inherited-members:
diff --git a/docs/storage-usage.rst b/docs/storage-usage.rst
new file mode 100644
index 000000000000..a57ed01c5a68
--- /dev/null
+++ b/docs/storage-usage.rst
@@ -0,0 +1,845 @@
+Using the API
+=============
+
+Authentication and Configuration
+--------------------------------
+
+- For an overview of authentication in ``gcloud-python``,
+  see :doc:`gcloud-auth`.
+
+- In addition to any authentication configuration, you should also set the
+  :envvar:`GCLOUD_PROJECT` environment variable for the project you'd like
+  to interact with. If you are running on Google App Engine or Google
+  Compute Engine, this will be detected automatically.
+
+- After configuring your environment, create a
+  :class:`Client <gcloud.storage.client.Client>`
+
+  .. doctest::
+
+     >>> from gcloud import storage
+     >>> client = storage.Client()
+
+  or pass in ``credentials`` and ``project`` explicitly
+
+  .. doctest::
+
+     >>> from gcloud import storage
+     >>> client = storage.Client(project='my-project', credentials=creds)
+
+Manage buckets
+--------------
+
+The top-level concept (or "noun") for Google Cloud Storage is a bucket.
+To create a new bucket within your project
+
+.. code-block:: python
+
+   >>> new_bucket = client.bucket(bucket_name)
+   >>> new_bucket.exists()  # API request
+   False
+   >>> new_bucket.create()  # API request
+   >>> new_bucket.exists()  # API request
+   True
+
+This instantiates a new :class:`Bucket <gcloud.storage.bucket.Bucket>`
+and then uses it to make API requests.
+
+.. warning::
+
+   If the bucket already exists,
+   :meth:`create() <gcloud.storage.bucket.Bucket.create>` will throw an
+   exception corresponding to the `409 conflict`_ status code in the
+   response.
+
+.. _409 conflict: http://en.wikipedia.org/wiki/List_of_HTTP_status_codes#4xx_Client_Error
+
+.. note::
+
+   You can use
+   :meth:`create_bucket() <gcloud.storage.client.Client.create_bucket>`
+   directly to accomplish the same task.
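+As a sketch of how these pieces can combine (assuming the
+``gcloud.exceptions.Conflict`` class raised for `409 conflict`_
+responses), a small "get-or-create" helper might look like
+
+.. code-block:: python
+
+   >>> from gcloud.exceptions import Conflict
+   >>>
+   >>> def get_or_create_bucket(client, bucket_name):
+   ...     """Create a bucket, or load it if it already exists."""
+   ...     bucket = client.bucket(bucket_name)
+   ...     try:
+   ...         bucket.create()  # API request
+   ...     except Conflict:
+   ...         bucket.reload()  # API request
+   ...     return bucket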
+To load an existing bucket
+
+.. code-block:: python
+
+   >>> bucket = client.bucket(bucket_name)
+   >>> bucket.last_sync is None
+   True
+   >>> bucket.self_link is None
+   True
+   >>> bucket.reload()  # API request
+   >>> # May be necessary to include projection and fields for last sync
+   >>> bucket.last_sync
+   datetime.datetime(2015, 1, 1, 12, 0)
+   >>> bucket.self_link
+   u'https://www.googleapis.com/storage/v1/b/bucket-name'
+
+Instead of calling
+:meth:`reload() <gcloud.storage.bucket.Bucket.reload>`, you
+can load the properties when the object is instantiated by using the
+``eager`` keyword
+
+.. code-block:: python
+
+   >>> bucket = client.bucket(bucket_name, eager=True)  # API request
+   >>> bucket.last_sync
+   datetime.datetime(2015, 1, 1, 12, 0)
+
+If the bucket does not exist, an exception will occur
+
+.. code-block:: python
+
+   >>> bucket.reload()  # API request
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   gcloud.exceptions.NotFound: 404 Some Message
+
+:meth:`reload() <gcloud.storage.bucket.Bucket.reload>` retrieves
+all properties associated with the bucket from the server, while
+:meth:`exists() <gcloud.storage.bucket.Bucket.exists>` just
+confirms the bucket exists.
+
+.. note::
+
+   You can also use
+   :meth:`get_bucket() <gcloud.storage.client.Client.get_bucket>`
+   and
+   :meth:`lookup_bucket() <gcloud.storage.client.Client.lookup_bucket>`
+   to achieve similar results.
+
+To retrieve multiple buckets by name in a single request
+
+.. code-block:: python
+
+   >>> bucket1, bucket2, bucket3 = client.get_buckets(
+   ...     'bucket-name1',
+   ...     'bucket-name2',
+   ...     'bucket-name3')  # API request
+
+This is equivalent to
+
+.. code-block:: python
+
+   >>> with client.batch():  # API request
+   ...     bucket_future1 = client.get_bucket('bucket-name1')
+   ...     bucket_future2 = client.get_bucket('bucket-name2')
+   ...     bucket_future3 = client.get_bucket('bucket-name3')
+   ...
+   >>> bucket1 = bucket_future1.get()
+   >>> bucket2 = bucket_future2.get()
+   >>> bucket3 = bucket_future3.get()
+
+To list all buckets associated with the default project
+
+.. code-block:: python
+
+   >>> for bucket in client.list_buckets():  # API request
+   ...     print(bucket)
+   <Bucket: bucket-name1>
+   <Bucket: bucket-name2>
+   <Bucket: bucket-name3>
+
+To limit the list of buckets returned,
+:meth:`list_buckets() <gcloud.storage.client.Client.list_buckets>` accepts
+optional arguments
+
+.. code-block:: python
+
+   >>> bucket_iterator = client.list_buckets(max_results=2,
+   ...                                       page_token='next-bucket-name',
+   ...                                       prefix='foo',
+   ...                                       projection='noAcl',
+   ...                                       fields=None)
+   >>> for bucket in bucket_iterator:  # API request
+   ...     print(bucket)
+
+See the `buckets list`_ documentation for details.
+
+.. _buckets list: https://cloud.google.com/storage/docs/json_api/v1/buckets/list
+
+To delete a bucket
+
+.. code-block:: python
+
+   >>> bucket.delete()  # API request
+
+.. warning::
+
+   Deleting a bucket should happen very infrequently. Be careful that you
+   actually mean to delete the bucket.
+
+If the bucket still contains blobs, the backend
+will return a `409 conflict`_ and raise
+
+.. code-block:: python
+
+   >>> bucket.delete()  # API request
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   gcloud.exceptions.Conflict: 409 Some Message
+
+.. note::
+
+   We use the term blob interchangeably with "object" when referring to the
+   API. The Google Cloud Storage documentation uses object, but we use
+   ``blob`` instead to avoid confusion with the Python builtin ``object``.
+
+This can be addressed by using the ``force`` keyword
+
+.. code-block:: python
+
+   >>> bucket.delete(force=True)  # API request
+
+Even using ``force=True`` will fail if the bucket contains more than
+:attr:`MAX_OBJECTS_FOR_ITERATION <gcloud.storage.bucket.Bucket.MAX_OBJECTS_FOR_ITERATION>`
+blobs. In this case, delete the blobs manually before deleting the bucket,
+as sketched below.
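+One way to do that manual cleanup (a sketch only; the page size of 256
+below is an arbitrary choice, not a library requirement) is to delete the
+blobs in batches until none remain
+
+.. code-block:: python
+
+   >>> def empty_and_delete(bucket):
+   ...     """Delete every blob in ``bucket``, then the bucket itself."""
+   ...     while True:
+   ...         blobs = list(bucket.list_blobs(max_results=256))  # API request
+   ...         if not blobs:
+   ...             break
+   ...         # Delete each page of blobs in a single batched request.
+   ...         bucket.delete_blobs(*[blob.name for blob in blobs])  # API request
+   ...     bucket.delete()  # API request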
+To make updates to the bucket use
+:meth:`patch() <gcloud.storage.bucket.Bucket.patch>`
+
+.. code-block:: python
+
+   >>> bucket.versioning_enabled = True
+   >>> bucket.patch()  # API request
+
+If there are no updates to send, an exception will occur
+
+.. code-block:: python
+
+   >>> bucket.patch()  # API request
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   ValueError: No updates to send.
+
+In total, the properties that can be updated are
+
+.. code-block:: python
+
+   >>> bucket.acl = [
+   ...     ACLEntity('project-editors-111111', 'OWNER'),
+   ...     ACLEntity('project-owners-111111', 'OWNER'),
+   ...     ACLEntity('project-viewers-111111', 'READER'),
+   ...     ACLEntity('user-01234', 'OWNER'),
+   ... ]
+   >>> bucket.cors = [
+   ...     {
+   ...         'origin': ['http://example.appspot.com'],
+   ...         'responseHeader': ['Content-Type'],
+   ...         'method': ['GET', 'HEAD', 'DELETE'],
+   ...         'maxAgeSeconds': 3600,
+   ...     }
+   ... ]
+   >>> bucket.default_object_acl = [
+   ...     ACLEntity('project-owners-111111', 'OWNER'),
+   ...     ACLEntity('user-01234', 'OWNER'),
+   ... ]
+   >>> bucket.lifecycle = [
+   ...     {
+   ...         'action': {'type': 'Delete'},
+   ...         'condition': {'age': 365},
+   ...     },
+   ... ]
+   >>> bucket.location = 'ASIA'
+   >>> bucket.logging = {
+   ...     'logBucket': 'bucket-name',
+   ...     'logObjectPrefix': 'foo/',
+   ... }
+   >>> bucket.storage_class = 'DURABLE_REDUCED_AVAILABILITY'
+   >>> bucket.versioning_enabled = True
+   >>> bucket.website = {
+   ...     'mainPageSuffix': 'index.html',
+   ...     'notFoundPage': '404.html',
+   ... }
+
+In general, many of these properties are optional and will not need to be
+used (or changed from the defaults).
+
+In addition, a bucket has several read-only properties
+
+.. code-block:: python
+
+   >>> bucket.etag
+   u'CAI='
+   >>> bucket.id
+   u'bucket-name'
+   >>> bucket.metageneration
+   2L
+   >>> bucket.name
+   u'bucket-name'
+   >>> bucket.owner
+   <ACLEntity: project-owners-111111 (OWNER)>
+   >>> bucket.project_number
+   111111L
+   >>> bucket.self_link
+   u'https://www.googleapis.com/storage/v1/b/bucket-name'
+   >>> bucket.time_created
+   datetime.datetime(2015, 1, 1, 12, 0)
+
+See the `buckets`_ specification for more details. `Access control`_ data
+is complex enough to be a topic of its own. We provide the
+:class:`ACLEntity <gcloud.storage.acl.ACLEntity>` class to represent these
+objects and will discuss it further on.
+
+.. _buckets: https://cloud.google.com/storage/docs/json_api/v1/buckets
+.. _Access control: https://cloud.google.com/storage/docs/access-control
+
+.. note::
+
+   **BREAKING THE FOURTH WALL**: Note that ``storage.buckets.update`` is
+   absent. This doesn't seem necessary to implement given the presence of
+   :meth:`patch() <gcloud.storage.bucket.Bucket.patch>`.
+
+Manage Blobs
+------------
+
+One level below a bucket in the Google Cloud Storage hierarchy is
+a blob (called an object by the GCS docs, but a blob here). A blob
+can be thought of as a file stored in a bucket.
+
+To create a new blob within one of your buckets
+
+.. code-block:: python
+
+   >>> new_blob = bucket.blob(blob_name)
+   >>> new_blob.exists()  # API request
+   False
+   >>> new_blob.create()  # API request
+   >>> new_blob.exists()  # API request
+   True
+   >>> new_blob
+   <Blob: bucket-name, blob-name>
+
+You can pass the arguments ``if_generation_match`` or
+``if_generation_not_match`` (mutually exclusive) and
+``if_metageneration_match`` or ``if_metageneration_not_match`` (also
+mutually exclusive). See documentation for `objects.insert`_ for more
+details.
+
+.. _objects.insert: https://cloud.google.com/storage/docs/json_api/v1/objects/insert
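+For example, a guard against clobbering an existing blob might pass a
+generation match condition of zero (a sketch; it assumes ``create()``
+forwards these arguments as the `objects.insert`_ documentation describes)
+
+.. code-block:: python
+
+   >>> new_blob = bucket.blob(blob_name)
+   >>> # Succeeds only if no live version of the blob exists yet.
+   >>> new_blob.create(if_generation_match=0)  # API request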
+By default, just constructing a :class:`Blob <gcloud.storage.blob.Blob>`
+does not load any of the associated blob metadata. To load all blob
+properties
+
+.. code-block:: python
+
+   >>> blob = bucket.blob(blob_name)
+   >>> blob.last_sync is None
+   True
+   >>> blob.content_type is None
+   True
+   >>> blob.reload()  # API request
+   >>> blob.last_sync
+   datetime.datetime(2015, 1, 1, 12, 0)
+   >>> blob.content_type
+   u'text/plain'
+
+.. note::
+
+   Simply calling :meth:`reload() <gcloud.storage.blob.Blob.reload>`
+   will not actually retrieve the contents stored
+   for the given blob. Instead, it retrieves the metadata associated with
+   the blob.
+
+Instead of calling
+:meth:`reload() <gcloud.storage.blob.Blob.reload>`, you
+can load the properties when the object is instantiated by using the
+``eager`` keyword
+
+.. code-block:: python
+
+   >>> blob = bucket.blob(blob_name, eager=True)  # API request
+   >>> blob.last_sync
+   datetime.datetime(2015, 1, 1, 12, 0)
+
+.. note::
+
+   You can use
+   :meth:`get_blob() <gcloud.storage.bucket.Bucket.get_blob>`
+   directly to accomplish the same task.
+
+To retrieve multiple blobs in a single request
+
+.. code-block:: python
+
+   >>> blob1, blob2, blob3 = bucket.get_blobs('blob-name1',
+   ...                                        'blob-name2',
+   ...                                        'blob-name3')  # API request
+
+This is equivalent to
+
+.. code-block:: python
+
+   >>> with client.batch():  # API request
+   ...     blob_future1 = bucket.get_blob('blob-name1')
+   ...     blob_future2 = bucket.get_blob('blob-name2')
+   ...     blob_future3 = bucket.get_blob('blob-name3')
+   ...
+   >>> blob1 = blob_future1.get()
+   >>> blob2 = blob_future2.get()
+   >>> blob3 = blob_future3.get()
+
+To list all blobs in a bucket
+
+.. code-block:: python
+
+   >>> for blob in bucket.list_blobs():  # API request
+   ...     print(blob)
+   <Blob: bucket-name, blob-name1>
+   <Blob: bucket-name, blob-name2>
+   <Blob: bucket-name, blob-name3>
+
+.. warning::
+
+   In a production application, a typical bucket may very likely have
+   thousands or even millions of blobs. Iterating through all of them in
+   such an application is a very bad idea.
+
+To limit the list of blobs returned,
+:meth:`list_blobs() <gcloud.storage.bucket.Bucket.list_blobs>` accepts
+optional arguments
+
+.. code-block:: python
+
+   >>> blob_iterator = bucket.list_blobs(max_results=2,
+   ...                                   page_token='next-blob-name',
+   ...                                   prefix='foo',
+   ...                                   delimiter='/',
+   ...                                   versions=True,
+   ...                                   projection='noAcl',
+   ...                                   fields=None)
+   >>> for blob in blob_iterator:  # API request
+   ...     print(blob)
+
+See the `objects list`_ documentation for details.
+
+.. _objects list: https://cloud.google.com/storage/docs/json_api/v1/objects/list
+
+To delete a blob
+
+.. code-block:: python
+
+   >>> blob.delete()  # API request
+
+.. note::
+
+   You can use
+   :meth:`delete_blob() <gcloud.storage.bucket.Bucket.delete_blob>`
+   directly to accomplish the same task.
+
+As with retrieving, you may also delete multiple blobs in a single request
+
+.. code-block:: python
+
+   >>> bucket.delete_blobs('blob-name1',
+   ...                     'blob-name2',
+   ...                     'blob-name3')  # API request
+
+This is equivalent to
+
+.. code-block:: python
+
+   >>> with client.batch():  # API request
+   ...     bucket.delete_blob('blob-name1')
+   ...     bucket.delete_blob('blob-name2')
+   ...     bucket.delete_blob('blob-name3')
+
+If some of the deletes fail, you can handle each
+error with custom behavior:
+
+.. code-block:: python
+
+   >>> def handle_not_found(blob):
+   ...     msg = '%s not found' % (blob,)
+   ...     print(msg)
+   ...
+   >>> bucket.delete_blobs('blob-name1', 'blob-name2', 'blob-name3',
+   ...                     on_error=handle_not_found)  # API request
+   blob-name2 not found
+
+To make updates to a blob use
+:meth:`patch() <gcloud.storage.blob.Blob.patch>`
+
+.. code-block:: python
+
+   >>> blob.content_language = 'en-US'
+   >>> blob.patch()  # API request
+
+If there are no updates to send, an exception will occur
+
+.. code-block:: python
+
+   >>> blob.patch()  # API request
+   Traceback (most recent call last):
+     File "<stdin>", line 1, in <module>
+   ValueError: No updates to send.
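+If you cannot be sure anything changed locally, one defensive pattern
+(a sketch, not something the library requires) is to treat the empty
+patch as a no-op
+
+.. code-block:: python
+
+   >>> try:
+   ...     blob.patch()  # API request
+   ... except ValueError:
+   ...     pass  # Nothing was modified locally; nothing to send.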
+In total, the properties that can be updated are
+
+.. code-block:: python
+
+   >>> blob.acl = [
+   ...     ACLEntity('project-owners-111111', 'OWNER'),
+   ...     ACLEntity('user-01234', 'OWNER'),
+   ... ]
+   >>> blob.cache_control = 'private, max-age=0, no-cache'
+   >>> blob.content_disposition = 'Attachment; filename=example.html'
+   >>> blob.content_encoding = 'gzip'
+   >>> blob.content_language = 'en-US'
+   >>> blob.content_type = 'text/plain'
+   >>> blob.crc32c = u'z8SuHQ=='  # CRC32C of "foo"
+   >>> blob.md5_hash = u'rL0Y20zC+Fzt72VPzMSk2A=='  # MD5 of "foo"
+   >>> blob.metadata = {'foo': 'bar', 'baz': 'qux'}
+
+.. note::
+
+   **BREAKING THE FOURTH WALL**: Why are ``crc32c`` and ``md5_hash``
+   writable?
+
+In general, many of these properties are optional and will not need to be
+used (or changed from the defaults).
+
+In addition, a blob has several read-only properties
+
+.. code-block:: python
+
+   >>> blob.bucket
+   <Bucket: bucket-name>
+   >>> blob.component_count
+   1
+   >>> blob.etag
+   u'CNiOr665xcQCEAE='
+   >>> blob.generation
+   12345L
+   >>> blob.id
+   u'bucket-name/blob-name/12345'
+   >>> blob.media_link
+   u'https://www.googleapis.com/download/storage/v1/b/bucket-name/o/blob-name?generation=12345&alt=media'
+   >>> blob.metageneration
+   1L
+   >>> blob.name
+   'blob-name'
+   >>> blob.owner
+   <ACLEntity: user-01234 (OWNER)>
+   >>> blob.self_link
+   u'https://www.googleapis.com/storage/v1/b/bucket-name/o/blob-name'
+   >>> blob.size
+   3L
+   >>> blob.storage_class
+   u'STANDARD'
+   >>> print(blob.time_deleted)
+   None
+   >>> blob.updated
+   datetime.datetime(2015, 1, 1, 12, 0)
+
+To copy an existing blob to a new location, potentially even in
+a new bucket
+
+.. code-block:: python
+
+   >>> new_bucket = client.bucket(new_bucket_name)
+   >>> new_blob = bucket.copy_blob(blob, new_bucket,
+   ...                             new_name='new-blob-name')  # API request
+
+To compose multiple blobs together
+
+.. code-block:: python
+
+   >>> blob1, blob2 = bucket.get_blobs('blob-name1', 'blob-name2')
+   >>> new_blob = bucket.compose('composed-blob',
+   ...                           parts=[blob1, blob2])  # API request
+
+See the `objects`_ specification for more details. `Access control`_ data
+is complex enough to be a topic of its own. We provide the
+:class:`ACLEntity <gcloud.storage.acl.ACLEntity>` class to represent these
+objects and will discuss it further on.
+
+.. _objects: https://cloud.google.com/storage/docs/json_api/v1/objects
+
+Working with Blob Data
+----------------------
+
+The most important use of a blob is not accessing or updating its
+metadata; it is storing data in the cloud (hence "Cloud Storage").
+
+To upload string data into a blob
+
+.. code-block:: python
+
+   >>> blob.upload_from_string('foo')  # API request
+
+If the data has a known content-type, set it on the blob before
+uploading:
+
+.. code-block:: python
+
+   >>> blob.content_type = 'application/zip'
+   >>> blob.upload_from_string('foo')  # API request
+
+To upload instead from a file-like object
+
+.. code-block:: python
+
+   >>> blob.upload_from_stream(file_object)  # API request
+
+To upload directly from a file
+
+.. code-block:: python
+
+   >>> blob.upload_from_filename('/path/on/local/machine.file')  # API request
+
+This is roughly equivalent to
+
+.. code-block:: python
+
+   >>> with open('/path/on/local/machine.file', 'rb') as file_object:
+   ...     blob.upload_from_stream(file_object)  # API request
+
+with some extra behavior to set local file properties.
+
+.. note::
+
+   If you ``upload`` a blob which didn't already exist, it will also be
+   created with all the properties you have set locally.
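+If you want the blob served with the right content-type, one approach
+(a sketch using the standard library's ``mimetypes`` module) is to guess
+it from the filename before uploading
+
+.. code-block:: python
+
+   >>> import mimetypes
+   >>>
+   >>> filename = '/path/on/local/machine.zip'
+   >>> content_type, _ = mimetypes.guess_type(filename)
+   >>> # Fall back to a generic type if the guess fails.
+   >>> blob.content_type = content_type or 'application/octet-stream'
+   >>> blob.upload_from_filename(filename)  # API request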
+To download blob data into a string
+
+.. code-block:: python
+
+   >>> blob_contents = blob.download_as_string()  # API request
+
+To download instead to a file-like object
+
+.. code-block:: python
+
+   >>> blob.download_to_stream(file_object)  # API request
+
+To download directly to a file
+
+.. code-block:: python
+
+   >>> blob.download_to_filename('/path/on/local/machine.file')  # API request
+
+Dealing with Sharing and ACLs
+-----------------------------
+
+To generate a signed URL for temporary privileged access to the
+contents of a blob
+
+.. code-block:: python
+
+   >>> expiration_seconds = 600
+   >>> signed_url = blob.generate_signed_url(expiration_seconds)
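+The signed URL can then be handed to any HTTP client. For instance (a
+sketch assuming the third-party ``requests`` library is installed), an
+otherwise unauthenticated user can fetch the blob's contents with a
+plain GET
+
+.. code-block:: python
+
+   >>> import requests
+   >>>
+   >>> response = requests.get(signed_url)  # HTTP request, no gcloud auth
+   >>> response.status_code
+   200
+   >>> blob_contents = response.content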
+A :class:`Bucket <gcloud.storage.bucket.Bucket>` has both its own ACLs
+and a set of default ACLs to be used for newly created blobs.
+
+.. code-block:: python
+
+   >>> bucket.acl
+   [<ACLEntity: project-editors-111111 (OWNER)>,
+    <ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: project-viewers-111111 (READER)>,
+    <ACLEntity: user-01234 (OWNER)>]
+   >>> bucket.default_object_acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>]
+
+This will be updated when calling
+:meth:`bucket.reload() <gcloud.storage.bucket.Bucket.reload>`,
+since by default ``projection=full`` is used to get the bucket
+properties.
+
+To update these directly
+
+.. code-block:: python
+
+   >>> bucket.update_acl()  # API request
+   >>> bucket.acl
+   [<ACLEntity: project-editors-111111 (OWNER)>,
+    <ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: project-viewers-111111 (READER)>,
+    <ACLEntity: user-01234 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+   >>> bucket.update_default_object_acl()  # API request
+   >>> bucket.default_object_acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+
+These methods call `bucketAccessControls.list`_ and
+`defaultObjectAccessControls.list`_ instead of updating
+every single property associated with the bucket.
+
+.. _bucketAccessControls.list: https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls/list
+.. _defaultObjectAccessControls.list: https://cloud.google.com/storage/docs/json_api/v1/defaultObjectAccessControls/list
+
+You can limit the results of
+:meth:`update_default_object_acl() <gcloud.storage.bucket.Bucket.update_default_object_acl>`
+by using
+
+.. code-block:: python
+
+   >>> bucket.update_default_object_acl(if_metageneration_match=3)  # API request
+
+or
+
+.. code-block:: python
+
+   >>> bucket.update_default_object_acl(if_metageneration_not_match=5)  # API request
+
+Similarly, a :class:`Blob <gcloud.storage.blob.Blob>` has its own ACLs
+
+.. code-block:: python
+
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>]
+
+This will be updated when calling
+:meth:`blob.reload() <gcloud.storage.blob.Blob.reload>`,
+since by default ``projection=full`` is used to get the blob properties.
+
+To update these directly
+
+.. code-block:: python
+
+   >>> blob.update_acl()  # API request
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+
+When sending the `objectAccessControls.list`_ request, the blob's current
+generation is sent.
+
+.. _objectAccessControls.list: https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls/list
+
+Individual :class:`ACLEntity <gcloud.storage.acl.ACLEntity>` objects can
+be edited and updated directly
+
+.. code-block:: python
+
+   >>> entity = bucket.acl[1]
+   >>> entity
+   <ACLEntity: user-01234 (READER)>
+   >>> entity.role = storage.ROLES.WRITER
+   >>> entity.patch()  # API request
+
+An :class:`ACLEntity <gcloud.storage.acl.ACLEntity>` object has two
+properties that can be updated
+
+.. code-block:: python
+
+   >>> entity.entity = 'user-01234'
+   >>> entity.role = 'WRITER'
+
+and several read-only properties
+
+.. code-block:: python
+
+   >>> entity.bucket
+   u'bucket-name'
+   >>> entity.domain
+   u'foo.com'
+   >>> entity.email
+   u'foo@gmail.com'
+   >>> entity.entityId
+   u'00b4903a9708670FAKEDATA3109ed94bFAKEDATA3e3090f8c566691bFAKEDATA'
+   >>> entity.etag
+   u'CAI='
+   >>> entity.generation
+   1L
+   >>> entity.id
+   u'bucket-name/project-owners-111111'
+   >>> entity.project_team
+   {u'projectNumber': u'111111', u'team': u'owners'}
+   >>> entity.self_link
+   u'https://www.googleapis.com/storage/v1/b/bucket-name/acl/project-owners-111111'
+
+To update the values in an ACL, you can either update the entire parent
+
+.. code-block:: python
+
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (READER)>]
+   >>> blob.reload()  # API request
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>]
+
+or just reload the individual ACL
+
+.. code-block:: python
+
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (READER)>]
+   >>> blob.acl[1].reload()  # API request
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>]
+
+To add an ACL to an existing object
+
+.. code-block:: python
+
+   >>> bucket.add_acl_entity('group-foo@googlegroups.com', 'WRITER')
+   >>> bucket.add_default_object_acl_entity('domain-foo.com', 'OWNER')
+   >>> blob.add_acl_entity('user-01234', 'READER')
+
+To remove an ACL, you can either reduce the list and update
+
+.. code-block:: python
+
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+   >>> blob.acl.remove(blob.acl[1])
+   >>> blob.patch()  # API request
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+
+or delete the ACL directly
+
+.. code-block:: python
+
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-01234 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+   >>> blob.acl[1].delete()  # API request
+   >>> blob.acl
+   [<ACLEntity: project-owners-111111 (OWNER)>,
+    <ACLEntity: user-56789 (READER)>]
+
+.. note::
+
+   **BREAKING THE FOURTH WALL**: Note that ``storage.*AccessControls.insert``
+   and ``storage.*AccessControls.update`` are absent. This is done
+   intentionally, with the philosophy that an
+   :class:`ACLEntity <gcloud.storage.acl.ACLEntity>` must be attached to
+   either a :class:`Bucket <gcloud.storage.bucket.Bucket>` or a
+   :class:`Blob <gcloud.storage.blob.Blob>`.
+
+Predefined ACLs
+---------------
+
+When creating a new bucket, you can set predefined ACLs
+
+.. code-block:: python
+
+   >>> bucket.create(predefined_acl=storage.ACLS.PROJECT_PRIVATE,
+   ...               predefined_default_object_acl=storage.ACLS.PRIVATE)  # API request
+
+The enum variable ``storage.ACLS`` contains all acceptable values. See
+documentation for `buckets.insert`_ for more details.
+
+.. _buckets.insert: https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
+
+When creating a new blob, you can set a predefined ACL
+
+.. code-block:: python
+
+   >>> blob.create(predefined_acl=storage.ACLS.AUTHENTICATED_READ)  # API request
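+Putting these pieces together (a sketch only; it assumes the
+``storage.ACLS`` enum described above includes a ``PUBLIC_READ`` value
+mirroring the API's ``publicRead`` predefined ACL), a publicly readable
+blob can be created and populated in two steps
+
+.. code-block:: python
+
+   >>> blob = bucket.blob('public-blob-name')
+   >>> blob.create(predefined_acl=storage.ACLS.PUBLIC_READ)  # API request
+   >>> blob.upload_from_string('everyone can read this')  # API request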
diff --git a/gcloud/storage/bucket.py b/gcloud/storage/bucket.py
index 589f8527fb3f..4c672347297f 100644
--- a/gcloud/storage/bucket.py
+++ b/gcloud/storage/bucket.py
@@ -81,10 +81,10 @@ class Bucket(_PropertyMixin):
     """
     _iterator_class = _BlobIterator
 
-    _MAX_OBJECTS_FOR_ITERATION = 256
+    MAX_OBJECTS_FOR_ITERATION = 256
     """Maximum number of existing objects allowed in iteration.
 
-    This is used in Bucket.delete() and Bucket.make_public().
+    This is used in :meth:`delete` and :meth:`make_public`.
     """
 
     _STORAGE_CLASSES = ('STANDARD', 'NEARLINE', 'DURABLE_REDUCED_AVAILABILITY')
@@ -336,15 +336,15 @@ def delete(self, force=False, client=None):
         client = self._require_client(client)
         if force:
             blobs = list(self.list_blobs(
-                max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
+                max_results=self.MAX_OBJECTS_FOR_ITERATION + 1,
                 client=client))
-            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
+            if len(blobs) > self.MAX_OBJECTS_FOR_ITERATION:
                 message = (
                     'Refusing to delete bucket with more than '
                     '%d objects. If you actually want to delete '
                     'this bucket, please delete the objects '
                     'yourself before calling Bucket.delete().'
-                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
+                ) % (self.MAX_OBJECTS_FOR_ITERATION,)
                 raise ValueError(message)
 
         # Ignore 404 errors on delete.
@@ -810,15 +810,15 @@ def make_public(self, recursive=False, future=False, client=None):
         if recursive:
             blobs = list(self.list_blobs(
                 projection='full',
-                max_results=self._MAX_OBJECTS_FOR_ITERATION + 1,
+                max_results=self.MAX_OBJECTS_FOR_ITERATION + 1,
                 client=client))
-            if len(blobs) > self._MAX_OBJECTS_FOR_ITERATION:
+            if len(blobs) > self.MAX_OBJECTS_FOR_ITERATION:
                 message = (
                     'Refusing to make public recursively with more than '
                     '%d objects. If you actually want to make every object '
                     'in this bucket public, please do it on the objects '
                     'yourself.'
-                ) % (self._MAX_OBJECTS_FOR_ITERATION,)
+                ) % (self.MAX_OBJECTS_FOR_ITERATION,)
                 raise ValueError(message)
 
         for blob in blobs:
diff --git a/gcloud/storage/client.py b/gcloud/storage/client.py
index b3abe09e7913..b8f1da452ab5 100644
--- a/gcloud/storage/client.py
+++ b/gcloud/storage/client.py
@@ -154,7 +154,7 @@ def get_bucket(self, bucket_name):
           >>> except gcloud.exceptions.NotFound:
           >>>   print 'Sorry, that bucket does not exist!'
 
-        This implements "storage.buckets.get".
+        This implements ``storage.buckets.get``.
 
         :type bucket_name: string
         :param bucket_name: The name of the bucket to get.
@@ -200,7 +200,7 @@ def create_bucket(self, bucket_name):
           >>> print bucket
          <Bucket: my-bucket>
 
-        This implements "storage.buckets.insert".
+        This implements ``storage.buckets.insert``.
 
         If the bucket already exists, will raise
         :class:`gcloud.exceptions.Conflict`.
@@ -223,9 +223,9 @@ def list_buckets(self, max_results=None, page_token=None, prefix=None,
             bucket.
 
           >>> for bucket in client.list_buckets():
-          >>>   print bucket
+          >>>   print(bucket)
 
-        This implements "storage.buckets.list".
+        This implements ``storage.buckets.list``.
 
         :type max_results: integer or ``NoneType``
         :param max_results: Optional. Maximum number of buckets to return.
diff --git a/gcloud/storage/test_bucket.py b/gcloud/storage/test_bucket.py
index 947b1f0e600d..e8af775b3e36 100644
--- a/gcloud/storage/test_bucket.py
+++ b/gcloud/storage/test_bucket.py
@@ -391,7 +391,7 @@ def test_delete_too_many(self):
         bucket = self._makeOne(client=client, name=NAME)
 
         # Make the Bucket refuse to delete with 2 objects.
-        bucket._MAX_OBJECTS_FOR_ITERATION = 1
+        bucket.MAX_OBJECTS_FOR_ITERATION = 1
         self.assertRaises(ValueError, bucket.delete, force=True)
         self.assertEqual(connection._deleted_buckets, [])
@@ -921,7 +921,7 @@ def get_items_from_response(self, response):
         self.assertEqual(kw[0]['query_params'], {'projection': 'full'})
         self.assertEqual(kw[1]['method'], 'GET')
         self.assertEqual(kw[1]['path'], '/b/%s/o' % NAME)
-        max_results = bucket._MAX_OBJECTS_FOR_ITERATION + 1
+        max_results = bucket.MAX_OBJECTS_FOR_ITERATION + 1
         self.assertEqual(kw[1]['query_params'],
                          {'maxResults': max_results, 'projection': 'full'})
@@ -947,7 +947,7 @@ def test_make_public_recursive_too_many(self):
         bucket.default_object_acl.loaded = True
 
         # Make the Bucket refuse to make_public with 2 objects.
-        bucket._MAX_OBJECTS_FOR_ITERATION = 1
+        bucket.MAX_OBJECTS_FOR_ITERATION = 1
         self.assertRaises(ValueError, bucket.make_public, recursive=True)