diff --git a/go.mod b/go.mod
index e8beaa77be..95122acdc5 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/GoogleContainerTools/kaniko
 go 1.19
 
 require (
-	cloud.google.com/go/storage v1.35.1
+	cloud.google.com/go/storage v1.36.0
 	github.com/Azure/azure-storage-blob-go v0.14.0
 	// This docker-credential-gcr dependency version is actually the same as v2.1.8.
 	// See https://github.com/GoogleCloudPlatform/docker-credential-gcr/issues/128
diff --git a/go.sum b/go.sum
index 6221a1371f..9724a41823 100644
--- a/go.sum
+++ b/go.sum
@@ -44,8 +44,8 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
 cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
 cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
-cloud.google.com/go/storage v1.35.1 h1:B59ahL//eDfx2IIKFBeT5Atm9wnNmj3+8xG/W4WB//w=
-cloud.google.com/go/storage v1.35.1/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
+cloud.google.com/go/storage v1.36.0 h1:P0mOkAcaJxhCTvAkMhxMfrTKiNcub4YmmPBtlhAyTr8=
+cloud.google.com/go/storage v1.36.0/go.mod h1:M6M/3V/D3KpzMTJyPOR/HU6n2Si5QdaXYEsng2xgOs8=
 dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
 dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
diff --git a/vendor/cloud.google.com/go/storage/CHANGES.md b/vendor/cloud.google.com/go/storage/CHANGES.md
index 30ee040f76..8b3fa6fc48 100644
--- a/vendor/cloud.google.com/go/storage/CHANGES.md
+++ b/vendor/cloud.google.com/go/storage/CHANGES.md
@@ -1,6 +1,19 @@
 # Changes
 
+## [1.36.0](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.1...storage/v1.36.0) (2023-12-14)
+
+
+### Features
+
+* **storage:** Add object retention feature ([#9072](https://github.com/googleapis/google-cloud-go/issues/9072)) ([16ecfd1](https://github.com/googleapis/google-cloud-go/commit/16ecfd150ff1982f03d207a80a82e934d1013874))
+
+
+### Bug Fixes
+
+* **storage:** Do not inhibit the dead code elimination. ([#8543](https://github.com/googleapis/google-cloud-go/issues/8543)) ([ca2493f](https://github.com/googleapis/google-cloud-go/commit/ca2493f43c299bbaed5f7e5b70f66cc763ff9802))
+* **storage:** Set flush and get_state to false on the last write in gRPC ([#9013](https://github.com/googleapis/google-cloud-go/issues/9013)) ([c1e9fe5](https://github.com/googleapis/google-cloud-go/commit/c1e9fe5f4166a71e55814ccf126926ec0e0e7945))
+
 ## [1.35.1](https://github.com/googleapis/google-cloud-go/compare/storage/v1.35.0...storage/v1.35.1) (2023-11-09)
diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go
index 3818c4498c..1059d4e8b7 100644
--- a/vendor/cloud.google.com/go/storage/bucket.go
+++ b/vendor/cloud.google.com/go/storage/bucket.go
@@ -41,13 +41,14 @@ import (
 // BucketHandle provides operations on a Google Cloud Storage bucket.
 // Use Client.Bucket to get a handle.
 type BucketHandle struct {
-	c                *Client
-	name             string
-	acl              ACLHandle
-	defaultObjectACL ACLHandle
-	conds            *BucketConditions
-	userProject      string // project for Requester Pays buckets
-	retry            *retryConfig
+	c                     *Client
+	name                  string
+	acl                   ACLHandle
+	defaultObjectACL      ACLHandle
+	conds                 *BucketConditions
+	userProject           string // project for Requester Pays buckets
+	retry                 *retryConfig
+	enableObjectRetention *bool
 }
 
 // Bucket returns a BucketHandle, which provides operations on the named bucket.
@@ -85,7 +86,8 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
 	defer func() { trace.EndSpan(ctx, err) }()
 
 	o := makeStorageOpts(true, b.retry, b.userProject)
-	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, o...); err != nil {
+
+	if _, err := b.c.tc.CreateBucket(ctx, projectID, b.name, attrs, b.enableObjectRetention, o...); err != nil {
 		return err
 	}
 	return nil
@@ -462,6 +464,15 @@ type BucketAttrs struct {
 	// allows for the automatic selection of the best storage class
 	// based on object access patterns.
 	Autoclass *Autoclass
+
+	// ObjectRetentionMode reports whether individual objects in the bucket can
+	// be configured with a retention policy. An empty value means that object
+	// retention is disabled.
+	// This field is read-only. Object retention can be enabled only by creating
+	// a bucket with SetObjectRetention set to true on the BucketHandle. It
+	// cannot be modified once the bucket is created.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	ObjectRetentionMode string
 }
 
 // BucketPolicyOnly is an alias for UniformBucketLevelAccess.
@@ -757,6 +768,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 	if err != nil {
 		return nil, err
 	}
+
 	return &BucketAttrs{
 		Name:     b.Name,
 		Location: b.Location,
@@ -771,6 +783,7 @@ func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
 		RequesterPays:       b.Billing != nil && b.Billing.RequesterPays,
 		Lifecycle:           toLifecycle(b.Lifecycle),
 		RetentionPolicy:     rp,
+		ObjectRetentionMode: toBucketObjectRetention(b.ObjectRetention),
 		CORS:                toCORS(b.Cors),
 		Encryption:          toBucketEncryption(b.Encryption),
 		Logging:             toBucketLogging(b.Logging),
@@ -1348,6 +1361,17 @@ func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
 	return b.c.tc.LockBucketRetentionPolicy(ctx, b.name, b.conds, o...)
 }
 
+// SetObjectRetention returns a new BucketHandle that will enable object retention
+// on bucket creation. To enable object retention, you must use the returned
+// handle to create the bucket. This has no effect on an already existing bucket.
+// ObjectRetention is not enabled by default.
+// ObjectRetention cannot be configured through the gRPC API.
+func (b *BucketHandle) SetObjectRetention(enable bool) *BucketHandle {
+	b2 := *b
+	b2.enableObjectRetention = &enable
+	return &b2
+}
+
 // applyBucketConds modifies the provided call using the conditions in conds.
 // call is something that quacks like a *raw.WhateverCall.
 func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@@ -1360,11 +1384,11 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
 	cval := reflect.ValueOf(call)
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1447,6 +1471,13 @@ func toRetentionPolicyFromProto(rp *storagepb.Bucket_RetentionPolicy) *Retention
 	}
 }
 
+func toBucketObjectRetention(or *raw.BucketObjectRetention) string {
+	if or == nil {
+		return ""
+	}
+	return or.Mode
+}
+
 func toRawCORS(c []CORS) []*raw.BucketCors {
 	var out []*raw.BucketCors
 	for _, v := range c {
diff --git a/vendor/cloud.google.com/go/storage/client.go b/vendor/cloud.google.com/go/storage/client.go
index 3bed9b64c2..4906b1d1f7 100644
--- a/vendor/cloud.google.com/go/storage/client.go
+++ b/vendor/cloud.google.com/go/storage/client.go
@@ -44,7 +44,7 @@ type storageClient interface {
 
 	// Top-level methods.
 
 	GetServiceAccount(ctx context.Context, project string, opts ...storageOption) (string, error)
-	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error)
+	CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error)
 	ListBuckets(ctx context.Context, project string, opts ...storageOption) *BucketIterator
 	Close() error
 
@@ -60,7 +60,7 @@ type storageClient interface {
 	DeleteObject(ctx context.Context, bucket, object string, gen int64, conds *Conditions, opts ...storageOption) error
 	GetObject(ctx context.Context, bucket, object string, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
-	UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error)
+	UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error)
 
 	// Default Object ACL methods.
 
@@ -291,6 +291,15 @@ type newRangeReaderParams struct {
 	readCompressed bool // Use accept-encoding: gzip. Only works for HTTP currently.
 }
 
+type updateObjectParams struct {
+	bucket, object    string
+	uattrs            *ObjectAttrsToUpdate
+	gen               int64
+	encryptionKey     []byte
+	conds             *Conditions
+	overrideRetention *bool
+}
+
 type composeObjectRequest struct {
 	dstBucket string
 	dstObject destinationObject
diff --git a/vendor/cloud.google.com/go/storage/grpc_client.go b/vendor/cloud.google.com/go/storage/grpc_client.go
index 99dfba4677..a51cf9c086 100644
--- a/vendor/cloud.google.com/go/storage/grpc_client.go
+++ b/vendor/cloud.google.com/go/storage/grpc_client.go
@@ -152,7 +152,12 @@ func (c *grpcStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return resp.EmailAddress, err
 }
 
-func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *grpcStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
+	if enableObjectRetention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
+
 	s := callSettings(c.settings, opts...)
 	b := attrs.toProtoBucket()
 	b.Project = toProjectResource(project)
@@ -507,25 +512,30 @@ func (c *grpcStorageClient) GetObject(ctx context.Context, bucket, object string
 	return attrs, err
 }
 
-func (c *grpcStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *grpcStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
+	if params.overrideRetention != nil || uattrs.Retention != nil {
+		// TO-DO: implement ObjectRetention once available - see b/308194853
+		return nil, status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+	}
 	s := callSettings(c.settings, opts...)
-	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, bucket), object)
+	o := uattrs.toProtoObject(bucketResourceName(globalProjectAlias, params.bucket), params.object)
 	// For Update, generation is passed via the object message rather than a field on the request.
-	if gen >= 0 {
-		o.Generation = gen
+	if params.gen >= 0 {
+		o.Generation = params.gen
 	}
 	req := &storagepb.UpdateObjectRequest{
 		Object:        o,
 		PredefinedAcl: uattrs.PredefinedACL,
 	}
-	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, conds, req); err != nil {
+	if err := applyCondsProto("grpcStorageClient.UpdateObject", defaultGen, params.conds, req); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
 		ctx = setUserProjectMetadata(ctx, s.userProject)
 	}
-	if encryptionKey != nil {
-		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(encryptionKey)
+	if params.encryptionKey != nil {
+		req.CommonObjectRequestParams = toProtoCommonObjectRequestParams(params.encryptionKey)
 	}
 
 	fieldMask := &fieldmaskpb.FieldMask{Paths: nil}
@@ -739,7 +749,8 @@ func (c *grpcStorageClient) DeleteObjectACL(ctx context.Context, bucket, object
 	}
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -769,7 +780,8 @@ func (c *grpcStorageClient) UpdateObjectACL(ctx context.Context, bucket, object
 	acl = append(attrs.ACL, aclRule)
 	uattrs := &ObjectAttrsToUpdate{ACL: acl}
 	// Call UpdateObject with the specified metageneration.
-	if _, err = c.UpdateObject(ctx, bucket, object, uattrs, defaultGen, nil, &Conditions{MetagenerationMatch: attrs.Metageneration}, opts...); err != nil {
+	params := &updateObjectParams{bucket: bucket, object: object, uattrs: uattrs, gen: defaultGen, conds: &Conditions{MetagenerationMatch: attrs.Metageneration}}
+	if _, err = c.UpdateObject(ctx, params, opts...); err != nil {
 		return err
 	}
 	return nil
@@ -1049,6 +1061,13 @@ func (c *grpcStorageClient) OpenWriter(params *openWriterParams, opts ...storage
 			return
 		}
 
+		if params.attrs.Retention != nil {
+			// TO-DO: remove once ObjectRetention is available - see b/308194853
+			err = status.Errorf(codes.Unimplemented, "storage: object retention is not supported in gRPC")
+			errorf(err)
+			pr.CloseWithError(err)
+			return
+		}
 		// The chunk buffer is full, but there is no end in sight. This
 		// means that either:
 		// 1. A resumable upload will need to be used to send
@@ -1629,8 +1648,8 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 			},
 			WriteOffset: writeOffset,
 			FinishWrite: lastWriteOfEntireObject,
-			Flush:       remainingDataFitsInSingleReq,
-			StateLookup: remainingDataFitsInSingleReq,
+			Flush:       remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
+			StateLookup: remainingDataFitsInSingleReq && !lastWriteOfEntireObject,
 		}
 
 		// Open a new stream if necessary and set the first_message field on
@@ -1723,32 +1742,33 @@ func (w *gRPCWriter) uploadBuffer(recvd int, start int64, doneReading bool) (*st
 			return nil, writeOffset, nil
 		}
 
-		// Done sending data (remainingDataFitsInSingleReq should == true if we
-		// reach this code). Receive from the stream to confirm the persisted data.
-		resp, err := w.stream.Recv()
+		// Done sending the data in the buffer (remainingDataFitsInSingleReq
+		// should == true if we reach this code).
+		// If we are done sending the whole object, close the stream and get the final
+		// object. Otherwise, receive from the stream to confirm the persisted data.
+		if !lastWriteOfEntireObject {
+			resp, err := w.stream.Recv()
+
+			// Retriable errors mean we should start over and attempt to
+			// resend the entire buffer via a new stream.
+			// If not retriable, falling through will return the error received
+			// from closing the stream.
+			if shouldRetry(err) {
+				writeOffset, err = w.determineOffset(start)
+				if err != nil {
+					return nil, 0, err
+				}
+				sent = int(writeOffset) - int(start)
+
+				// Drop the stream reference as a new one will need to be created.
+				w.stream = nil
 
-		// Retriable errors mean we should start over and attempt to
-		// resend the entire buffer via a new stream.
-		// If not retriable, falling through will return the error received
-		// from closing the stream.
-		if shouldRetry(err) {
-			writeOffset, err = w.determineOffset(start)
+				continue
+			}
 			if err != nil {
 				return nil, 0, err
 			}
-			sent = int(writeOffset) - int(start)
-
-			// Drop the stream reference as a new one will need to be created.
-			w.stream = nil
-
-			continue
-		}
-		if err != nil {
-			return nil, 0, err
-		}
-
-		// Confirm the persisted data if we have not finished uploading the object.
-		if !lastWriteOfEntireObject {
 			if resp.GetPersistedSize() != writeOffset {
 				// Retry if not all bytes were persisted.
 				writeOffset = resp.GetPersistedSize()
diff --git a/vendor/cloud.google.com/go/storage/http_client.go b/vendor/cloud.google.com/go/storage/http_client.go
index b62f009da1..0e157e4ba9 100644
--- a/vendor/cloud.google.com/go/storage/http_client.go
+++ b/vendor/cloud.google.com/go/storage/http_client.go
@@ -159,7 +159,7 @@ func (c *httpStorageClient) GetServiceAccount(ctx context.Context, project strin
 	return res.EmailAddress, nil
 }
 
-func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, opts ...storageOption) (*BucketAttrs, error) {
+func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket string, attrs *BucketAttrs, enableObjectRetention *bool, opts ...storageOption) (*BucketAttrs, error) {
 	s := callSettings(c.settings, opts...)
 	var bkt *raw.Bucket
 	if attrs != nil {
@@ -181,6 +181,9 @@ func (c *httpStorageClient) CreateBucket(ctx context.Context, project, bucket st
 	if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
 		req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
 	}
+	if enableObjectRetention != nil {
+		req.EnableObjectRetention(*enableObjectRetention)
+	}
 	var battrs *BucketAttrs
 	err := run(ctx, func(ctx context.Context) error {
 		b, err := req.Context(ctx).Do()
@@ -431,7 +434,8 @@ func (c *httpStorageClient) GetObject(ctx context.Context, bucket, object string
 	return newObject(obj), nil
 }
 
-func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object string, uattrs *ObjectAttrsToUpdate, gen int64, encryptionKey []byte, conds *Conditions, opts ...storageOption) (*ObjectAttrs, error) {
+func (c *httpStorageClient) UpdateObject(ctx context.Context, params *updateObjectParams, opts ...storageOption) (*ObjectAttrs, error) {
+	uattrs := params.uattrs
 	s := callSettings(c.settings, opts...)
 
 	var attrs ObjectAttrs
@@ -496,11 +500,21 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 		// we don't append to nullFields here.
 		forceSendFields = append(forceSendFields, "Acl")
 	}
-	rawObj := attrs.toRawObject(bucket)
+	if uattrs.Retention != nil {
+		// For ObjectRetention it's an error to send empty fields.
+		// Instead we send a null as the user's intention is to remove.
+		if uattrs.Retention.Mode == "" && uattrs.Retention.RetainUntil.IsZero() {
+			nullFields = append(nullFields, "Retention")
+		} else {
+			attrs.Retention = uattrs.Retention
+			forceSendFields = append(forceSendFields, "Retention")
+		}
+	}
+	rawObj := attrs.toRawObject(params.bucket)
 	rawObj.ForceSendFields = forceSendFields
 	rawObj.NullFields = nullFields
-	call := c.raw.Objects.Patch(bucket, object, rawObj).Projection("full")
-	if err := applyConds("Update", gen, conds, call); err != nil {
+	call := c.raw.Objects.Patch(params.bucket, params.object, rawObj).Projection("full")
+	if err := applyConds("Update", params.gen, params.conds, call); err != nil {
 		return nil, err
 	}
 	if s.userProject != "" {
@@ -509,9 +523,14 @@ func (c *httpStorageClient) UpdateObject(ctx context.Context, bucket, object str
 	if uattrs.PredefinedACL != "" {
 		call.PredefinedAcl(uattrs.PredefinedACL)
 	}
-	if err := setEncryptionHeaders(call.Header(), encryptionKey, false); err != nil {
+	if err := setEncryptionHeaders(call.Header(), params.encryptionKey, false); err != nil {
 		return nil, err
 	}
+
+	if params.overrideRetention != nil {
+		call.OverrideUnlockedRetention(*params.overrideRetention)
+	}
+
 	var obj *raw.Object
 	var err error
 	err = run(ctx, func(ctx context.Context) error { obj, err = call.Context(ctx).Do(); return err }, s.retry, s.idempotent)
diff --git a/vendor/cloud.google.com/go/storage/internal/version.go b/vendor/cloud.google.com/go/storage/internal/version.go
index eca9b294aa..2d5cf890e0 100644
--- a/vendor/cloud.google.com/go/storage/internal/version.go
+++ b/vendor/cloud.google.com/go/storage/internal/version.go
@@ -15,4 +15,4 @@ package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.35.1"
+const Version = "1.36.0"
diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go
index a16e512f5e..78ecbf0e89 100644
--- a/vendor/cloud.google.com/go/storage/storage.go
+++ b/vendor/cloud.google.com/go/storage/storage.go
@@ -879,16 +879,17 @@ func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
 // ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
 // Use BucketHandle.Object to get a handle.
 type ObjectHandle struct {
-	c              *Client
-	bucket         string
-	object         string
-	acl            ACLHandle
-	gen            int64  // a negative value indicates latest
-	conds          *Conditions
-	encryptionKey  []byte // AES-256 key
-	userProject    string // for requester-pays buckets
-	readCompressed bool   // Accept-Encoding: gzip
-	retry          *retryConfig
+	c                 *Client
+	bucket            string
+	object            string
+	acl               ACLHandle
+	gen               int64  // a negative value indicates latest
+	conds             *Conditions
+	encryptionKey     []byte // AES-256 key
+	userProject       string // for requester-pays buckets
+	readCompressed    bool   // Accept-Encoding: gzip
+	retry             *retryConfig
+	overrideRetention *bool
 }
 
 // ACL provides access to the object's access control list.
@@ -958,7 +959,15 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
 	}
 	isIdempotent := o.conds != nil && o.conds.MetagenerationMatch != 0
 	opts := makeStorageOpts(isIdempotent, o.retry, o.userProject)
-	return o.c.tc.UpdateObject(ctx, o.bucket, o.object, &uattrs, o.gen, o.encryptionKey, o.conds, opts...)
+	return o.c.tc.UpdateObject(ctx,
+		&updateObjectParams{
+			bucket:            o.bucket,
+			object:            o.object,
+			uattrs:            &uattrs,
+			gen:               o.gen,
+			encryptionKey:     o.encryptionKey,
+			conds:             o.conds,
+			overrideRetention: o.overrideRetention}, opts...)
 }
 
 // BucketName returns the name of the bucket.
@@ -973,16 +982,19 @@ func (o *ObjectHandle) ObjectName() string {
 
 // ObjectAttrsToUpdate is used to update the attributes of an object.
 // Only fields set to non-nil values will be updated.
-// For all fields except CustomTime, set the field to its zero value to delete
-// it. CustomTime cannot be deleted or changed to an earlier time once set.
+// For all fields except CustomTime and Retention, set the field to its zero
+// value to delete it. CustomTime cannot be deleted or changed to an earlier
+// time once set. Retention can be deleted (only if the Mode is Unlocked) by
+// setting it to an empty value (not nil).
 //
-// For example, to change ContentType and delete ContentEncoding and
-// Metadata, use
+// For example, to change ContentType and delete ContentEncoding, Metadata and
+// Retention, use:
 //
 //	ObjectAttrsToUpdate{
 //		ContentType:     "text/html",
 //		ContentEncoding: "",
 //		Metadata:        map[string]string{},
+//		Retention:       &ObjectRetention{},
 //	}
 type ObjectAttrsToUpdate struct {
 	EventBasedHold optional.Bool
@@ -999,6 +1011,12 @@ type ObjectAttrsToUpdate struct {
 	// If not empty, applies a predefined set of access controls. ACL must be nil.
 	// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
 	PredefinedACL string
+
+	// Retention contains the retention configuration for this object.
+	// Operations other than setting the retention for the first time or
+	// extending the RetainUntil time on the object retention must be done
+	// on an ObjectHandle with OverrideUnlockedRetention set to true.
+	Retention *ObjectRetention
 }
 
 // Delete deletes the single specified object.
@@ -1020,6 +1038,17 @@ func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
 	return &o2
 }
 
+// OverrideUnlockedRetention provides an option for overriding an Unlocked
+// Retention policy. This must be set to true in order to change a policy
+// from Unlocked to Locked, to set it to null, or to reduce its
+// RetainUntil attribute. It is not required for setting the ObjectRetention for
+// the first time nor for extending the RetainUntil time.
+func (o *ObjectHandle) OverrideUnlockedRetention(override bool) *ObjectHandle {
+	o2 := *o
+	o2.overrideRetention = &override
+	return &o2
+}
+
 // NewWriter returns a storage Writer that writes to the GCS object
 // associated with this ObjectHandle.
 //
@@ -1109,6 +1138,7 @@ func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
 		Acl:        toRawObjectACL(o.ACL),
 		Metadata:   o.Metadata,
 		CustomTime: ct,
+		Retention:  o.Retention.toRawObjectRetention(),
 	}
 }
 
@@ -1344,6 +1374,42 @@ type ObjectAttrs struct {
 	// For non-composite objects, the value will be zero.
 	// This field is read-only.
 	ComponentCount int64
+
+	// Retention contains the retention configuration for this object.
+	// ObjectRetention cannot be configured or reported through the gRPC API.
+	Retention *ObjectRetention
+}
+
+// ObjectRetention contains the retention configuration for this object.
+type ObjectRetention struct {
+	// Mode is the retention policy's mode on this object. Valid values are
+	// "Locked" and "Unlocked".
+	// Locked retention policies cannot be changed. Unlocked policies require an
+	// override to change.
+	Mode string
+
+	// RetainUntil is the time this object will be retained until.
+	RetainUntil time.Time
+}
+
+func (r *ObjectRetention) toRawObjectRetention() *raw.ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &raw.ObjectRetention{
+		Mode:            r.Mode,
+		RetainUntilTime: r.RetainUntil.Format(time.RFC3339),
+	}
+}
+
+func toObjectRetention(r *raw.ObjectRetention) *ObjectRetention {
+	if r == nil {
+		return nil
+	}
+	return &ObjectRetention{
+		Mode:        r.Mode,
+		RetainUntil: convertTime(r.RetainUntilTime),
+	}
 }
 
 // convertTime converts a time in RFC3339 format to time.Time.
@@ -1415,6 +1481,7 @@ func newObject(o *raw.Object) *ObjectAttrs {
 		Etag:           o.Etag,
 		CustomTime:     convertTime(o.CustomTime),
 		ComponentCount: o.ComponentCount,
+		Retention:      toObjectRetention(o.Retention),
 	}
 }
 
@@ -1587,6 +1654,7 @@ var attrToFieldMap = map[string]string{
 	"Etag":           "etag",
 	"CustomTime":     "customTime",
 	"ComponentCount": "componentCount",
+	"Retention":      "retention",
 }
 
 // attrToProtoFieldMap maps the field names of ObjectAttrs to the underlying field
@@ -1621,6 +1689,7 @@ var attrToProtoFieldMap = map[string]string{
 	"ComponentCount": "component_count",
 	// MediaLink was explicitly excluded from the proto as it is an HTTP-ism.
 	// "MediaLink": "mediaLink",
+	// TODO: add object retention - b/308194853
 }
 
 // SetAttrSelection makes the query populate only specific attributes of
@@ -1806,7 +1875,7 @@ func (c *Conditions) isMetagenerationValid() bool {
 func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
 	cval := reflect.ValueOf(call)
 	if gen >= 0 {
-		if !setConditionField(cval, "Generation", gen) {
+		if !setGeneration(cval, gen) {
 			return fmt.Errorf("storage: %s: generation not supported", method)
 		}
 	}
@@ -1818,25 +1887,25 @@ func applyConds(method string, gen int64, conds *Conditions, call interface{}) e
 	}
 	switch {
 	case conds.GenerationMatch != 0:
-		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
+		if !setIfGenerationMatch(cval, conds.GenerationMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
 		}
 	case conds.GenerationNotMatch != 0:
-		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
+		if !setIfGenerationNotMatch(cval, conds.GenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
 		}
 	case conds.DoesNotExist:
-		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
+		if !setIfGenerationMatch(cval, int64(0)) {
 			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
 		}
 	}
 	switch {
 	case conds.MetagenerationMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
+		if !setIfMetagenerationMatch(cval, conds.MetagenerationMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
 		}
 	case conds.MetagenerationNotMatch != 0:
-		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
+		if !setIfMetagenerationNotMatch(cval, conds.MetagenerationNotMatch) {
 			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
 		}
 	}
@@ -1897,16 +1966,45 @@ func applySourceCondsProto(gen int64, conds *Conditions, call *storagepb.Rewrite
 	return nil
 }
 
-// setConditionField sets a field on a *raw.WhateverCall.
+// setGeneration sets Generation on a *raw.WhateverCall.
 // We can't use anonymous interfaces because the return type is
 // different, since the field setters are builders.
-func setConditionField(call reflect.Value, name string, value interface{}) bool {
-	m := call.MethodByName(name)
-	if !m.IsValid() {
-		return false
+// We also make sure to supply a compile-time constant to MethodByName;
+// otherwise, the Go Linker will disable dead code elimination, leading
+// to larger binaries for all packages that import storage.
+func setGeneration(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("Generation"), value)
+}
+
+// setIfGenerationMatch sets IfGenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationMatch"), value)
+}
+
+// setIfGenerationNotMatch sets IfGenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfGenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfGenerationNotMatch"), value)
+}
+
+// setIfMetagenerationMatch sets IfMetagenerationMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationMatch"), value)
+}
+
+// setIfMetagenerationNotMatch sets IfMetagenerationNotMatch on a *raw.WhateverCall.
+// See also setGeneration.
+func setIfMetagenerationNotMatch(cval reflect.Value, value interface{}) bool {
+	return setCondition(cval.MethodByName("IfMetagenerationNotMatch"), value)
+}
+
+func setCondition(setter reflect.Value, value interface{}) bool {
+	if setter.IsValid() {
+		setter.Call([]reflect.Value{reflect.ValueOf(value)})
 	}
-	m.Call([]reflect.Value{reflect.ValueOf(value)})
-	return true
+	return setter.IsValid()
 }
 
 // Retryer returns an object handle that is configured with custom retry
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 395b1b7870..70db581147 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -14,7 +14,7 @@ cloud.google.com/go/compute/metadata
 ## explicit; go 1.19
 cloud.google.com/go/iam
 cloud.google.com/go/iam/apiv1/iampb
-# cloud.google.com/go/storage v1.35.1
+# cloud.google.com/go/storage v1.36.0
 ## explicit; go 1.19
 cloud.google.com/go/storage
 cloud.google.com/go/storage/internal
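
Editor's note (not part of the vendored diff): the bucket.go and http_client.go changes above add BucketHandle.SetObjectRetention and the read-only BucketAttrs.ObjectRetentionMode field. Below is a minimal usage sketch based on those APIs; the project ID and bucket name are placeholders. Per the diff, retention must be requested at bucket creation time and is only honored by the HTTP/JSON transport (the gRPC client returns Unimplemented).

```go
package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx) // default HTTP/JSON transport; gRPC rejects object retention
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Object retention can only be enabled via the handle returned by
	// SetObjectRetention, and only at creation time; it has no effect on an
	// existing bucket.
	bkt := client.Bucket("example-retention-bucket").SetObjectRetention(true)
	if err := bkt.Create(ctx, "example-project", nil); err != nil {
		log.Fatal(err)
	}

	// ObjectRetentionMode is read-only; an empty value means retention is disabled.
	attrs, err := bkt.Attrs(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("object retention mode:", attrs.ObjectRetentionMode)
}
```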
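The storage.go changes thread per-object retention through ObjectAttrsToUpdate.Retention and ObjectHandle.OverrideUnlockedRetention. A sketch of how a caller would use them, assuming the bucket from the previous example and a placeholder object name; imports are context, time, and cloud.google.com/go/storage.

```go
// applyObjectRetention sketches the per-object retention flow added in
// storage.go. The bucket is assumed to have been created with
// SetObjectRetention(true); "example-*" names are placeholders.
func applyObjectRetention(ctx context.Context, client *storage.Client) error {
	obj := client.Bucket("example-retention-bucket").Object("example-object")

	// Setting a retention policy for the first time, or extending
	// RetainUntil, needs no override.
	_, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{
			Mode:        "Unlocked",
			RetainUntil: time.Now().Add(24 * time.Hour),
		},
	})
	if err != nil {
		return err
	}

	// Shortening RetainUntil or clearing an Unlocked policy requires an
	// explicit override. Clearing is expressed as an empty (non-nil) value,
	// which the HTTP client turns into a null "Retention" field.
	_, err = obj.OverrideUnlockedRetention(true).Update(ctx, storage.ObjectAttrsToUpdate{
		Retention: &storage.ObjectRetention{},
	})
	return err
}
```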
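The setConditionField split in storage.go and bucket.go exists because calling reflect.Value.MethodByName with a non-constant name forces the Go linker to keep every method in the binary, defeating dead-code elimination; with a compile-time constant string, recent toolchains can keep pruning. A standalone illustration of the pattern, unrelated to the storage types and using only the standard library:

```go
package main

import (
	"fmt"
	"reflect"
)

type patchCall struct{}

func (patchCall) IfGenerationMatch(v int64) patchCall {
	fmt.Println("IfGenerationMatch =", v)
	return patchCall{}
}

// constantLookup passes a compile-time constant to MethodByName, which lets
// the linker keep eliminating unused methods elsewhere in the binary.
func constantLookup(call interface{}) reflect.Value {
	return reflect.ValueOf(call).MethodByName("IfGenerationMatch")
}

// variableLookup passes a runtime value, so the linker must assume any method
// could be reached via reflection and retains them all.
func variableLookup(call interface{}, name string) reflect.Value {
	return reflect.ValueOf(call).MethodByName(name)
}

func main() {
	if m := constantLookup(patchCall{}); m.IsValid() {
		m.Call([]reflect.Value{reflect.ValueOf(int64(7))})
	}
	_ = variableLookup
}
```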