| // Copyright 2014 Google LLC |
| // |
| // Licensed under the Apache License, Version 2.0 (the "License"); |
| // you may not use this file except in compliance with the License. |
| // You may obtain a copy of the License at |
| // |
| // http://www.apache.org/licenses/LICENSE-2.0 |
| // |
| // Unless required by applicable law or agreed to in writing, software |
| // distributed under the License is distributed on an "AS IS" BASIS, |
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| // See the License for the specific language governing permissions and |
| // limitations under the License. |
| |
| package storage |
| |
| import ( |
| "bytes" |
| "compress/gzip" |
| "context" |
| "crypto" |
| "crypto/md5" |
| cryptorand "crypto/rand" |
| "crypto/rsa" |
| "crypto/sha256" |
| "encoding/base64" |
| "encoding/json" |
| "errors" |
| "flag" |
| "fmt" |
| "hash/crc32" |
| "io" |
| "log" |
| "math" |
| "math/rand" |
| "mime/multipart" |
| "net/http" |
| "net/http/httputil" |
| "os" |
| "sort" |
| "strconv" |
| "strings" |
| "testing" |
| "time" |
| |
| "cloud.google.com/go/httpreplay" |
| "cloud.google.com/go/iam" |
| "cloud.google.com/go/iam/apiv1/iampb" |
| "cloud.google.com/go/internal/testutil" |
| "cloud.google.com/go/internal/uid" |
| control "cloud.google.com/go/storage/control/apiv2" |
| "cloud.google.com/go/storage/control/apiv2/controlpb" |
| "github.com/google/go-cmp/cmp" |
| "github.com/google/go-cmp/cmp/cmpopts" |
| "github.com/googleapis/gax-go/v2/apierror" |
| "golang.org/x/oauth2/google" |
| "google.golang.org/api/googleapi" |
| "google.golang.org/api/iterator" |
| itesting "google.golang.org/api/iterator/testing" |
| "google.golang.org/api/option" |
| "google.golang.org/api/transport" |
| "google.golang.org/grpc" |
| "google.golang.org/grpc/codes" |
| "google.golang.org/grpc/status" |
| ) |
| |
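// skipTransportTestKey is a context key type used to flag a transport
// ("http", "grpc", or "jsonReads") as skipped for a test; multiTransportTest
// checks for it before running the corresponding sub-test.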
| type skipTransportTestKey string |
| |
| const ( |
| testPrefix = "go-integration-test" |
| replayFilename = "storage.replay" |
| // TODO(jba): move to testutil, factor out from firestore/integration_test.go. |
| envFirestoreProjID = "GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID" |
| envFirestorePrivateKey = "GCLOUD_TESTS_GOLANG_FIRESTORE_KEY" |
| grpcTestPrefix = "golang-grpc-test" |
| ) |
| |
| var ( |
| record = flag.Bool("record", false, "record RPCs") |
| |
| uidSpace *uid.Space |
| uidSpaceObjects *uid.Space |
| bucketName string |
| grpcBucketName string |
| // Use our own random number generator to isolate the sequence of random numbers from |
| // other packages. This makes it possible to use HTTP replay and draw the same sequence |
| // of numbers as during recording. |
| rng *rand.Rand |
| newTestClient func(ctx context.Context, opts ...option.ClientOption) (*Client, error) |
| replaying bool |
| testTime time.Time |
| controlClient *control.StorageControlClient |
| ) |
| |
| func TestMain(m *testing.M) { |
| grpc.EnableTracing = true |
| cleanup := initIntegrationTest() |
| cleanupEmulatorClients := initEmulatorClients() |
| exit := m.Run() |
| if err := cleanup(); err != nil { |
| // Don't fail the test if cleanup fails. |
| log.Printf("Post-test cleanup failed: %v", err) |
| } |
| if err := cleanupEmulatorClients(); err != nil { |
| // Don't fail the test if cleanup fails. |
| log.Printf("Post-test cleanup failed for emulator clients: %v", err) |
| } |
| |
| os.Exit(exit) |
| } |
| |
| // If integration tests will be run, create a unique bucket for them. |
| // Also, set newTestClient to handle record/replay. |
| // Return a cleanup function. |
| func initIntegrationTest() func() error { |
| flag.Parse() // needed for testing.Short() |
| switch { |
| case testing.Short() && *record: |
| log.Fatal("cannot combine -short and -record") |
| return nil |
| |
| case testing.Short() && httpreplay.Supported() && testutil.CanReplay(replayFilename) && testutil.ProjID() != "": |
| // go test -short with a replay file will replay the integration tests, if |
| // the appropriate environment variables have been set. |
| replaying = true |
| httpreplay.DebugHeaders() |
| replayer, err := httpreplay.NewReplayer(replayFilename) |
| if err != nil { |
| log.Fatal(err) |
| } |
| var t time.Time |
| if err := json.Unmarshal(replayer.Initial(), &t); err != nil { |
| log.Fatal(err) |
| } |
| initUIDsAndRand(t) |
| newTestClient = func(ctx context.Context, _ ...option.ClientOption) (*Client, error) { |
| hc, err := replayer.Client(ctx) // no creds needed |
| if err != nil { |
| return nil, err |
| } |
| return NewClient(ctx, option.WithHTTPClient(hc)) |
| } |
| log.Printf("replaying from %s", replayFilename) |
| return func() error { return replayer.Close() } |
| |
| case testing.Short(): |
| // go test -short without a replay file skips the integration tests. |
| if testutil.CanReplay(replayFilename) && testutil.ProjID() != "" { |
| log.Print("replay not supported for Go versions before 1.8") |
| } |
| newTestClient = nil |
| return func() error { return nil } |
| |
| default: // Run integration tests against a real backend. |
| now := time.Now().UTC() |
| initUIDsAndRand(now) |
| var cleanup func() error |
| if *record && httpreplay.Supported() { |
| // Remember the time for replay. |
| nowBytes, err := json.Marshal(now) |
| if err != nil { |
| log.Fatal(err) |
| } |
| recorder, err := httpreplay.NewRecorder(replayFilename, nowBytes) |
| if err != nil { |
| log.Fatalf("could not record: %v", err) |
| } |
| newTestClient = func(ctx context.Context, opts ...option.ClientOption) (*Client, error) { |
| hc, err := recorder.Client(ctx, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return NewClient(ctx, option.WithHTTPClient(hc)) |
| } |
| cleanup = func() error { |
| err1 := cleanupBuckets() |
| err2 := recorder.Close() |
| if err1 != nil { |
| return err1 |
| } |
| return err2 |
| } |
| log.Printf("recording to %s", replayFilename) |
| } else { |
| if *record { |
| log.Print("record not supported for Go versions before 1.8") |
| } |
| newTestClient = NewClient |
| cleanup = cleanupBuckets |
| } |
| ctx := context.Background() |
| client, err := newTestClient(ctx) |
| if err != nil { |
| log.Fatalf("NewClient: %v", err) |
| } |
| if client == nil { |
| return func() error { return nil } |
| } |
| defer client.Close() |
| // Create storage control client; in some cases this is needed for test |
| // setup and cleanup. |
| controlClient, err = control.NewStorageControlClient(ctx) |
| if err != nil { |
| log.Fatalf("NewStorageControlClient: %v", err) |
| } |
| if err := client.Bucket(bucketName).Create(ctx, testutil.ProjID(), nil); err != nil { |
| log.Fatalf("creating bucket %q: %v", bucketName, err) |
| } |
| if err := client.Bucket(grpcBucketName).Create(ctx, testutil.ProjID(), nil); err != nil { |
| log.Fatalf("creating bucket %q: %v", grpcBucketName, err) |
| } |
| return cleanup |
| } |
| } |
| |
| func initUIDsAndRand(t time.Time) { |
| uidSpace = uid.NewSpace("", &uid.Options{Time: t}) |
| bucketName = testPrefix + uidSpace.New() |
| uidSpaceObjects = uid.NewSpace("obj", &uid.Options{Time: t}) |
| grpcBucketName = grpcTestPrefix + uidSpace.New() |
| // Use our own random source, to avoid other parts of the program taking |
| // random numbers from the global source and putting record and replay |
| // out of sync. |
| rng = testutil.NewRand(t) |
| testTime = t |
| } |
| |
| // testConfig returns the Client used to access GCS. testConfig skips |
| // the current test if credentials are not available or when being run |
| // in Short mode. |
| func testConfig(ctx context.Context, t *testing.T, opts ...option.ClientOption) *Client { |
| if testing.Short() && !replaying { |
| t.Skip("Integration tests skipped in short mode") |
| } |
| client, err := newTestClient(ctx, opts...) |
| if err != nil { |
| t.Fatalf("NewClient: %v", err) |
| } |
| if client == nil { |
| t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") |
| } |
| return client |
| } |
| |
// testConfigGRPC returns a gRPC-based client to access GCS. testConfigGRPC
// skips the current test when being run in Short mode.
func testConfigGRPC(ctx context.Context, t *testing.T, opts ...option.ClientOption) *Client {
| if testing.Short() { |
| t.Skip("Integration tests skipped in short mode") |
| } |
| |
| gc, err := NewGRPCClient(ctx, opts...) |
| if err != nil { |
| t.Fatalf("NewGRPCClient: %v", err) |
| } |
| |
	return gc
| } |
| |
| // initTransportClients initializes Storage clients for each supported transport. |
| func initTransportClients(ctx context.Context, t *testing.T, opts ...option.ClientOption) map[string]*Client { |
| withJSON := append(opts, WithJSONReads()) |
| return map[string]*Client{ |
| "http": testConfig(ctx, t, opts...), |
| "grpc": testConfigGRPC(ctx, t, opts...), |
| // TODO: remove jsonReads when support for XML reads is dropped |
| "jsonReads": testConfig(ctx, t, withJSON...), |
| } |
| } |
| |
// multiTransportTest initializes fresh clients for each transport, then runs
// the given testing function using each transport-specific client, supplying
// the test function with the sub-test instance, the context it was given, the
// name of an existing bucket to use, a bucket name to use for bucket creation,
// and the client to use.
| func multiTransportTest(ctx context.Context, t *testing.T, |
| test func(*testing.T, context.Context, string, string, *Client), |
| opts ...option.ClientOption) { |
| for transport, client := range initTransportClients(ctx, t, opts...) { |
| t.Run(transport, func(t *testing.T) { |
| t.Cleanup(func() { |
| client.Close() |
| }) |
| |
| if reason := ctx.Value(skipTransportTestKey(transport)); reason != nil { |
				t.Skipf("transport %q explicitly skipped: %v", transport, reason)
| } |
| |
| bucket := bucketName |
| prefix := testPrefix |
| if transport == "grpc" { |
| bucket = grpcBucketName |
| prefix = grpcTestPrefix |
| } |
| |
| // Don't let any individual test run more than 5 minutes. This eases debugging if |
| // test runs get stalled out. |
| ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) |
| t.Cleanup(func() { |
| cancel() |
| }) |
| |
| test(t, ctx, bucket, prefix, client) |
| }) |
| } |
| } |
| |
| // Use two different reading funcs for some tests to cover both Read and WriteTo. |
| type readCase struct { |
| desc string |
	readFunc func(io.Reader) ([]byte, error)
| } |
| |
| var readCases = []readCase{ |
| { |
| desc: "Read", |
| readFunc: io.ReadAll, |
| }, |
| { |
| desc: "WriteTo", |
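		// io.Copy delegates to the source's WriteTo method when it
		// implements io.WriterTo, so this case exercises the Reader's
		// WriteTo path rather than Read.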
| readFunc: func(r io.Reader) ([]byte, error) { |
| b := new(bytes.Buffer) |
| _, err := io.Copy(b, r) |
| return b.Bytes(), err |
| }, |
| }, |
| } |
| |
| func TestIntegration_BucketCreateDelete(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| projectID := testutil.ProjID() |
| |
| labels := map[string]string{ |
| "l1": "v1", |
| "empty": "", |
| } |
| |
| lifecycle := Lifecycle{ |
| Rules: []LifecycleRule{{ |
| Action: LifecycleAction{ |
| Type: SetStorageClassAction, |
| StorageClass: "NEARLINE", |
| }, |
| Condition: LifecycleCondition{ |
| AgeInDays: 10, |
| Liveness: Archived, |
| CreatedBefore: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC), |
| MatchesStorageClasses: []string{"STANDARD"}, |
| NumNewerVersions: 3, |
| }, |
| }, { |
| Action: LifecycleAction{ |
| Type: SetStorageClassAction, |
| StorageClass: "ARCHIVE", |
| }, |
| Condition: LifecycleCondition{ |
| CustomTimeBefore: time.Date(2020, 1, 2, 0, 0, 0, 0, time.UTC), |
| DaysSinceCustomTime: 20, |
| Liveness: Live, |
| MatchesStorageClasses: []string{"STANDARD"}, |
| }, |
| }, { |
| Action: LifecycleAction{ |
| Type: DeleteAction, |
| }, |
| Condition: LifecycleCondition{ |
| DaysSinceNoncurrentTime: 30, |
| Liveness: Live, |
| NoncurrentTimeBefore: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC), |
| MatchesStorageClasses: []string{"NEARLINE"}, |
| NumNewerVersions: 10, |
| }, |
| }, { |
| Action: LifecycleAction{ |
| Type: DeleteAction, |
| }, |
| Condition: LifecycleCondition{ |
| AgeInDays: 10, |
| MatchesPrefix: []string{"testPrefix"}, |
| MatchesSuffix: []string{"testSuffix"}, |
| NumNewerVersions: 3, |
| }, |
| }, { |
| Action: LifecycleAction{ |
| Type: DeleteAction, |
| }, |
| Condition: LifecycleCondition{ |
| AllObjects: true, |
| }, |
| }}, |
| } |
| |
| // testedAttrs are the bucket attrs directly compared in this test |
| type testedAttrs struct { |
| StorageClass string |
| VersioningEnabled bool |
| LocationType string |
| Labels map[string]string |
| Location string |
| Lifecycle Lifecycle |
| CustomPlacementConfig *CustomPlacementConfig |
| } |
| |
| for _, test := range []struct { |
| name string |
| attrs *BucketAttrs |
| wantAttrs testedAttrs |
| }{ |
| { |
| name: "no attrs", |
| attrs: nil, |
| wantAttrs: testedAttrs{ |
| StorageClass: "STANDARD", |
| VersioningEnabled: false, |
| LocationType: "multi-region", |
| Location: "US", |
| }, |
| }, |
| { |
| name: "with attrs", |
| attrs: &BucketAttrs{ |
| StorageClass: "NEARLINE", |
| VersioningEnabled: true, |
| Labels: labels, |
| Lifecycle: lifecycle, |
| Location: "SOUTHAMERICA-EAST1", |
| }, |
| wantAttrs: testedAttrs{ |
| StorageClass: "NEARLINE", |
| VersioningEnabled: true, |
| Labels: labels, |
| Location: "SOUTHAMERICA-EAST1", |
| LocationType: "region", |
| Lifecycle: lifecycle, |
| }, |
| }, |
| { |
| name: "dual-region", |
| attrs: &BucketAttrs{ |
| Location: "US", |
| CustomPlacementConfig: &CustomPlacementConfig{ |
| DataLocations: []string{"US-EAST1", "US-WEST1"}, |
| }, |
| }, |
| wantAttrs: testedAttrs{ |
| Location: "US", |
| LocationType: "dual-region", |
| StorageClass: "STANDARD", |
| CustomPlacementConfig: &CustomPlacementConfig{ |
| DataLocations: []string{"US-EAST1", "US-WEST1"}, |
| }, |
| }, |
| }, |
| } { |
| t.Run(test.name, func(t *testing.T) { |
| newBucketName := prefix + uidSpace.New() |
| b := client.Bucket(newBucketName) |
| |
| if err := b.Create(ctx, projectID, test.attrs); err != nil { |
| t.Fatalf("BucketHandle.Create(%q): %v", newBucketName, err) |
| } |
| |
| gotAttrs, err := b.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("BucketHandle.Attrs(%q): %v", newBucketName, err) |
| } |
| |
| // All newly created buckets should conform to the following: |
| if gotAttrs.MetaGeneration != 1 { |
| t.Errorf("metageneration: got %d, should be 1", gotAttrs.MetaGeneration) |
| } |
| if gotAttrs.ProjectNumber == 0 { |
| t.Errorf("got a zero ProjectNumber") |
| } |
| |
| // Test specific wanted bucket attrs |
| if gotAttrs.VersioningEnabled != test.wantAttrs.VersioningEnabled { |
| t.Errorf("versioning enabled: got %t, want %t", gotAttrs.VersioningEnabled, test.wantAttrs.VersioningEnabled) |
| } |
| if got, want := gotAttrs.Labels, test.wantAttrs.Labels; !testutil.Equal(got, want) { |
| t.Errorf("labels: got %v, want %v", got, want) |
| } |
| if diff := cmp.Diff(gotAttrs.Lifecycle, test.wantAttrs.Lifecycle); diff != "" { |
| t.Errorf("lifecycle: diff got vs. want: %v", diff) |
| } |
| if gotAttrs.LocationType != test.wantAttrs.LocationType { |
| t.Errorf("location type: got %s, want %s", gotAttrs.LocationType, test.wantAttrs.LocationType) |
| } |
| if gotAttrs.StorageClass != test.wantAttrs.StorageClass { |
| t.Errorf("storage class: got %s, want %s", gotAttrs.StorageClass, test.wantAttrs.StorageClass) |
| } |
| if gotAttrs.Location != test.wantAttrs.Location { |
| t.Errorf("location: got %s, want %s", gotAttrs.Location, test.wantAttrs.Location) |
| } |
| if got, want := gotAttrs.CustomPlacementConfig, test.wantAttrs.CustomPlacementConfig; !testutil.Equal(got, want) { |
| t.Errorf("customPlacementConfig: \ngot\t%v\nwant\t%v", got, want) |
| } |
| |
			// Delete the bucket and check that the deletion was successful
| if err := b.Delete(ctx); err != nil { |
| t.Fatalf("BucketHandle.Delete(%q): %v", newBucketName, err) |
| } |
| _, err = b.Attrs(ctx) |
| if err != ErrBucketNotExist { |
| t.Fatalf("expected ErrBucketNotExist, got %v", err) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_BucketLifecycle(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| wantLifecycle := Lifecycle{ |
| Rules: []LifecycleRule{ |
| { |
| Action: LifecycleAction{Type: AbortIncompleteMPUAction}, |
| Condition: LifecycleCondition{AgeInDays: 30}, |
| }, |
| { |
| Action: LifecycleAction{Type: DeleteAction}, |
| Condition: LifecycleCondition{AllObjects: true}, |
| }, |
| }, |
| } |
| |
| bucket := client.Bucket(prefix + uidSpace.New()) |
| |
| // Create bucket with lifecycle rules |
| h.mustCreate(bucket, testutil.ProjID(), &BucketAttrs{ |
| Lifecycle: wantLifecycle, |
| }) |
| defer h.mustDeleteBucket(bucket) |
| |
| attrs := h.mustBucketAttrs(bucket) |
| if !testutil.Equal(attrs.Lifecycle, wantLifecycle) { |
| t.Fatalf("got %v, want %v", attrs.Lifecycle, wantLifecycle) |
| } |
| |
| // Remove lifecycle rules |
| ua := BucketAttrsToUpdate{Lifecycle: &Lifecycle{}} |
| attrs = h.mustUpdateBucket(bucket, ua, attrs.MetaGeneration) |
| if !testutil.Equal(attrs.Lifecycle, Lifecycle{}) { |
| t.Fatalf("got %v, want %v", attrs.Lifecycle, Lifecycle{}) |
| } |
| |
| // Update bucket with a lifecycle rule |
| ua = BucketAttrsToUpdate{Lifecycle: &wantLifecycle} |
| attrs = h.mustUpdateBucket(bucket, ua, attrs.MetaGeneration) |
| if !testutil.Equal(attrs.Lifecycle, wantLifecycle) { |
| t.Fatalf("got %v, want %v", attrs.Lifecycle, wantLifecycle) |
| } |
| }) |
| } |
| |
| func TestIntegration_BucketUpdate(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| b := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(b, testutil.ProjID(), nil) |
| defer h.mustDeleteBucket(b) |
| |
| attrs := h.mustBucketAttrs(b) |
| if attrs.VersioningEnabled { |
| t.Fatal("bucket should not have versioning by default") |
| } |
| if len(attrs.Labels) > 0 { |
| t.Fatal("bucket should not have labels initially") |
| } |
| |
| // Turn on versioning, add some labels. |
| ua := BucketAttrsToUpdate{VersioningEnabled: true} |
| ua.SetLabel("l1", "v1") |
| ua.SetLabel("empty", "") |
| attrs = h.mustUpdateBucket(b, ua, attrs.MetaGeneration) |
| if !attrs.VersioningEnabled { |
| t.Fatal("should have versioning now") |
| } |
| wantLabels := map[string]string{ |
| "l1": "v1", |
| "empty": "", |
| } |
| if !testutil.Equal(attrs.Labels, wantLabels) { |
| t.Fatalf("add labels: got %v, want %v", attrs.Labels, wantLabels) |
| } |
| |
| // Turn off versioning again; add and remove some more labels. |
| ua = BucketAttrsToUpdate{VersioningEnabled: false} |
| ua.SetLabel("l1", "v2") // update |
| ua.SetLabel("new", "new") // create |
| ua.DeleteLabel("empty") // delete |
| ua.DeleteLabel("absent") // delete non-existent |
| attrs = h.mustUpdateBucket(b, ua, attrs.MetaGeneration) |
| if attrs.VersioningEnabled { |
| t.Fatal("should have versioning off") |
| } |
| wantLabels = map[string]string{ |
| "l1": "v2", |
| "new": "new", |
| } |
| if !testutil.Equal(attrs.Labels, wantLabels) { |
| t.Fatalf("got %v, want %v", attrs.Labels, wantLabels) |
| } |
| |
| // Configure a lifecycle |
| wantLifecycle := Lifecycle{ |
| Rules: []LifecycleRule{ |
| { |
| Action: LifecycleAction{Type: "Delete"}, |
| Condition: LifecycleCondition{ |
| AgeInDays: 30, |
| MatchesPrefix: []string{"testPrefix"}, |
| MatchesSuffix: []string{"testSuffix"}, |
| }, |
| }, |
| }, |
| } |
| ua = BucketAttrsToUpdate{Lifecycle: &wantLifecycle} |
| attrs = h.mustUpdateBucket(b, ua, attrs.MetaGeneration) |
| if !testutil.Equal(attrs.Lifecycle, wantLifecycle) { |
| t.Fatalf("got %v, want %v", attrs.Lifecycle, wantLifecycle) |
| } |
		// Check that StorageClass defaults to "STANDARD" when the field is
		// unset, before setting a new value.
| wantStorageClass := "STANDARD" |
| if !testutil.Equal(attrs.StorageClass, wantStorageClass) { |
| t.Fatalf("got %v, want %v", attrs.StorageClass, wantStorageClass) |
| } |
| wantStorageClass = "NEARLINE" |
| ua = BucketAttrsToUpdate{StorageClass: wantStorageClass} |
| attrs = h.mustUpdateBucket(b, ua, attrs.MetaGeneration) |
| if !testutil.Equal(attrs.StorageClass, wantStorageClass) { |
| t.Fatalf("got %v, want %v", attrs.StorageClass, wantStorageClass) |
| } |
| |
| // Empty update should succeed without changing the bucket. |
| gotAttrs, err := b.Update(ctx, BucketAttrsToUpdate{}) |
| if err != nil { |
| t.Fatalf("empty update: %v", err) |
| } |
| if !testutil.Equal(attrs, gotAttrs) { |
| t.Fatalf("empty update: got %v, want %v", gotAttrs, attrs) |
| } |
| }) |
| } |
| |
| func TestIntegration_BucketPolicyOnly(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer h.mustDeleteBucket(bkt) |
| |
| // Insert an object with custom ACL. |
| o := bkt.Object("bucketPolicyOnly") |
| defer func() { |
| if err := o.Delete(ctx); err != nil { |
| log.Printf("failed to delete test object: %v", err) |
| } |
| }() |
| wc := o.NewWriter(ctx) |
| wc.ContentType = "text/plain" |
| h.mustWrite(wc, []byte("test")) |
| a := o.ACL() |
| aclEntity := ACLEntity("user-test@example.com") |
| err := a.Set(ctx, aclEntity, RoleReader) |
| if err != nil { |
| t.Fatalf("set ACL failed: %v", err) |
| } |
| |
| // Enable BucketPolicyOnly. |
| ua := BucketAttrsToUpdate{BucketPolicyOnly: &BucketPolicyOnly{Enabled: true}} |
| attrs := h.mustUpdateBucket(bkt, ua, h.mustBucketAttrs(bkt).MetaGeneration) |
| if got, want := attrs.BucketPolicyOnly.Enabled, true; got != want { |
| t.Fatalf("got %v, want %v", got, want) |
| } |
| if got := attrs.BucketPolicyOnly.LockedTime; got.IsZero() { |
| t.Fatal("got a zero time value, want a populated value") |
| } |
| |
| // Confirm BucketAccessControl returns error, since we cannot get legacy ACL |
| // for a bucket that has uniform bucket-level access. |
| |
| // Metadata updates may be delayed up to 10s. Since we expect an error from |
| // this call, we retry on a nil error until we get the non-retryable error |
| // that we are expecting. |
| ctxWithTimeout, cancelCtx := context.WithTimeout(ctx, time.Second*10) |
| b := bkt.Retryer(WithErrorFunc(retryOnNilAndTransientErrs)) |
| _, err = b.ACL().List(ctxWithTimeout) |
| cancelCtx() |
| if err == nil { |
| t.Errorf("ACL.List: expected bucket ACL list to fail") |
| } |
| |
| // Confirm ObjectAccessControl returns error, for same reason as above. |
| ctxWithTimeout, cancelCtx = context.WithTimeout(ctx, time.Second*10) |
| _, err = o.Retryer(WithErrorFunc(retryOnNilAndTransientErrs)).ACL().List(ctxWithTimeout) |
| cancelCtx() |
| if err == nil { |
| t.Errorf("ACL.List: expected object ACL list to fail") |
| } |
| |
| // Disable BucketPolicyOnly. |
| ua = BucketAttrsToUpdate{BucketPolicyOnly: &BucketPolicyOnly{Enabled: false}} |
| attrs = h.mustUpdateBucket(bkt, ua, attrs.MetaGeneration) |
| if got, want := attrs.BucketPolicyOnly.Enabled, false; got != want { |
| t.Fatalf("attrs.BucketPolicyOnly.Enabled: got %v, want %v", got, want) |
| } |
| |
| // Check that the object ACL rules are the same. |
| |
| // Metadata updates may be delayed up to 10s. Before that, we can get a 400 |
| // indicating that uniform bucket-level access is still enabled in HTTP. |
| // We need to retry manually as GRPC will not error but provide empty ACL. |
| var acl []ACLRule |
| err = retry(ctx, func() error { |
| var err error |
| acl, err = o.ACL().List(ctx) |
| if err != nil { |
| return fmt.Errorf("ACL.List: object ACL list failed: %v", err) |
| } |
| return nil |
| }, func() error { |
| if !containsACLRule(acl, entityRoleACL{aclEntity, RoleReader}) { |
| return fmt.Errorf("containsACL: expected ACL %v to include custom ACL entity %v", acl, entityRoleACL{aclEntity, RoleReader}) |
| } |
| return nil |
| }) |
| if err != nil { |
| t.Fatal(err) |
| } |
| }) |
| } |
| |
| func TestIntegration_UniformBucketLevelAccess(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer h.mustDeleteBucket(bkt) |
| |
| // Insert an object with custom ACL. |
| o := bkt.Object("uniformBucketLevelAccess") |
| defer func() { |
| if err := o.Delete(ctx); err != nil { |
| log.Printf("failed to delete test object: %v", err) |
| } |
| }() |
| wc := o.NewWriter(ctx) |
| wc.ContentType = "text/plain" |
| h.mustWrite(wc, []byte("test")) |
| a := o.ACL() |
| aclEntity := ACLEntity("user-test@example.com") |
| err := a.Set(ctx, aclEntity, RoleReader) |
| if err != nil { |
| t.Fatalf("set ACL failed: %v", err) |
| } |
| |
| // Enable UniformBucketLevelAccess. |
| ua := BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: true}} |
| attrs := h.mustUpdateBucket(bkt, ua, h.mustBucketAttrs(bkt).MetaGeneration) |
| if got, want := attrs.UniformBucketLevelAccess.Enabled, true; got != want { |
| t.Fatalf("got %v, want %v", got, want) |
| } |
| if got := attrs.UniformBucketLevelAccess.LockedTime; got.IsZero() { |
| t.Fatal("got a zero time value, want a populated value") |
| } |
| |
| // Confirm BucketAccessControl returns error. |
| // We retry on nil to account for propagation delay in metadata update. |
| ctxWithTimeout, cancelCtx := context.WithTimeout(ctx, time.Second*10) |
| b := bkt.Retryer(WithErrorFunc(retryOnNilAndTransientErrs)) |
| _, err = b.ACL().List(ctxWithTimeout) |
| cancelCtx() |
| if err == nil { |
| t.Errorf("ACL.List: expected bucket ACL list to fail") |
| } |
| |
| // Confirm ObjectAccessControl returns error. |
| ctxWithTimeout, cancelCtx = context.WithTimeout(ctx, time.Second*10) |
| _, err = o.Retryer(WithErrorFunc(retryOnNilAndTransientErrs)).ACL().List(ctxWithTimeout) |
| cancelCtx() |
| if err == nil { |
| t.Errorf("ACL.List: expected object ACL list to fail") |
| } |
| |
| // Disable UniformBucketLevelAccess. |
| ua = BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: false}} |
| attrs = h.mustUpdateBucket(bkt, ua, attrs.MetaGeneration) |
| if got, want := attrs.UniformBucketLevelAccess.Enabled, false; got != want { |
| t.Fatalf("got %v, want %v", got, want) |
| } |
| |
| // Metadata updates may be delayed up to 10s. Before that, we can get a 400 |
| // indicating that uniform bucket-level access is still enabled in HTTP. |
| // We need to retry manually as GRPC will not error but provide empty ACL. |
| var acl []ACLRule |
| err = retry(ctx, func() error { |
| var err error |
| acl, err = o.ACL().List(ctx) |
| if err != nil { |
| return fmt.Errorf("ACL.List: object ACL list failed: %v", err) |
| } |
| return nil |
| }, func() error { |
| if !containsACLRule(acl, entityRoleACL{aclEntity, RoleReader}) { |
| return fmt.Errorf("containsACL: expected ACL %v to include custom ACL entity %v", acl, entityRoleACL{aclEntity, RoleReader}) |
| } |
| return nil |
| }) |
| if err != nil { |
| t.Fatal(err) |
| } |
| }) |
| } |
| |
| func TestIntegration_PublicAccessPrevention(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| // Create a bucket with PublicAccessPrevention enforced. |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{PublicAccessPrevention: PublicAccessPreventionEnforced}) |
| defer h.mustDeleteBucket(bkt) |
| |
| // Making bucket public should fail. |
| policy, err := bkt.IAM().V3().Policy(ctx) |
| if err != nil { |
| t.Fatalf("fetching bucket IAM policy: %v", err) |
| } |
| policy.Bindings = append(policy.Bindings, &iampb.Binding{ |
| Role: "roles/storage.objectViewer", |
| Members: []string{iam.AllUsers}, |
| }) |
| if err := bkt.IAM().V3().SetPolicy(ctx, policy); err == nil { |
| t.Error("SetPolicy: expected adding AllUsers policy to bucket should fail") |
| } |
| |
| // Making object public via ACL should fail. |
| o := bkt.Object("publicAccessPrevention") |
| defer func() { |
| if err := o.Delete(ctx); err != nil { |
| log.Printf("failed to delete test object: %v", err) |
| } |
| }() |
| wc := o.NewWriter(ctx) |
| wc.ContentType = "text/plain" |
| h.mustWrite(wc, []byte("test")) |
| a := o.ACL() |
| if err := a.Set(ctx, AllUsers, RoleReader); err == nil { |
| t.Error("ACL.Set: expected adding AllUsers ACL to object should fail") |
| } |
| |
		// Updating the PAP setting to inherited should work and not affect the UBLA setting.
| attrs, err := bkt.Update(ctx, BucketAttrsToUpdate{PublicAccessPrevention: PublicAccessPreventionInherited}) |
| if err != nil { |
| t.Fatalf("updating PublicAccessPrevention failed: %v", err) |
| } |
| if attrs.PublicAccessPrevention != PublicAccessPreventionInherited { |
| t.Errorf("updating PublicAccessPrevention: got %s, want %s", attrs.PublicAccessPrevention, PublicAccessPreventionInherited) |
| } |
| if attrs.UniformBucketLevelAccess.Enabled || attrs.BucketPolicyOnly.Enabled { |
| t.Error("updating PublicAccessPrevention changed UBLA setting") |
| } |
| |
| // Now, making object public or making bucket public should succeed. Run with |
| // retry because ACL settings may take time to propagate. |
| retrier := func(err error) bool { |
| // Once ACL settings propagate, PAP should no longer be enforced and the call will succeed. |
| // In the meantime, while PAP is enforced, trying to set ACL results in: |
| // - FailedPrecondition for gRPC |
| // - condition not met (412) for HTTP |
| return ShouldRetry(err) || status.Code(err) == codes.FailedPrecondition || extractErrCode(err) == http.StatusPreconditionFailed |
| } |
| |
| ctxWithTimeout, cancelCtx := context.WithTimeout(ctx, time.Second*10) |
| a = o.Retryer(WithErrorFunc(retrier), WithPolicy(RetryAlways)).ACL() |
| err = a.Set(ctxWithTimeout, AllUsers, RoleReader) |
| cancelCtx() |
| if err != nil { |
| t.Errorf("ACL.Set: making object public failed: %v", err) |
| } |
| |
| policy, err = bkt.IAM().V3().Policy(ctx) |
| if err != nil { |
| t.Fatalf("fetching bucket IAM policy: %v", err) |
| } |
| policy.Bindings = append(policy.Bindings, &iampb.Binding{ |
| Role: "roles/storage.objectViewer", |
| Members: []string{iam.AllUsers}, |
| }) |
| if err := bkt.IAM().V3().SetPolicy(ctx, policy); err != nil { |
| t.Errorf("SetPolicy: making bucket public failed: %v", err) |
| } |
| |
| // Updating UBLA should not affect PAP setting. |
| attrs, err = bkt.Update(ctx, BucketAttrsToUpdate{UniformBucketLevelAccess: &UniformBucketLevelAccess{Enabled: true}}) |
| if err != nil { |
| t.Fatalf("updating UBLA failed: %v", err) |
| } |
| if !attrs.UniformBucketLevelAccess.Enabled { |
| t.Error("updating UBLA: got UBLA not enabled, want enabled") |
| } |
| if attrs.PublicAccessPrevention != PublicAccessPreventionInherited { |
| t.Errorf("updating UBLA: got %s, want %s", attrs.PublicAccessPrevention, PublicAccessPreventionInherited) |
| } |
| }) |
| } |
| |
| func TestIntegration_Autoclass(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| // Create a bucket with Autoclass enabled. |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{Autoclass: &Autoclass{Enabled: true}}) |
| defer h.mustDeleteBucket(bkt) |
| |
| // Get Autoclass configuration from bucket attrs. |
| // Autoclass.TerminalStorageClass is defaulted to NEARLINE if not specified. |
| attrs, err := bkt.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("get bucket attrs failed: %v", err) |
| } |
| var toggleTime time.Time |
| var tscUpdateTime time.Time |
| if attrs != nil && attrs.Autoclass != nil { |
| if got, want := attrs.Autoclass.Enabled, true; got != want { |
| t.Errorf("attr.Autoclass.Enabled = %v, want %v", got, want) |
| } |
| if toggleTime = attrs.Autoclass.ToggleTime; toggleTime.IsZero() { |
| t.Error("got a zero time value, want a populated value") |
| } |
| if got, want := attrs.Autoclass.TerminalStorageClass, "NEARLINE"; got != want { |
| t.Errorf("attr.Autoclass.TerminalStorageClass = %v, want %v", got, want) |
| } |
			if tscUpdateTime = attrs.Autoclass.TerminalStorageClassUpdateTime; tscUpdateTime.IsZero() {
| t.Error("got a zero time value, want a populated value") |
| } |
| } |
| |
| // Update TerminalStorageClass on the bucket. |
| ua := BucketAttrsToUpdate{Autoclass: &Autoclass{Enabled: true, TerminalStorageClass: "ARCHIVE"}} |
| attrs = h.mustUpdateBucket(bkt, ua, attrs.MetaGeneration) |
| if got, want := attrs.Autoclass.Enabled, true; got != want { |
| t.Errorf("attr.Autoclass.Enabled = %v, want %v", got, want) |
| } |
| if got, want := attrs.Autoclass.TerminalStorageClass, "ARCHIVE"; got != want { |
| t.Errorf("attr.Autoclass.TerminalStorageClass = %v, want %v", got, want) |
| } |
| latestTSCUpdateTime := attrs.Autoclass.TerminalStorageClassUpdateTime |
| if latestTSCUpdateTime.IsZero() { |
| t.Error("got a zero time value, want a populated value") |
| } |
| if !latestTSCUpdateTime.After(tscUpdateTime) { |
| t.Error("latestTSCUpdateTime should be newer than bucket creation tscUpdateTime") |
| } |
| |
| // Disable Autoclass on the bucket. |
| ua = BucketAttrsToUpdate{Autoclass: &Autoclass{Enabled: false}} |
| attrs = h.mustUpdateBucket(bkt, ua, attrs.MetaGeneration) |
| if got, want := attrs.Autoclass.Enabled, false; got != want { |
| t.Errorf("attr.Autoclass.Enabled = %v, want %v", got, want) |
| } |
| latestToggleTime := attrs.Autoclass.ToggleTime |
| if latestToggleTime.IsZero() { |
| t.Error("got a zero time value, want a populated value") |
| } |
| if !latestToggleTime.After(toggleTime) { |
| t.Error("latestToggleTime should be newer than bucket creation toggleTime") |
| } |
| }) |
| } |
| |
| func TestIntegration_ConditionalDelete(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| o := client.Bucket(bucket).Object("conddel") |
| |
| wc := o.NewWriter(ctx) |
| wc.ContentType = "text/plain" |
| h.mustWrite(wc, []byte("foo")) |
| |
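		// Writer.Attrs is only valid once the writer has been closed, which
		// h.mustWrite does after writing the content.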
| gen := wc.Attrs().Generation |
| metaGen := wc.Attrs().Metageneration |
| |
| if err := o.Generation(gen - 1).Delete(ctx); err == nil { |
| t.Fatalf("Unexpected successful delete with Generation") |
| } |
| if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil { |
| t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") |
| } |
| if err := o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil { |
| t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") |
| } |
| if err := o.Generation(gen).Delete(ctx); err != nil { |
| t.Fatalf("final delete failed: %v", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_HierarchicalNamespace(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| // Create a bucket with HNS enabled. |
| hnsBucketName := prefix + uidSpace.New() |
| bkt := client.Bucket(hnsBucketName) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{ |
| UniformBucketLevelAccess: UniformBucketLevelAccess{Enabled: true}, |
| HierarchicalNamespace: &HierarchicalNamespace{Enabled: true}, |
| }) |
| defer h.mustDeleteBucket(bkt) |
| |
| attrs, err := bkt.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("bkt(%q).Attrs: %v", hnsBucketName, err) |
| } |
| |
		if got, want := attrs.HierarchicalNamespace, (&HierarchicalNamespace{Enabled: true}); cmp.Diff(got, want) != "" {
| t.Errorf("HierarchicalNamespace: got %+v, want %+v", got, want) |
| } |
| |
		// Folder creation should work on an HNS bucket, but not on a standard bucket.
| req := &controlpb.CreateFolderRequest{ |
| Parent: fmt.Sprintf("projects/_/buckets/%v", hnsBucketName), |
| FolderId: "foo/", |
| Folder: &controlpb.Folder{}, |
| } |
| if _, err := controlClient.CreateFolder(ctx, req); err != nil { |
| t.Errorf("creating folder in bucket %q: %v", hnsBucketName, err) |
| } |
| |
| req2 := &controlpb.CreateFolderRequest{ |
| Parent: fmt.Sprintf("projects/_/buckets/%v", bucket), |
| FolderId: "foo/", |
| Folder: &controlpb.Folder{}, |
| } |
| if _, err := controlClient.CreateFolder(ctx, req2); status.Code(err) != codes.FailedPrecondition { |
| t.Errorf("creating folder in non-HNS bucket %q: got error %v, want FailedPrecondition", bucket, err) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectsRangeReader(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| |
| objName := uidSpaceObjects.New() |
| obj := bkt.Object(objName) |
| contents := []byte("Hello, world this is a range request") |
| |
| w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx) |
| if _, err := w.Write(contents); err != nil { |
| t.Errorf("Failed to write contents: %v", err) |
| } |
| if err := w.Close(); err != nil { |
| t.Errorf("Failed to close writer: %v", err) |
| } |
| |
| last5s := []struct { |
| name string |
| start int64 |
| length int64 |
| }{ |
| {name: "negative offset", start: -5, length: -1}, |
| {name: "offset with specified length", start: int64(len(contents)) - 5, length: 5}, |
| {name: "offset and read till end", start: int64(len(contents)) - 5, length: -1}, |
| } |
| |
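		// Each case should read the final 5 bytes of contents, reached via a
		// different start/length combination.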
| for _, last5 := range last5s { |
| t.Run(last5.name, func(t *testing.T) { |
| // Test Read and WriteTo. |
| for _, c := range readCases { |
| t.Run(c.desc, func(t *testing.T) { |
| wantBuf := contents[len(contents)-5:] |
| r, err := obj.NewRangeReader(ctx, last5.start, last5.length) |
| if err != nil { |
| t.Fatalf("Failed to make range read: %v", err) |
| } |
| defer r.Close() |
| |
| if got, want := r.Attrs.StartOffset, int64(len(contents))-5; got != want { |
| t.Errorf("StartOffset mismatch, got %d want %d", got, want) |
| } |
| |
| gotBuf, err := c.readFunc(r) |
| if err != nil { |
| t.Fatalf("reading object: %v", err) |
| } |
| if got, want := len(gotBuf), 5; got != want { |
| t.Errorf("Body length mismatch, got %d want %d", got, want) |
| } else if diff := cmp.Diff(string(gotBuf), string(wantBuf)); diff != "" { |
| t.Errorf("Content read does not match - got(-),want(+):\n%s", diff) |
| } |
| }) |
| } |
| |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectReadChunksGRPC(t *testing.T) { |
| multiTransportTest(skipHTTP("gRPC implementation specific test"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| // Use a larger blob to test chunking logic. This is a little over 5MB. |
| content := make([]byte, 5<<20) |
| rand.New(rand.NewSource(0)).Read(content) |
| |
| // Upload test data. |
| obj := client.Bucket(bucket).Object(uidSpaceObjects.New()) |
| if err := writeObject(ctx, obj, "text/plain", content); err != nil { |
| t.Fatal(err) |
| } |
| defer h.mustDeleteObject(obj) |
| |
| r, err := obj.NewReader(ctx) |
| if err != nil { |
| t.Fatal(err) |
| } |
| defer r.Close() |
| |
| if size := r.Size(); size != int64(len(content)) { |
| t.Errorf("got size = %v, want %v", size, len(content)) |
| } |
| if rem := r.Remain(); rem != int64(len(content)) { |
| t.Errorf("got %v bytes remaining, want %v", rem, len(content)) |
| } |
| |
| bufSize := len(content) |
| buf := make([]byte, bufSize) |
| |
| // Read in smaller chunks, offset to provoke reading across a Recv boundary. |
| chunk := 4<<10 + 1234 |
| offset := 0 |
| for { |
| end := math.Min(float64(offset+chunk), float64(bufSize)) |
| n, err := r.Read(buf[offset:int(end)]) |
| if err == io.EOF { |
| break |
| } |
| if err != nil { |
| t.Fatal(err) |
| } |
| offset += n |
| } |
| |
| if rem := r.Remain(); rem != 0 { |
| t.Errorf("got %v bytes remaining, want 0", rem) |
| } |
| if !bytes.Equal(buf, content) { |
| t.Errorf("content mismatch") |
| } |
| }) |
| } |
| |
| func TestIntegration_MultiMessageWriteGRPC(t *testing.T) { |
| multiTransportTest(skipHTTP("gRPC implementation specific test"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| name := uidSpaceObjects.New() |
| obj := client.Bucket(bucket).Object(name).Retryer(WithPolicy(RetryAlways)) |
| defer h.mustDeleteObject(obj) |
| |
| // Use a larger blob to test multi-message logic. This is a little over 5MB. |
| content := bytes.Repeat([]byte("a"), 5<<20) |
| |
| crc32c := crc32.Checksum(content, crc32cTable) |
| w := obj.NewWriter(ctx) |
| w.ProgressFunc = func(p int64) { |
| t.Logf("%s: committed %d\n", t.Name(), p) |
| } |
| w.SendCRC32C = true |
| w.CRC32C = crc32c |
| got, err := w.Write(content) |
| if err != nil { |
| t.Fatalf("Writer.Write: %v", err) |
| } |
| // Flush the buffer to finish the upload. |
| if err := w.Close(); err != nil { |
| t.Fatalf("Writer.Close: %v", err) |
| } |
| |
| want := len(content) |
| if got != want { |
| t.Errorf("While writing got: %d want %d", got, want) |
| } |
| |
| // Read back the Object for verification. |
| reader, err := client.Bucket(bucket).Object(name).NewReader(ctx) |
| if err != nil { |
| t.Fatal(err) |
| } |
| defer reader.Close() |
| |
		// Allocate spare capacity up front so the copy does not need to grow the buffer.
		b := bytes.NewBuffer(make([]byte, 0, want+4<<10))
| gotr, err := io.Copy(b, reader) |
| if err != nil { |
| t.Fatal(err) |
| } |
| if gotr != int64(want) { |
| t.Errorf("While reading got: %d want %d", gotr, want) |
| } |
| }) |
| } |
| |
| func TestIntegration_MultiChunkWrite(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| obj := client.Bucket(bucket).Object(uidSpaceObjects.New()).Retryer(WithPolicy(RetryAlways)) |
| defer h.mustDeleteObject(obj) |
| |
| // Use a larger blob to test multi-message logic. This is a little over 5MB. |
| content := bytes.Repeat([]byte("a"), 5<<20) |
| crc32c := crc32.Checksum(content, crc32cTable) |
| |
| w := obj.NewWriter(ctx) |
| w.SendCRC32C = true |
| w.CRC32C = crc32c |
| // Use a 1 MB chunk size. |
| w.ChunkSize = 1 << 20 |
| w.ProgressFunc = func(p int64) { |
| t.Logf("%s: committed %d\n", t.Name(), p) |
| } |
| got, err := w.Write(content) |
| if err != nil { |
| t.Fatalf("Writer.Write: %v", err) |
| } |
| // Flush the buffer to finish the upload. |
| if err := w.Close(); err != nil { |
| t.Fatalf("Writer.Close: %v", err) |
| } |
| |
| want := len(content) |
| if got != want { |
| t.Errorf("While writing got: %d want %d", got, want) |
| } |
| |
| r, err := obj.NewReader(ctx) |
| if err != nil { |
| t.Fatal(err) |
| } |
| defer r.Close() |
| |
		// Allocate spare capacity up front so the copy does not need to grow the buffer.
		b := bytes.NewBuffer(make([]byte, 0, want+4<<10))
| gotr, err := io.Copy(b, r) |
| if err != nil { |
| t.Fatal(err) |
| } |
| if gotr != int64(want) { |
| t.Errorf("While reading got: %d want %d", gotr, want) |
| } |
| }) |
| } |
| |
| func TestIntegration_ConditionalDownload(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| o := client.Bucket(bucket).Object("condread") |
| defer o.Delete(ctx) |
| |
| wc := o.NewWriter(ctx) |
| wc.ContentType = "text/plain" |
| h.mustWrite(wc, []byte("foo")) |
| |
| gen := wc.Attrs().Generation |
| metaGen := wc.Attrs().Metageneration |
| |
| if _, err := o.Generation(gen + 1).NewReader(ctx); err == nil { |
| t.Fatalf("Unexpected successful download with nonexistent Generation") |
| } |
| if _, err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).NewReader(ctx); err == nil { |
| t.Fatalf("Unexpected successful download with failed preconditions IfMetaGenerationMatch") |
| } |
| if _, err := o.If(Conditions{GenerationMatch: gen + 1}).NewReader(ctx); err == nil { |
| t.Fatalf("Unexpected successful download with failed preconditions IfGenerationMatch") |
| } |
| if _, err := o.If(Conditions{GenerationMatch: gen}).NewReader(ctx); err != nil { |
| t.Fatalf("Download failed: %v", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectIteration(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
		// Reset testTime because an object's last-modification time should be
		// within 5 minutes of the test start time (or of the current iteration's
		// start time when -count is passed).
| testTime = time.Now().UTC() |
| newBucketName := prefix + uidSpace.New() |
| h := testHelper{t} |
| bkt := client.Bucket(newBucketName).Retryer(WithPolicy(RetryAlways)) |
| |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer func() { |
| if err := killBucket(ctx, client, newBucketName); err != nil { |
| log.Printf("deleting %q: %v", newBucketName, err) |
| } |
| }() |
| const defaultType = "text/plain" |
| |
| // Populate object names and make a map for their contents. |
| objects := []string{ |
| "obj1", |
| "obj2", |
| "obj/with/slashes", |
| "obj/", |
| } |
| contents := make(map[string][]byte) |
| |
| // Test Writer. |
| for _, obj := range objects { |
| c := randomContents() |
| if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { |
| t.Errorf("Write for %v failed with %v", obj, err) |
| } |
| contents[obj] = c |
| } |
| |
| testObjectIterator(t, bkt, objects) |
| testObjectsIterateSelectedAttrs(t, bkt, objects) |
| testObjectsIterateAllSelectedAttrs(t, bkt, objects) |
| testObjectIteratorWithOffset(t, bkt, objects) |
| testObjectsIterateWithProjection(t, bkt) |
| t.Run("testObjectsIterateSelectedAttrsDelimiter", func(t *testing.T) { |
| query := &Query{Prefix: "", Delimiter: "/"} |
| if err := query.SetAttrSelection([]string{"Name"}); err != nil { |
| t.Fatalf("selecting query attrs: %v", err) |
| } |
| |
| var gotNames []string |
| var gotPrefixes []string |
| it := bkt.Objects(context.Background(), query) |
| for { |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| if attrs.Name != "" { |
| gotNames = append(gotNames, attrs.Name) |
| } else if attrs.Prefix != "" { |
| gotPrefixes = append(gotPrefixes, attrs.Prefix) |
| } |
| |
| if attrs.Bucket != "" { |
| t.Errorf("Bucket field not selected, want empty, got = %v", attrs.Bucket) |
| } |
| } |
| |
| sortedNames := []string{"obj1", "obj2"} |
| if !cmp.Equal(sortedNames, gotNames) { |
| t.Errorf("names = %v, want %v", gotNames, sortedNames) |
| } |
| sortedPrefixes := []string{"obj/"} |
| if !cmp.Equal(sortedPrefixes, gotPrefixes) { |
| t.Errorf("prefixes = %v, want %v", gotPrefixes, sortedPrefixes) |
| } |
| }) |
| t.Run("testObjectsIterateSelectedAttrsDelimiterIncludeTrailingDelimiter", func(t *testing.T) { |
| query := &Query{Prefix: "", Delimiter: "/", IncludeTrailingDelimiter: true} |
| if err := query.SetAttrSelection([]string{"Name"}); err != nil { |
| t.Fatalf("selecting query attrs: %v", err) |
| } |
| |
| var gotNames []string |
| var gotPrefixes []string |
| it := bkt.Objects(context.Background(), query) |
| for { |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| if attrs.Name != "" { |
| gotNames = append(gotNames, attrs.Name) |
| } else if attrs.Prefix != "" { |
| gotPrefixes = append(gotPrefixes, attrs.Prefix) |
| } |
| |
| if attrs.Bucket != "" { |
| t.Errorf("Bucket field not selected, want empty, got = %v", attrs.Bucket) |
| } |
| } |
| |
| sortedNames := []string{"obj/", "obj1", "obj2"} |
| if !cmp.Equal(sortedNames, gotNames) { |
| t.Errorf("names = %v, want %v", gotNames, sortedNames) |
| } |
| sortedPrefixes := []string{"obj/"} |
| if !cmp.Equal(sortedPrefixes, gotPrefixes) { |
| t.Errorf("prefixes = %v, want %v", gotPrefixes, sortedPrefixes) |
| } |
| }) |
| }) |
| } |
| |
| func TestIntegration_ObjectIterationMatchGlob(t *testing.T) { |
| multiTransportTest(skipJSONReads(context.Background(), "no reads in test"), t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
		// Reset testTime because an object's last-modification time should be
		// within 5 minutes of the test start time (or of the current iteration's
		// start time when -count is passed).
| testTime = time.Now().UTC() |
| newBucketName := prefix + uidSpace.New() |
| h := testHelper{t} |
| bkt := client.Bucket(newBucketName).Retryer(WithPolicy(RetryAlways)) |
| |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer func() { |
| if err := killBucket(ctx, client, newBucketName); err != nil { |
| log.Printf("deleting %q: %v", newBucketName, err) |
| } |
| }() |
| const defaultType = "text/plain" |
| |
| // Populate object names and make a map for their contents. |
| objects := []string{ |
| "obj1", |
| "obj2", |
| "obj/with/slashes", |
| "obj/", |
| "other/obj1", |
| } |
| contents := make(map[string][]byte) |
| |
| // Test Writer. |
| for _, obj := range objects { |
| c := randomContents() |
| if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { |
| t.Errorf("Write for %v failed with %v", obj, err) |
| } |
| contents[obj] = c |
| } |
| query := &Query{MatchGlob: "**obj1"} |
| |
| var gotNames []string |
| it := bkt.Objects(context.Background(), query) |
| for { |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| if attrs.Name != "" { |
| gotNames = append(gotNames, attrs.Name) |
| } |
| } |
| |
| sortedNames := []string{"obj1", "other/obj1"} |
| if !cmp.Equal(sortedNames, gotNames) { |
| t.Errorf("names = %v, want %v", gotNames, sortedNames) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectIterationManagedFolder(t *testing.T) { |
| ctx := skipGRPC("not yet implemented in gRPC") |
| multiTransportTest(skipJSONReads(ctx, "no reads in test"), t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| newBucketName := prefix + uidSpace.New() |
| h := testHelper{t} |
| bkt := client.Bucket(newBucketName).Retryer(WithPolicy(RetryAlways)) |
| |
| // Create bucket with UBLA enabled as this is necessary for managed folders. |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{ |
| UniformBucketLevelAccess: UniformBucketLevelAccess{ |
| Enabled: true, |
| }, |
| }) |
| |
| t.Cleanup(func() { |
| if err := killBucket(ctx, client, newBucketName); err != nil { |
| log.Printf("deleting %q: %v", newBucketName, err) |
| } |
| }) |
| const defaultType = "text/plain" |
| |
| // Populate object names and make a map for their contents. |
| objects := []string{ |
| "obj1", |
| "obj2", |
| "obj/with/slashes", |
| "obj/", |
| "other/obj1", |
| } |
| contents := make(map[string][]byte) |
| |
| // Test Writer. |
| for _, obj := range objects { |
| c := randomContents() |
| if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { |
| t.Errorf("Write for %v failed with %v", obj, err) |
| } |
| contents[obj] = c |
| } |
| |
| req := &controlpb.CreateManagedFolderRequest{ |
| Parent: fmt.Sprintf("projects/_/buckets/%s", newBucketName), |
| ManagedFolder: &controlpb.ManagedFolder{}, |
| ManagedFolderId: "mf", |
| } |
| mf, err := controlClient.CreateManagedFolder(ctx, req) |
| if err != nil { |
| t.Fatalf("creating managed folder: %v", err) |
| } |
| |
| t.Cleanup(func() { |
| req := &controlpb.DeleteManagedFolderRequest{ |
| Name: mf.Name, |
| } |
| controlClient.DeleteManagedFolder(ctx, req) |
| }) |
| |
| // Test that managed folders are only included when IncludeFoldersAsPrefixes is set. |
| cases := []struct { |
| name string |
| query *Query |
| want []string |
| }{ |
| { |
| name: "include folders", |
| query: &Query{Delimiter: "/", IncludeFoldersAsPrefixes: true}, |
| want: []string{"mf/", "obj/", "other/"}, |
| }, |
| { |
| name: "no folders", |
| query: &Query{Delimiter: "/"}, |
| want: []string{"obj/", "other/"}, |
| }, |
| } |
| |
| for _, c := range cases { |
| t.Run(c.name, func(t *testing.T) { |
| var gotNames []string |
| var gotPrefixes []string |
| it := bkt.Objects(context.Background(), c.query) |
| for { |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| if attrs.Name != "" { |
| gotNames = append(gotNames, attrs.Name) |
| } |
| if attrs.Prefix != "" { |
| gotPrefixes = append(gotPrefixes, attrs.Prefix) |
| } |
| } |
| |
| sortedNames := []string{"obj1", "obj2"} |
| if !cmp.Equal(sortedNames, gotNames) { |
| t.Errorf("names = %v, want %v", gotNames, sortedNames) |
| } |
| |
| if !cmp.Equal(c.want, gotPrefixes) { |
| t.Errorf("prefixes = %v, want %v", gotPrefixes, c.want) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectUpdate(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| b := client.Bucket(bucket) |
| |
| o := b.Object("update-obj" + uidSpaceObjects.New()) |
| w := o.NewWriter(ctx) |
| _, err := io.Copy(w, bytes.NewReader(randomContents())) |
| if err != nil { |
| t.Fatalf("io.Copy: %v", err) |
| } |
| if err := w.Close(); err != nil { |
| t.Fatalf("w.Close: %v", err) |
| } |
| defer func() { |
| if err := o.Delete(ctx); err != nil { |
| t.Errorf("o.Delete : %v", err) |
| } |
| }() |
| |
| attrs, err := o.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("o.Attrs: %v", err) |
| } |
| |
| // Test UpdateAttrs. |
| metadata := map[string]string{"key": "value"} |
| |
| updated, err := o.If(Conditions{MetagenerationMatch: attrs.Metageneration}).Update(ctx, ObjectAttrsToUpdate{ |
| ContentType: "text/html", |
| ContentLanguage: "en", |
| Metadata: metadata, |
| ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, |
| }) |
| if err != nil { |
| t.Fatalf("o.Update: %v", err) |
| } |
| |
| if got, want := updated.ContentType, "text/html"; got != want { |
| t.Errorf("updated.ContentType == %q; want %q", got, want) |
| } |
| if got, want := updated.ContentLanguage, "en"; got != want { |
| t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) |
| } |
| if got, want := updated.Metadata, metadata; !testutil.Equal(got, want) { |
| t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) |
| } |
| if got, want := updated.Created, attrs.Created; got != want { |
| t.Errorf("updated.Created == %q; want %q", got, want) |
| } |
| if !updated.Created.Before(updated.Updated) { |
| t.Errorf("updated.Updated should be newer than update.Created") |
| } |
| |
| // Add another metadata key |
| anotherKey := map[string]string{"key2": "value2"} |
| metadata["key2"] = "value2" |
| |
| updated, err = o.Update(ctx, ObjectAttrsToUpdate{ |
| Metadata: anotherKey, |
| }) |
| if err != nil { |
| t.Fatalf("o.Update: %v", err) |
| } |
| |
| if got, want := updated.Metadata, metadata; !testutil.Equal(got, want) { |
| t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) |
| } |
| |
| // Delete ContentType and ContentLanguage and Metadata. |
| updated, err = o.If(Conditions{MetagenerationMatch: updated.Metageneration}).Update(ctx, ObjectAttrsToUpdate{ |
| ContentType: "", |
| ContentLanguage: "", |
| Metadata: map[string]string{}, |
| ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, |
| }) |
| if err != nil { |
| t.Fatalf("o.Update: %v", err) |
| } |
| |
| if got, want := updated.ContentType, ""; got != want { |
| t.Errorf("updated.ContentType == %q; want %q", got, want) |
| } |
| if got, want := updated.ContentLanguage, ""; got != want { |
| t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) |
| } |
| if updated.Metadata != nil { |
| t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata) |
| } |
| if got, want := updated.Created, attrs.Created; got != want { |
| t.Errorf("updated.Created == %q; want %q", got, want) |
| } |
| if !updated.Created.Before(updated.Updated) { |
| t.Errorf("updated.Updated should be newer than update.Created") |
| } |
| |
| // Test empty update. Most fields should be unchanged, but updating will |
| // increase the metageneration and update time. |
| wantAttrs := updated |
| gotAttrs, err := o.Update(ctx, ObjectAttrsToUpdate{}) |
| if err != nil { |
| t.Fatalf("empty update: %v", err) |
| } |
| if diff := testutil.Diff(gotAttrs, wantAttrs, cmpopts.IgnoreFields(ObjectAttrs{}, "Etag", "Metageneration", "Updated")); diff != "" { |
| t.Errorf("empty update: got=-, want=+:\n%s", diff) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectChecksums(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| b := client.Bucket(bucket) |
| checksumCases := []struct { |
| name string |
| contents [][]byte |
| size int64 |
| md5 string |
| crc32c uint32 |
| }{ |
| { |
| name: "checksum-object", |
| contents: [][]byte{[]byte("hello"), []byte("world")}, |
| size: 10, |
| md5: "fc5e038d38a57032085441e7fe7010b0", |
| crc32c: 1456190592, |
| }, |
| { |
| name: "zero-object", |
| contents: [][]byte{}, |
| size: 0, |
| md5: "d41d8cd98f00b204e9800998ecf8427e", |
| crc32c: 0, |
| }, |
| } |
| for _, c := range checksumCases { |
| wc := b.Object(c.name + uidSpaceObjects.New()).NewWriter(ctx) |
| for _, data := range c.contents { |
| if _, err := wc.Write(data); err != nil { |
| t.Fatalf("Write(%q) failed with %q", data, err) |
| } |
| } |
| if err := wc.Close(); err != nil { |
| t.Fatalf("%q: close failed with %q", c.name, err) |
| } |
| obj := wc.Attrs() |
| if got, want := obj.Size, c.size; got != want { |
| t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) |
| } |
| if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { |
| t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) |
| } |
| if got, want := obj.CRC32C, c.crc32c; got != want { |
| t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) |
| } |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectCompose(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| b := client.Bucket(bucket) |
| |
| objects := []*ObjectHandle{ |
| b.Object("obj1" + uidSpaceObjects.New()), |
| b.Object("obj2" + uidSpaceObjects.New()), |
| b.Object("obj/with/slashes" + uidSpaceObjects.New()), |
| b.Object("obj/" + uidSpaceObjects.New()), |
| } |
| var compSrcs []*ObjectHandle |
| wantContents := make([]byte, 0) |
| |
| // Write objects to compose |
| for _, obj := range objects { |
| c := randomContents() |
| if err := writeObject(ctx, obj, "text/plain", c); err != nil { |
| t.Errorf("Write for %v failed with %v", obj, err) |
| } |
| compSrcs = append(compSrcs, obj) |
| wantContents = append(wantContents, c...) |
| defer obj.Delete(ctx) |
| } |
| |
| checkCompose := func(obj *ObjectHandle, contentTypeSet *string) { |
			r, err := obj.NewReader(ctx)
			if err != nil {
				t.Fatalf("new reader: %v", err)
			}
			defer r.Close()

			slurp, err := io.ReadAll(r)
			if err != nil {
				t.Fatalf("io.ReadAll: %v", err)
			}
| if !bytes.Equal(slurp, wantContents) { |
| t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) |
| } |
			got := r.ContentType()
			// Accept both an empty string and octet-stream if the content type was not set;
			// HTTP sets the content type to octet-stream, whereas gRPC does not set it at all.
			if contentTypeSet == nil {
				if got != "" && got != "application/octet-stream" {
					t.Errorf("Composed object content-type = %q, want unset or application/octet-stream", got)
				}
			} else if got != *contentTypeSet {
				t.Errorf("Composed object content-type = %q, want %q", got, *contentTypeSet)
			}
| } |
| |
| // Compose should work even if the user sets no destination attributes. |
| compDst := b.Object("composed1") |
| c := compDst.ComposerFrom(compSrcs...) |
| attrs, err := c.Run(ctx) |
| if err != nil { |
| t.Fatalf("ComposeFrom error: %v", err) |
| } |
| if attrs.ComponentCount != int64(len(objects)) { |
| t.Errorf("mismatching ComponentCount: got %v, want %v", attrs.ComponentCount, int64(len(objects))) |
| } |
| checkCompose(compDst, nil) |
| |
| // It should also work if we do. |
| contentType := "text/json" |
| compDst = b.Object("composed2") |
| c = compDst.ComposerFrom(compSrcs...) |
| c.ContentType = contentType |
| attrs, err = c.Run(ctx) |
| if err != nil { |
| t.Fatalf("ComposeFrom error: %v", err) |
| } |
| if attrs.ComponentCount != int64(len(objects)) { |
| t.Errorf("mismatching ComponentCount: got %v, want %v", attrs.ComponentCount, int64(len(objects))) |
| } |
| checkCompose(compDst, &contentType) |
| }) |
| } |
| |
| func TestIntegration_Copy(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bucketFrom := client.Bucket(bucket) |
| bucketInSameRegion := client.Bucket(prefix + uidSpace.New()) |
| bucketInDifferentRegion := client.Bucket(prefix + uidSpace.New()) |
| |
| // Create new bucket |
| if err := bucketInSameRegion.Create(ctx, testutil.ProjID(), nil); err != nil { |
| t.Fatalf("bucket.Create: %v", err) |
| } |
| t.Cleanup(func() { |
| h.mustDeleteBucket(bucketInSameRegion) |
| }) |
| |
| // Create new bucket |
| if err := bucketInDifferentRegion.Create(ctx, testutil.ProjID(), &BucketAttrs{Location: "NORTHAMERICA-NORTHEAST2"}); err != nil { |
| t.Fatalf("bucket.Create: %v", err) |
| } |
| t.Cleanup(func() { |
| h.mustDeleteBucket(bucketInDifferentRegion) |
| }) |
| |
		// Use a larger object size so the copy triggers multiple rewrite calls.
		minObjectSize := 2500000 // 2.5 MB
| obj := bucketFrom.Object("copy-object-original" + uidSpaceObjects.New()) |
| |
| // Create an object to copy from |
| w := obj.NewWriter(ctx) |
| c := randomContents() |
| for written := 0; written < minObjectSize; { |
| n, err := w.Write(c) |
| if err != nil { |
| t.Fatalf("w.Write: %v", err) |
| } |
| written += n |
| } |
| if err := w.Close(); err != nil { |
| t.Fatalf("w.Close: %v", err) |
| } |
| t.Cleanup(func() { |
| h.mustDeleteObject(obj) |
| }) |
| |
| attrs, err := obj.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("obj.Attrs: %v", err) |
| } |
| |
| crc32c := attrs.CRC32C |
| |
| type copierAttrs struct { |
| contentEncoding string |
| maxBytesPerCall int64 |
| } |
| |
| for _, test := range []struct { |
| desc string |
| toObj string |
| toBucket *BucketHandle |
| copierAttrs *copierAttrs |
| numExpectedRewriteCalls int |
| }{ |
| { |
| desc: "copy within bucket", |
| toObj: "copy-within-bucket", |
| toBucket: bucketFrom, |
| numExpectedRewriteCalls: 1, |
| }, |
| { |
| desc: "copy to new bucket", |
| toObj: "copy-new-bucket", |
| toBucket: bucketInSameRegion, |
| numExpectedRewriteCalls: 1, |
| }, |
| { |
| desc: "copy with attributes", |
| toObj: "copy-with-attributes", |
| toBucket: bucketInSameRegion, |
| copierAttrs: &copierAttrs{contentEncoding: "identity"}, |
| numExpectedRewriteCalls: 1, |
| }, |
| { |
				// This case should trigger multiple rewrite calls and may fail
				// with a rate limit error if those calls get stuck in an infinite loop.
| desc: "copy to new region", |
| toObj: "copy-new-region", |
| toBucket: bucketInDifferentRegion, |
| copierAttrs: &copierAttrs{maxBytesPerCall: 1048576}, |
| numExpectedRewriteCalls: 3, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| copyObj := test.toBucket.Object(test.toObj) |
| copier := copyObj.CopierFrom(obj) |
| |
| if attrs := test.copierAttrs; attrs != nil { |
| if attrs.contentEncoding != "" { |
| copier.ContentEncoding = attrs.contentEncoding |
| } |
| if attrs.maxBytesPerCall != 0 { |
| copier.maxBytesRewrittenPerCall = attrs.maxBytesPerCall |
| } |
| } |
| |
| rewriteCallsCount := 0 |
| copier.ProgressFunc = func(_, _ uint64) { |
| rewriteCallsCount++ |
| } |
| |
| attrs, err = copier.Run(ctx) |
| if err != nil { |
| t.Fatalf("Copier.Run failed with %v", err) |
| } |
| t.Cleanup(func() { |
| h.mustDeleteObject(copyObj) |
| }) |
| |
				// Check that the copied object is in the correct bucket with the correct name
				if attrs.Bucket != test.toBucket.name || attrs.Name != test.toObj {
					t.Errorf("unexpected copy behaviour: got: %s in bucket %s, want: %s in bucket %s", attrs.Name, attrs.Bucket, test.toObj, test.toBucket.name)
				}
| |
| // Check attrs |
| if test.copierAttrs != nil { |
| if attrs.ContentEncoding != test.copierAttrs.contentEncoding { |
| t.Errorf("unexpected ContentEncoding; got: %s, want: %s", attrs.ContentEncoding, test.copierAttrs.contentEncoding) |
| } |
| } |
| |
| // Check the copied contents |
| if attrs.CRC32C != crc32c { |
| t.Errorf("mismatching checksum: got %v, want %v", attrs.CRC32C, crc32c) |
| } |
| |
| // Check that the number of requests made is as expected |
| if rewriteCallsCount != test.numExpectedRewriteCalls { |
| t.Errorf("unexpected number of rewrite calls: got %v, want %v", rewriteCallsCount, test.numExpectedRewriteCalls) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_Encoding(t *testing.T) { |
| multiTransportTest(skipGRPC("gzip transcoding not supported"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| |
| // Test content encoding |
| const zeroCount = 20 << 1 // TODO: should be 20 << 20 |
| obj := bkt.Object("gzip-test") |
| w := obj.NewWriter(ctx) |
| w.ContentEncoding = "gzip" |
| gw := gzip.NewWriter(w) |
| if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { |
| t.Fatalf("io.Copy, upload: %v", err) |
| } |
| if err := gw.Close(); err != nil { |
| t.Errorf("gzip.Close(): %v", err) |
| } |
| if err := w.Close(); err != nil { |
| t.Errorf("w.Close(): %v", err) |
| } |
| r, err := obj.NewReader(ctx) |
| if err != nil { |
| t.Fatalf("NewReader(gzip-test): %v", err) |
| } |
| n, err := io.Copy(io.Discard, r) |
| if err != nil { |
| t.Errorf("io.Copy, download: %v", err) |
| } |
| if n != zeroCount { |
| t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) |
| } |
| |
| // Test NotFound. |
| _, err = bkt.Object("obj-not-exists").NewReader(ctx) |
| if err != ErrObjectNotExist { |
| t.Errorf("Object should not exist, err found to be %v", err) |
| } |
| }) |
| } |
| |
| func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { |
| ctx := context.Background() |
| h := testHelper{t} |
| // Collect the list of items we expect: ObjectAttrs in lexical order by name. |
| names := make([]string, len(objects)) |
| copy(names, objects) |
| sort.Strings(names) |
| var attrs []*ObjectAttrs |
| for _, name := range names { |
| attrs = append(attrs, h.mustObjectAttrs(bkt.Object(name))) |
| } |
| msg, ok := itesting.TestIterator(attrs, |
| func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) }, |
| func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) |
| if !ok { |
| t.Errorf("ObjectIterator.Next: %s", msg) |
| } |
| // TODO(jba): test query.Delimiter != "" |
| } |
| |
| func testObjectIteratorWithOffset(t *testing.T, bkt *BucketHandle, objects []string) { |
| ctx := context.Background() |
| h := testHelper{t} |
| // Collect the list of items we expect: ObjectAttrs in lexical order by name. |
| names := make([]string, len(objects)) |
| copy(names, objects) |
| sort.Strings(names) |
| var attrs []*ObjectAttrs |
| for _, name := range names { |
| attrs = append(attrs, h.mustObjectAttrs(bkt.Object(name))) |
| } |
| m := make(map[string][]*ObjectAttrs) |
| for i, name := range names { |
| // StartOffset takes the value of object names, the result must be for: |
| // ― obj/with/slashes: obj/with/slashes, obj1, obj2 |
| // ― obj1: obj1, obj2 |
| // ― obj2: obj2. |
| m[name] = attrs[i:] |
| msg, ok := itesting.TestIterator(m[name], |
| func() interface{} { return bkt.Objects(ctx, &Query{StartOffset: name}) }, |
| func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) |
| if !ok { |
| t.Errorf("ObjectIterator.Next: %s", msg) |
| } |
| // EndOffset takes the value of object names, the result must be for: |
| // ― obj/with/slashes: "" |
| // ― obj1: obj/with/slashes |
| // ― obj2: obj/with/slashes, obj1. |
| m[name] = attrs[:i] |
| msg, ok = itesting.TestIterator(m[name], |
| func() interface{} { return bkt.Objects(ctx, &Query{EndOffset: name}) }, |
| func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) |
| if !ok { |
| t.Errorf("ObjectIterator.Next: %s", msg) |
| } |
| } |
| } |
| |
| func testObjectsIterateSelectedAttrs(t *testing.T, bkt *BucketHandle, objects []string) { |
| // Create a query that will only select the "Name" attr of objects, and |
| // invoke object listing. |
| query := &Query{Prefix: ""} |
| query.SetAttrSelection([]string{"Name"}) |
| |
| var gotNames []string |
| it := bkt.Objects(context.Background(), query) |
| for { |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| gotNames = append(gotNames, attrs.Name) |
| |
| if len(attrs.Bucket) > 0 { |
| t.Errorf("Bucket field not selected, want empty, got = %v", attrs.Bucket) |
| } |
| } |
| |
| sortedNames := make([]string, len(objects)) |
| copy(sortedNames, objects) |
| sort.Strings(sortedNames) |
| sort.Strings(gotNames) |
| |
| if !cmp.Equal(sortedNames, gotNames) { |
| t.Errorf("names = %v, want %v", gotNames, sortedNames) |
| } |
| } |
| |
| func testObjectsIterateAllSelectedAttrs(t *testing.T, bkt *BucketHandle, objects []string) { |
| // Tests that all selected attributes work - query succeeds (without actually |
| // verifying the returned results). |
| query := &Query{ |
| Prefix: "", |
| StartOffset: "obj/", |
| EndOffset: "obj2", |
| } |
| var selectedAttrs []string |
| for k := range attrToFieldMap { |
| selectedAttrs = append(selectedAttrs, k) |
| } |
| query.SetAttrSelection(selectedAttrs) |
| |
| count := 0 |
| it := bkt.Objects(context.Background(), query) |
| for { |
| _, err := it.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| count++ |
| } |
| |
| if count != len(objects)-1 { |
| t.Errorf("count = %v, want %v", count, len(objects)-1) |
| } |
| } |
| |
| func testObjectsIterateWithProjection(t *testing.T, bkt *BucketHandle) { |
| projections := map[Projection]bool{ |
| ProjectionDefault: true, |
| ProjectionFull: true, |
| ProjectionNoACL: false, |
| } |
| |
| for projection, expectACL := range projections { |
| query := &Query{Projection: projection} |
| it := bkt.Objects(context.Background(), query) |
| attrs, err := it.Next() |
| if err == iterator.Done { |
| t.Fatalf("iterator: no objects") |
| } |
| if err != nil { |
| t.Fatalf("iterator.Next: %v", err) |
| } |
| |
| if expectACL { |
| if attrs.Owner == "" { |
| t.Errorf("projection %q: Owner is empty, want nonempty Owner", projection) |
| } |
| if len(attrs.ACL) == 0 { |
| t.Errorf("projection %q: ACL is empty, want at least one ACL rule", projection) |
| } |
| } else { |
| if attrs.Owner != "" { |
| t.Errorf("projection %q: got Owner = %q, want empty Owner", projection, attrs.Owner) |
| } |
| if len(attrs.ACL) != 0 { |
| t.Errorf("projection %q: got %d ACL rules, want empty ACL", projection, len(attrs.ACL)) |
| } |
| } |
| } |
| } |
| |
| func TestIntegration_SignedURL(t *testing.T) { |
| multiTransportTest(skipJSONReads(context.Background(), "no reads in test"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| // To test SignedURL, we need a real user email and private key. Extract them |
| // from the JSON key file. |
| jwtConf, err := testutil.JWTConfig() |
| if err != nil { |
| t.Fatal(err) |
| } |
| if jwtConf == nil { |
| t.Skip("JSON key file is not present") |
| } |
| |
| bkt := client.Bucket(bucket) |
| obj := "signedURL" |
| contents := []byte("This is a test of SignedURL.\n") |
| md5 := "Jyxvgwm9n2MsrGTMPbMeYA==" // base64-encoded MD5 of contents |
| if err := writeObject(ctx, bkt.Object(obj), "text/plain", contents); err != nil { |
| t.Fatalf("writing: %v", err) |
| } |
| for _, test := range []struct { |
| desc string |
| opts SignedURLOptions |
| headers map[string][]string |
| fail bool |
| }{ |
| { |
| desc: "basic v2", |
| }, |
| { |
| desc: "basic v4", |
| opts: SignedURLOptions{Scheme: SigningSchemeV4}, |
| }, |
| { |
| desc: "MD5 sent and matches", |
| opts: SignedURLOptions{MD5: md5}, |
| headers: map[string][]string{"Content-MD5": {md5}}, |
| }, |
| { |
| desc: "MD5 not sent", |
| opts: SignedURLOptions{MD5: md5}, |
| fail: true, |
| }, |
| { |
| desc: "Content-Type sent and matches", |
| opts: SignedURLOptions{ContentType: "text/plain"}, |
| headers: map[string][]string{"Content-Type": {"text/plain"}}, |
| }, |
| { |
| desc: "Content-Type sent but does not match", |
| opts: SignedURLOptions{ContentType: "text/plain"}, |
| headers: map[string][]string{"Content-Type": {"application/json"}}, |
| fail: true, |
| }, |
| { |
| desc: "Canonical headers sent and match", |
| opts: SignedURLOptions{Headers: []string{ |
| " X-Goog-Foo: Bar baz ", |
| "X-Goog-Novalue", // ignored: no value |
| "X-Google-Foo", // ignored: wrong prefix |
| "x-goog-meta-start-time: 2023-02-10T02:00:00Z", // with colons |
| }}, |
| headers: map[string][]string{"X-Goog-foo": {"Bar baz "}, "x-goog-meta-start-time": {"2023-02-10T02:00:00Z"}}, |
| }, |
| { |
| desc: "Canonical headers sent and match using V4", |
| opts: SignedURLOptions{Headers: []string{ |
| "x-goog-meta-start-time: 2023-02-10T02:", // with colons |
| " X-Goog-Foo: Bar baz ", |
| "X-Goog-Novalue", // ignored: no value |
| "X-Google-Foo", // ignored: wrong prefix |
| }, |
| Scheme: SigningSchemeV4, |
| }, |
| headers: map[string][]string{"x-goog-meta-start-time": {"2023-02-10T02:"}, "X-Goog-foo": {"Bar baz "}}, |
| }, |
| { |
| desc: "Canonical headers sent but don't match", |
| opts: SignedURLOptions{Headers: []string{" X-Goog-Foo: Bar baz"}}, |
| headers: map[string][]string{"X-Goog-Foo": {"bar baz"}}, |
| fail: true, |
| }, |
| { |
| desc: "Virtual hosted style with custom hostname", |
| opts: SignedURLOptions{ |
| Style: VirtualHostedStyle(), |
| Hostname: "storage.googleapis.com:443", |
| }, |
| fail: false, |
| }, |
| { |
| desc: "Hostname v4", |
| opts: SignedURLOptions{ |
| Hostname: "storage.googleapis.com:443", |
| Scheme: SigningSchemeV4, |
| }, |
| fail: false, |
| }, |
| } { |
| opts := test.opts |
| opts.GoogleAccessID = jwtConf.Email |
| opts.PrivateKey = jwtConf.PrivateKey |
| opts.Method = "GET" |
| opts.Expires = time.Now().Add(time.Hour) |
| |
| u, err := bkt.SignedURL(obj, &opts) |
| if err != nil { |
| t.Errorf("%s: SignedURL: %v", test.desc, err) |
| continue |
| } |
| |
| err = verifySignedURL(u, test.headers, contents) |
| if err != nil && !test.fail { |
| t.Errorf("%s: wanted success but got error:\n%v", test.desc, err) |
| } else if err == nil && test.fail { |
| t.Errorf("%s: wanted failure but test succeeded", test.desc) |
| } |
| } |
| }) |
| } |
| |
| func TestIntegration_SignedURL_WithEncryptionKeys(t *testing.T) { |
| multiTransportTest(skipJSONReads(context.Background(), "no reads in test"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| |
| // To test SignedURL, we need a real user email and private key. Extract |
| // them from the JSON key file. |
| jwtConf, err := testutil.JWTConfig() |
| if err != nil { |
| t.Fatal(err) |
| } |
| if jwtConf == nil { |
| t.Skip("JSON key file is not present") |
| } |
| |
| bkt := client.Bucket(bucket) |
| |
| // TODO(deklerk): document how these were generated and their significance |
| encryptionKey := "AAryxNglNkXQY0Wa+h9+7BLSFMhCzPo22MtXUWjOBbI=" |
| encryptionKeySha256 := "QlCdVONb17U1aCTAjrFvMbnxW/Oul8VAvnG1875WJ3k=" |
| headers := map[string][]string{ |
| "x-goog-encryption-algorithm": {"AES256"}, |
| "x-goog-encryption-key": {encryptionKey}, |
| "x-goog-encryption-key-sha256": {encryptionKeySha256}, |
| } |
| contents := []byte(`{"message":"encryption with csek works"}`) |
| tests := []struct { |
| desc string |
| opts *SignedURLOptions |
| }{ |
| { |
| desc: "v4 URL with customer supplied encryption keys for PUT", |
| opts: &SignedURLOptions{ |
| Method: "PUT", |
					Headers: []string{
						"x-goog-encryption-algorithm:AES256",
						fmt.Sprintf("x-goog-encryption-key:%s", encryptionKey),
						fmt.Sprintf("x-goog-encryption-key-sha256:%s", encryptionKeySha256),
					},
| Scheme: SigningSchemeV4, |
| }, |
| }, |
| { |
| desc: "v4 URL with customer supplied encryption keys for GET", |
| opts: &SignedURLOptions{ |
| Method: "GET", |
| Headers: []string{ |
| "x-goog-encryption-algorithm:AES256", |
| fmt.Sprintf("x-goog-encryption-key:%s", encryptionKey), |
| fmt.Sprintf("x-goog-encryption-key-sha256:%s", encryptionKeySha256), |
| }, |
| Scheme: SigningSchemeV4, |
| }, |
| }, |
| } |
| defer func() { |
| // Delete encrypted object. |
| err := bkt.Object("csek.json").Delete(ctx) |
| if err != nil { |
| log.Printf("failed to deleted encrypted file: %v", err) |
| } |
| }() |
| |
| for _, test := range tests { |
| opts := test.opts |
| opts.GoogleAccessID = jwtConf.Email |
| opts.PrivateKey = jwtConf.PrivateKey |
| opts.Expires = time.Now().Add(time.Hour) |
| |
| u, err := bkt.SignedURL("csek.json", test.opts) |
| if err != nil { |
| t.Fatalf("%s: %v", test.desc, err) |
| } |
| |
| if test.opts.Method == "PUT" { |
| if _, err := putURL(u, headers, bytes.NewReader(contents)); err != nil { |
| t.Fatalf("%s: %v", test.desc, err) |
| } |
| } |
| |
| if test.opts.Method == "GET" { |
| if err := verifySignedURL(u, headers, contents); err != nil { |
| t.Fatalf("%s: %v", test.desc, err) |
| } |
| } |
| } |
| }) |
| } |
| |
| func TestIntegration_SignedURL_EmptyStringObjectName(t *testing.T) { |
| multiTransportTest(skipJSONReads(context.Background(), "no reads in test"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| |
| // To test SignedURL, we need a real user email and private key. Extract them |
| // from the JSON key file. |
| jwtConf, err := testutil.JWTConfig() |
| if err != nil { |
| t.Fatal(err) |
| } |
| if jwtConf == nil { |
| t.Skip("JSON key file is not present") |
| } |
| |
| opts := &SignedURLOptions{ |
| Scheme: SigningSchemeV4, |
| Method: "GET", |
| GoogleAccessID: jwtConf.Email, |
| PrivateKey: jwtConf.PrivateKey, |
| Expires: time.Now().Add(time.Hour), |
| } |
| |
| bkt := client.Bucket(bucket) |
| u, err := bkt.SignedURL("", opts) |
| if err != nil { |
| t.Fatal(err) |
| } |
| |
| // Should be some ListBucketResult response. |
| _, err = getURL(u, nil) |
| if err != nil { |
| t.Fatal(err) |
| } |
| }) |
| |
| } |
| |
| func TestIntegration_BucketACL(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bucket := prefix + uidSpace.New() |
| bkt := client.Bucket(bucket) |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer h.mustDeleteBucket(bkt) |
| |
| entity := ACLEntity("domain-google.com") |
| rule := ACLRule{Entity: entity, Role: RoleReader, Domain: "google.com"} |
| |
| if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { |
| t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) |
| } |
| |
| acl, err := bkt.DefaultObjectACL().List(ctx) |
| if err != nil { |
| t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) |
| } |
| if !containsACLRule(acl, testACLRule(rule)) { |
| t.Fatalf("default ACL rule missing; want: %#v, got rules: %+v", rule, acl) |
| } |
| |
| o := bkt.Object("acl1") |
| defer h.mustDeleteObject(o) |
| |
| // Retry to account for propagation delay in metadata update. |
| err = retry(ctx, func() error { |
| if err := writeObject(ctx, o, "", randomContents()); err != nil { |
| return fmt.Errorf("Write for %v failed with %v", o.ObjectName(), err) |
| } |
| acl, err = o.ACL().List(ctx) |
| return err |
| }, func() error { |
| if !containsACLRule(acl, testACLRule(rule)) { |
| return fmt.Errorf("object ACL rule missing %+v from ACL \n%+v", rule, acl) |
| } |
| return nil |
| }) |
| if err != nil { |
| t.Error(err) |
| } |
| |
| if err := o.ACL().Delete(ctx, entity); err != nil { |
| t.Errorf("object ACL: could not delete entity %s", entity) |
| } |
| // Delete the default ACL rule. We can't move this code earlier in the |
| // test, because the test depends on the fact that the object ACL inherits |
| // it. |
| if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { |
| t.Errorf("default ACL: could not delete entity %s", entity) |
| } |
| |
| entity2 := AllAuthenticatedUsers |
| rule2 := ACLRule{Entity: entity2, Role: RoleReader} |
| if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { |
| t.Errorf("Error while putting bucket ACL rule: %v", err) |
| } |
| |
| var bACL []ACLRule |
| |
| // Retry to account for propagation delay in metadata update. |
| err = retry(ctx, func() error { |
| bACL, err = bkt.ACL().List(ctx) |
| return err |
| }, func() error { |
| if !containsACLRule(bACL, testACLRule(rule2)) { |
| return fmt.Errorf("bucket ACL missing %+v", rule2) |
| } |
| return nil |
| }) |
| if err != nil { |
| t.Error(err) |
| } |
| |
| if err := bkt.ACL().Delete(ctx, entity2); err != nil { |
| t.Errorf("Error while deleting bucket ACL rule: %v", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_ValidObjectNames(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| |
| validNames := []string{ |
| "gopher", |
| "Гоферови", |
| "a", |
| strings.Repeat("a", 1024), |
| } |
| for _, name := range validNames { |
| if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { |
| t.Errorf("Object %q write failed: %v. Want success", name, err) |
| continue |
| } |
| defer bkt.Object(name).Delete(ctx) |
| } |
| |
| invalidNames := []string{ |
| "", // Too short. |
| strings.Repeat("a", 1025), // Too long. |
| "new\nlines", |
| "bad\xffunicode", |
| } |
| for _, name := range invalidNames { |
| // Invalid object names will either cause failure during Write or Close. |
| if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { |
| continue |
| } |
| defer bkt.Object(name).Delete(ctx) |
| t.Errorf("%q should have failed. Didn't", name) |
| } |
| }) |
| } |
| |
| func TestIntegration_WriterContentType(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| obj := client.Bucket(bucket).Object("content") |
| testCases := []struct { |
| content string |
| setType, wantType string |
| forceEmptyContentType bool |
| }{ |
| { |
| // Sniffed content type. |
| content: "It was the best of times, it was the worst of times.", |
| wantType: "text/plain; charset=utf-8", |
| }, |
| { |
| // Sniffed content type. |
| content: "<html><head><title>My first page</title></head></html>", |
| wantType: "text/html; charset=utf-8", |
| }, |
| { |
| content: "<html><head><title>My first page</title></head></html>", |
| setType: "text/html", |
| wantType: "text/html", |
| }, |
| { |
| content: "<html><head><title>My first page</title></head></html>", |
| setType: "image/jpeg", |
| wantType: "image/jpeg", |
| }, |
| { |
| // Content type sniffing disabled. |
| content: "<html><head><title>My first page</title></head></html>", |
| setType: "", |
| wantType: "", |
| forceEmptyContentType: true, |
| }, |
| } |
| for i, tt := range testCases { |
| writer := newWriter(ctx, obj, tt.setType, tt.forceEmptyContentType) |
| if err := writeContents(writer, []byte(tt.content)); err != nil { |
| t.Errorf("writing #%d: %v", i, err) |
| } |
| attrs, err := obj.Attrs(ctx) |
| if err != nil { |
| t.Errorf("obj.Attrs: %v", err) |
| continue |
| } |
| if got := attrs.ContentType; got != tt.wantType { |
| t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType) |
| } |
| } |
| }) |
| } |
| |
| func TestIntegration_WriterChunksize(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| obj := client.Bucket(bucket).Object("writer-chunksize-test" + uidSpaceObjects.New()) |
		objSize := 1<<10<<10 + 1 // 1 MiB + 1 byte
| contents := bytes.Repeat([]byte("a"), objSize) |
| |
| for _, test := range []struct { |
| desc string |
| chunksize int |
| wantBytesPerCall int64 |
| wantCallbacks int |
| }{ |
| { |
| desc: "default chunksize", |
| chunksize: 16 << 10 << 10, |
| wantBytesPerCall: 16 << 10 << 10, |
| wantCallbacks: 0, |
| }, |
| { |
| desc: "small chunksize rounds up to 256kib", |
| chunksize: 1, |
| wantBytesPerCall: 256 << 10, |
| wantCallbacks: 5, |
| }, |
| { |
| desc: "chunksize of 256kib", |
| chunksize: 256 << 10, |
| wantBytesPerCall: 256 << 10, |
| wantCallbacks: 5, |
| }, |
| { |
| desc: "chunksize of just over 256kib rounds up", |
| chunksize: 256<<10 + 1, |
| wantBytesPerCall: 256 * 2 << 10, |
| wantCallbacks: 3, |
| }, |
| { |
| desc: "multiple of 256kib", |
| chunksize: 256 * 3 << 10, |
| wantBytesPerCall: 256 * 3 << 10, |
| wantCallbacks: 2, |
| }, |
| { |
| desc: "chunksize 0 uploads everything", |
| chunksize: 0, |
| wantBytesPerCall: int64(objSize), |
| wantCallbacks: 0, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| t.Cleanup(func() { obj.Delete(ctx) }) |
| |
| w := obj.Retryer(WithPolicy(RetryAlways)).NewWriter(ctx) |
| w.ChunkSize = test.chunksize |
| |
| bytesWrittenSoFar := int64(0) |
| callbacks := 0 |
| |
| w.ProgressFunc = func(i int64) { |
| bytesWrittenByCall := i - bytesWrittenSoFar |
| |
| // Error if this is not the last call and we don't write exactly wantBytesPerCall |
| if i != int64(objSize) && bytesWrittenByCall != test.wantBytesPerCall { |
| t.Errorf("unexpected number of bytes written by call; wanted: %d, written: %d", test.wantBytesPerCall, bytesWrittenByCall) |
| } |
| |
| bytesWrittenSoFar = i |
| callbacks++ |
| } |
| |
| if _, err := w.Write(contents); err != nil { |
| _ = w.Close() |
| t.Fatalf("writer.Write: %v", err) |
| } |
| if err := w.Close(); err != nil { |
| t.Fatalf("writer.Close: %v", err) |
| } |
| |
| if callbacks != test.wantCallbacks { |
| t.Errorf("ProgressFunc was called %d times, expected %d", callbacks, test.wantCallbacks) |
| } |
| |
| // Confirm all bytes were uploaded. |
| attrs, err := obj.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("obj.Attrs: %v", err) |
| } |
| if attrs.Size != int64(objSize) { |
| t.Errorf("incorrect number of bytes written; got %v, want %v", attrs.Size, objSize) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_ZeroSizedObject(t *testing.T) { |
| t.Parallel() |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| obj := client.Bucket(bucket).Object("zero") |
| |
| // Check writing it works as expected. |
| w := obj.NewWriter(ctx) |
| if err := w.Close(); err != nil { |
| t.Fatalf("Writer.Close: %v", err) |
| } |
| defer obj.Delete(ctx) |
| |
| // Check we can read it too. Test both with WriteTo and Read. |
| for _, c := range readCases { |
| t.Run(c.desc, func(t *testing.T) { |
| r, err := obj.NewReader(ctx) |
| if err != nil { |
| t.Fatalf("NewReader: %v", err) |
| } |
| body, err := c.readFunc(r) |
| if err != nil { |
| t.Fatalf("reading object: %v", err) |
| } |
| if len(body) != 0 { |
| t.Errorf("Body is %v, want empty []byte{}", body) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_Encryption(t *testing.T) { |
| // This function tests customer-supplied encryption keys for all operations |
| // involving objects. Bucket and ACL operations aren't tested because they |
| // aren't affected by customer encryption. Neither is deletion. |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| h := testHelper{t} |
| |
| obj := client.Bucket(bucket).Object("customer-encryption") |
| key := []byte("my-secret-AES-256-encryption-key") |
| keyHash := sha256.Sum256(key) |
| keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:]) |
| key2 := []byte("My-Secret-AES-256-Encryption-Key") |
| contents := "top secret." |
| |
| checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) { |
| // Performing a metadata operation without the key should succeed. |
| attrs, err := f(obj) |
| if err != nil { |
| t.Fatalf("%s: %v", msg, err) |
| } |
| // The key hash should match... |
| if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { |
| t.Errorf("%s: key hash: got %q, want %q", msg, got, want) |
| } |
| // ...but CRC and MD5 should not be present. |
| if attrs.CRC32C != 0 { |
| t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C) |
| } |
| if len(attrs.MD5) > 0 { |
| t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5) |
| } |
| |
| // Performing a metadata operation with the key should succeed. |
| attrs, err = f(obj.Key(key)) |
| if err != nil { |
| t.Fatalf("%s: %v", msg, err) |
| } |
| // Check the key and content hashes. |
| if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { |
| t.Errorf("%s: key hash: got %q, want %q", msg, got, want) |
| } |
| if attrs.CRC32C == 0 { |
| t.Errorf("%s: CRC: got 0, want non-zero", msg) |
| } |
| if len(attrs.MD5) == 0 { |
| t.Errorf("%s: MD5: got len == 0, want len > 0", msg) |
| } |
| } |
| |
| checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) { |
| // Reading the object without the key should fail. |
| if _, err := readObject(ctx, o); err == nil { |
| t.Errorf("%s: reading without key: want error, got nil", msg) |
| } |
| // Reading the object with the key should succeed. |
| got := h.mustRead(o.Key(k)) |
| gotContents := string(got) |
| // And the contents should match what we wrote. |
| if gotContents != wantContents { |
| t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents) |
| } |
| } |
| |
| checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) { |
| got := h.mustRead(obj) |
| gotContents := string(got) |
| if gotContents != wantContents { |
| t.Errorf("%s: got %q, want %q", msg, gotContents, wantContents) |
| } |
| } |
| |
| // Write to obj using our own encryption key, which is a valid 32-byte |
| // AES-256 key. |
| h.mustWrite(obj.Key(key).NewWriter(ctx), []byte(contents)) |
| |
| checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) { |
| return o.Attrs(ctx) |
| }) |
| |
| checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) { |
| return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) |
| }) |
| |
| checkRead("first object", obj, key, contents) |
| |
| // We create 2 objects here and we can interleave operations to get around |
| // the rate limit for object mutation operations (create, update, and delete). |
| obj2 := client.Bucket(bucket).Object("customer-encryption-2") |
| obj4 := client.Bucket(bucket).Object("customer-encryption-4") |
| |
| // Copying an object without the key should fail. |
| if _, err := obj4.CopierFrom(obj).Run(ctx); err == nil { |
| t.Fatal("want error, got nil") |
| } |
| // Copying an object with the key should succeed. |
| if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil { |
| t.Fatal(err) |
| } |
| // The destination object is not encrypted; we can read it without a key. |
| checkReadUnencrypted("copy dest", obj2, contents) |
| |
| // Providing a key on the destination but not the source should fail, |
| // since the source is encrypted. |
| if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil { |
| t.Fatal("want error, got nil") |
| } |
| |
| // But copying with keys for both source and destination should succeed. |
| if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil { |
| t.Fatal(err) |
| } |
| // And the destination should be encrypted, meaning we can only read it |
| // with a key. |
| checkRead("copy destination", obj2, key2, contents) |
| |
| // Change obj2's key to prepare for compose, where all objects must have |
| // the same key. Also illustrates key rotation: copy an object to itself |
| // with a different key. |
| if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { |
| t.Fatal(err) |
| } |
| obj3 := client.Bucket(bucket).Object("customer-encryption-3") |
| // Composing without keys should fail. |
| if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { |
| t.Fatal("want error, got nil") |
| } |
| // Keys on the source objects result in an error. |
| if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil { |
| t.Fatal("want error, got nil") |
| } |
| // A key on the destination object both decrypts the source objects |
| // and encrypts the destination. |
| if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil { |
| t.Fatalf("got %v, want nil", err) |
| } |
		// Check that the destination is encrypted.
| checkRead("compose destination", obj3, key, contents+contents) |
| |
| // You can't compose one or more unencrypted source objects into an |
| // encrypted destination object. |
		_, err := obj4.CopierFrom(obj2.Key(key)).Run(ctx) // make an unencrypted copy of obj2 in obj4
| if err != nil { |
| t.Fatal(err) |
| } |
| if _, err := obj3.Key(key).ComposerFrom(obj4).Run(ctx); err == nil { |
| t.Fatal("got nil, want error") |
| } |
| }) |
| } |
| |
| func TestIntegration_NonexistentObjectRead(t *testing.T) { |
| t.Parallel() |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| _, err := client.Bucket(bucket).Object("object-does-not-exist").NewReader(ctx) |
| if !errors.Is(err, ErrObjectNotExist) { |
| t.Errorf("Objects: got %v, want ErrObjectNotExist", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_NonexistentBucket(t *testing.T) { |
| t.Parallel() |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _, prefix string, client *Client) { |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist { |
| t.Errorf("Attrs: got %v, want ErrBucketNotExist", err) |
| } |
| it := bkt.Objects(ctx, nil) |
| if _, err := it.Next(); err != ErrBucketNotExist { |
| t.Errorf("Objects: got %v, want ErrBucketNotExist", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_PerObjectStorageClass(t *testing.T) { |
| const ( |
| defaultStorageClass = "STANDARD" |
| newStorageClass = "NEARLINE" |
| ) |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(bucket) |
| |
| // The bucket should have the default storage class. |
| battrs := h.mustBucketAttrs(bkt) |
| if battrs.StorageClass != defaultStorageClass { |
| t.Fatalf("bucket storage class: got %q, want %q", |
| battrs.StorageClass, defaultStorageClass) |
| } |
| // Write an object; it should start with the bucket's storage class. |
| obj := bkt.Object("posc") |
| h.mustWrite(obj.NewWriter(ctx), []byte("foo")) |
| oattrs, err := obj.Attrs(ctx) |
| if err != nil { |
| t.Fatal(err) |
| } |
| if oattrs.StorageClass != defaultStorageClass { |
| t.Fatalf("object storage class: got %q, want %q", |
| oattrs.StorageClass, defaultStorageClass) |
| } |
| // Now use Copy to change the storage class. |
| copier := obj.CopierFrom(obj) |
| copier.StorageClass = newStorageClass |
| oattrs2, err := copier.Run(ctx) |
| if err != nil { |
			t.Fatal(err)
| } |
| if oattrs2.StorageClass != newStorageClass { |
| t.Fatalf("new object storage class: got %q, want %q", |
| oattrs2.StorageClass, newStorageClass) |
| } |
| |
| // We can also write a new object using a non-default storage class. |
| obj2 := bkt.Object("posc2") |
| w := obj2.NewWriter(ctx) |
| w.StorageClass = newStorageClass |
| h.mustWrite(w, []byte("xxx")) |
| if w.Attrs().StorageClass != newStorageClass { |
| t.Fatalf("new object storage class: got %q, want %q", |
| w.Attrs().StorageClass, newStorageClass) |
| } |
| }) |
| } |
| |
| func TestIntegration_NoUnicodeNormalization(t *testing.T) { |
| t.Parallel() |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| h := testHelper{t} |
| |
| for _, tst := range []struct { |
| nameQuoted, content string |
| }{ |
| {`"Caf\u00e9"`, "Normalization Form C"}, |
| {`"Cafe\u0301"`, "Normalization Form D"}, |
| } { |
			name, err := strconv.Unquote(tst.nameQuoted)
			if err != nil {
				t.Fatalf("invalid name: %s: %v", tst.nameQuoted, err)
			}
			w := bkt.Object(name).NewWriter(ctx)
			h.mustWrite(w, []byte(tst.content))
| if got := string(h.mustRead(bkt.Object(name))); got != tst.content { |
| t.Errorf("content of %s is %q, want %q", tst.nameQuoted, got, tst.content) |
| } |
| } |
| }) |
| } |
| |
| func TestIntegration_HashesOnUpload(t *testing.T) { |
| // Check that the user can provide hashes on upload, and that these are checked. |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| obj := client.Bucket(bucket).Object("hashesOnUpload-1") |
| data := []byte("I can't wait to be verified") |
| |
| write := func(w *Writer) error { |
| if _, err := w.Write(data); err != nil { |
| _ = w.Close() |
| return err |
| } |
| return w.Close() |
| } |
| |
| crc32c := crc32.Checksum(data, crc32cTable) |
| // The correct CRC should succeed. |
| w := obj.NewWriter(ctx) |
| w.CRC32C = crc32c |
| w.SendCRC32C = true |
| if err := write(w); err != nil { |
| t.Error(err) |
| } |
| |
| // If we change the CRC, validation should fail. |
| w = obj.NewWriter(ctx) |
| w.CRC32C = crc32c + 1 |
| w.SendCRC32C = true |
| if err := write(w); err == nil { |
| t.Error("write with bad CRC32c: want error, got nil") |
| } |
| |
| // If we have the wrong CRC but forget to send it, we succeed. |
| w = obj.NewWriter(ctx) |
| w.CRC32C = crc32c + 1 |
| if err := write(w); err != nil { |
| t.Error(err) |
| } |
| |
		// MD5
		md5Sum := md5.Sum(data)
		// The correct MD5 should succeed.
		w = obj.NewWriter(ctx)
		w.MD5 = md5Sum[:]
		if err := write(w); err != nil {
			t.Error(err)
		}

		// If we change the MD5, validation should fail.
		w = obj.NewWriter(ctx)
		w.MD5 = append([]byte(nil), md5Sum[:]...)
		w.MD5[0]++
| if err := write(w); err == nil { |
| t.Error("write with bad MD5: want error, got nil") |
| } |
| }) |
| } |
| |
| func TestIntegration_BucketIAM(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _, prefix string, client *Client) { |
| h := testHelper{t} |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), nil) |
| defer h.mustDeleteBucket(bkt) |
| // This bucket is unique to this test run. So we don't have |
| // to worry about other runs interfering with our IAM policy |
| // changes. |
| |
| member := "projectViewer:" + testutil.ProjID() |
| role := iam.RoleName("roles/storage.objectViewer") |
| // Get the bucket's IAM policy. |
| policy, err := bkt.IAM().Policy(ctx) |
| if err != nil { |
| t.Fatalf("Getting policy: %v", err) |
| } |
| // The member should not have the role. |
| if policy.HasRole(member, role) { |
| t.Errorf("member %q has role %q", member, role) |
| } |
| // Change the policy. |
| policy.Add(member, role) |
| if err := bkt.IAM().SetPolicy(ctx, policy); err != nil { |
| t.Fatalf("SetPolicy: %v", err) |
| } |
| // Confirm that the binding was added. |
| policy, err = bkt.IAM().Policy(ctx) |
| if err != nil { |
| t.Fatalf("Getting policy: %v", err) |
| } |
| if !policy.HasRole(member, role) { |
| t.Errorf("member %q does not have role %q", member, role) |
| } |
| |
| // Check TestPermissions. |
| // This client should have all these permissions (and more). |
| perms := []string{"storage.buckets.get", "storage.buckets.delete"} |
| got, err := bkt.IAM().TestPermissions(ctx, perms) |
| if err != nil { |
| t.Fatalf("TestPermissions: %v", err) |
| } |
| sort.Strings(perms) |
| sort.Strings(got) |
| if !testutil.Equal(got, perms) { |
| t.Errorf("got %v, want %v", got, perms) |
| } |
| }) |
| } |
| |
| // This test tests only possibilities where the user making the request is an |
| // owner on the project that owns the requester pays bucket. Therefore, we don't |
| // need a second project for this test. |
| // |
| // There are up to three entities involved in a requester-pays call: |
| // |
| // 1. The user making the request. Here, we use the account used as credentials |
| // for most of our integration tests. The following must hold for this test: |
| // - this user must have resourcemanager.projects.createBillingAssignment |
| // permission (Owner role) on (2) (the project, not the bucket) |
| // - this user must NOT have that permission on (3b). |
| // 2. The project that owns the requester-pays bucket. Here, that |
| // is the test project ID (see testutil.ProjID). |
| // 3. The project provided as the userProject parameter of the request; |
| // the project to be billed. This test uses: |
| // a. The project that owns the requester-pays bucket (same as (2)) |
| // b. Another project (the Firestore project). |
| func TestIntegration_RequesterPaysOwner(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, _, prefix string, client *Client) { |
| jwt, err := testutil.JWTConfig() |
| if err != nil { |
| t.Fatalf("testutil.JWTConfig: %v", err) |
| } |
| // an account that has permissions on the project that owns the bucket |
| mainUserEmail := jwt.Email |
| |
| // the project that owns the requester-pays bucket |
| mainProjectID := testutil.ProjID() |
| |
| client.SetRetry(WithPolicy(RetryAlways)) |
| |
| // Secondary project: a project that does not own the bucket. |
| // The "main" user should not have permission on this. |
| secondaryProject := os.Getenv(envFirestoreProjID) |
| if secondaryProject == "" { |
| t.Fatalf("need a second project (env var %s)", envFirestoreProjID) |
| } |
| |
| for _, test := range []struct { |
| desc string |
| userProject *string // to set on bucket, nil if it should not be set |
| expectSuccess bool |
| }{ |
| { |
| desc: "user is Owner on the project that owns the bucket", |
| userProject: nil, |
| expectSuccess: true, // by the rule permitting access by owners of the containing bucket |
| }, |
| { |
| desc: "userProject is unnecessary but allowed", |
| userProject: &mainProjectID, |
| expectSuccess: true, // by the rule permitting access by owners of the containing bucket |
| }, |
| { |
| desc: "cannot use someone else's project for billing", |
| userProject: &secondaryProject, |
| expectSuccess: false, // we cannot use a project we don't have access to for billing |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| h := testHelper{t} |
| ctx, cancel := context.WithTimeout(ctx, time.Minute) |
| defer cancel() |
| |
| printTestCase := func() string { |
| userProject := "none" |
| if test.userProject != nil { |
| userProject = *test.userProject |
| } |
| return fmt.Sprintf("user: %s\n\t\tcontaining project: %s\n\t\tUserProject: %s", mainUserEmail, mainProjectID, userProject) |
| } |
| |
| checkforErrors := func(desc string, err error) { |
| if err != nil && test.expectSuccess { |
| t.Errorf("%s: got unexpected error:%v\n\t\t%s", desc, err, printTestCase()) |
| } else if err == nil && !test.expectSuccess { |
| t.Errorf("%s: got unexpected success\n\t\t%s", desc, printTestCase()) |
| } |
| } |
| |
| bucketName := prefix + uidSpace.New() |
| requesterPaysBucket := client.Bucket(bucketName) |
| |
| // Create a requester-pays bucket |
| h.mustCreate(requesterPaysBucket, mainProjectID, &BucketAttrs{RequesterPays: true}) |
| t.Cleanup(func() { h.mustDeleteBucket(requesterPaysBucket) }) |
| |
| // Make sure the object exists, so we don't get confused by ErrObjectNotExist. |
| // The later write we perform may fail so we always write to the object as the user |
| // with permissions on the containing bucket (mainUser). |
| // The storage service may perform validation in any order (perhaps in parallel), |
| // so if we delete or update an object that doesn't exist and for which we lack permission, |
| // we could see either of those two errors. (See Google-internal bug 78341001.) |
| objectName := "acl-go-test" + uidSpaceObjects.New() |
| h.mustWrite(requesterPaysBucket.Object(objectName).NewWriter(ctx), []byte("hello")) |
| |
| // Set up the bucket to use depending on the test case |
| bucket := client.Bucket(bucketName) |
| if test.userProject != nil { |
| bucket = bucket.UserProject(*test.userProject) |
| } |
| |
| // Get bucket attrs |
| attrs, err := bucket.Attrs(ctx) |
| checkforErrors("get bucket attrs", err) |
| if attrs != nil { |
| if got, want := attrs.RequesterPays, true; got != want { |
| t.Fatalf("attr.RequesterPays = %t, want %t", got, want) |
| } |
| } |
| |
| // Bucket ACL operations |
| entity := ACLEntity("domain-google.com") |
| |
| checkforErrors("bucket acl set", bucket.ACL().Set(ctx, entity, RoleReader)) |
| _, err = bucket.ACL().List(ctx) |
| checkforErrors("bucket acl list", err) |
| checkforErrors("bucket acl delete", bucket.ACL().Delete(ctx, entity)) |
| |
| // Object operations (except for delete) |
				// Retry to account for propagation delay in metadata updates.
| o := bucket.Object(objectName) |
| ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*10) |
| defer cancel() |
| // Only retry when we expect success to avoid retrying for 10 seconds |
| // when we know it will fail |
| if test.expectSuccess { |
| o = o.Retryer(WithErrorFunc(retryOnTransient400and403)) |
| } |
| checkforErrors("write object", writeObject(ctxWithTimeout, o, "text/plain", []byte("hello"))) |
| _, err = readObject(ctx, bucket.Object(objectName)) |
| checkforErrors("read object", err) |
| _, err = bucket.Object(objectName).Attrs(ctx) |
| checkforErrors("get object attrs", err) |
| _, err = bucket.Object(objectName).Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"}) |
| checkforErrors("update object", err) |
| |
| // Object ACL operations |
| checkforErrors("object acl set", bucket.Object(objectName).ACL().Set(ctx, entity, RoleReader)) |
| _, err = bucket.Object(objectName).ACL().List(ctx) |
| checkforErrors("object acl list", err) |
| checkforErrors("object acl list", bucket.Object(objectName).ACL().Delete(ctx, entity)) |
| |
| // Default object ACL operations |
| // Once again, we interleave buckets to avoid rate limits |
| checkforErrors("default object acl set", bucket.DefaultObjectACL().Set(ctx, entity, RoleReader)) |
| _, err = bucket.DefaultObjectACL().List(ctx) |
| checkforErrors("default object acl list", err) |
| checkforErrors("default object acl delete", bucket.DefaultObjectACL().Delete(ctx, entity)) |
| |
| // Copy |
| _, err = bucket.Object("copy").CopierFrom(bucket.Object(objectName)).Run(ctx) |
| checkforErrors("copy", err) |
| // Delete "copy" object, if created |
| if err == nil { |
| t.Cleanup(func() { |
| h.mustDeleteObject(bucket.Object("copy")) |
| }) |
| } |
| |
| // Compose |
| _, err = bucket.Object("compose").ComposerFrom(bucket.Object(objectName), bucket.Object("copy")).Run(ctx) |
| checkforErrors("compose", err) |
| // Delete "compose" object, if created |
| if err == nil { |
| t.Cleanup(func() { |
| h.mustDeleteObject(bucket.Object("compose")) |
| }) |
| } |
| |
| // Delete object |
| if err = bucket.Object(objectName).Delete(ctx); err != nil { |
| // We still want to delete object if the test errors |
| h.mustDeleteObject(requesterPaysBucket.Object(objectName)) |
| } |
| checkforErrors("delete object", err) |
| }) |
| } |
| }) |
| } |
| |
| // This test needs a second project and user to test all possibilities. Since we |
| // need these things for Firestore already, we use them here. |
| // |
| // There are up to three entities involved in a requester-pays call: |
| // 1. The user making the request. Here, we use the account used for the |
| // Firestore tests. The following must hold for this test to work: |
| // - this user must NOT have resourcemanager.projects.createBillingAssignment |
| // on the project that owns the bucket (2). |
| // - this user must have serviceusage.services.use permission on the Firestore |
| // project (3b). |
| // - this user must NOT have that serviceusage.services.use permission on |
| // the project that owns the bucket (3a). |
| // 2. The project that owns the requester-pays bucket. Here, that |
| // is the test project ID (see testutil.ProjID). |
| // 3. The project provided as the userProject parameter of the request; |
| // the project to be billed. This test uses: |
| // a. The project that owns the requester-pays bucket (same as (2)) |
| // b. Another project (the Firestore project). |
| func TestIntegration_RequesterPaysNonOwner(t *testing.T) { |
| if testing.Short() && !replaying { |
| t.Skip("Integration tests skipped in short mode") |
| } |
| ctx := context.Background() |
| |
| // Main project: the project that owns the requester-pays bucket. |
| mainProject := testutil.ProjID() |
| |
| // Secondary project: a project that does not own the bucket. |
| // The "main" user does not have permission on this. |
| // This project should have billing enabled. |
| secondaryProject := os.Getenv(envFirestoreProjID) |
| if secondaryProject == "" { |
| t.Fatalf("need a second project (env var %s)", envFirestoreProjID) |
| } |
| |
| // Secondary email: an account with permissions on the secondary project, |
| // but not on the main project. |
| // We will grant this email permissions to the bucket created under the main |
| // project, but it must provide a user project to make requests |
| // against that bucket (since it's a requester-pays bucket). |
| secondaryUserEmail, err := keyFileEmail(os.Getenv(envFirestorePrivateKey)) |
| if err != nil { |
| t.Fatalf("keyFileEmail error getting second account (env var %s): %v", envFirestorePrivateKey, err) |
| } |
| |
| // Token source from secondary email to authenticate to client |
| ts := testutil.TokenSourceEnv(ctx, envFirestorePrivateKey, ScopeFullControl) |
| if ts == nil { |
| t.Fatalf("need a second account (env var %s)", envFirestorePrivateKey) |
| } |
| |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, _, prefix string, client *Client) { |
| client.SetRetry(WithPolicy(RetryAlways)) |
| |
| for _, test := range []struct { |
| desc string |
| userProject *string // to set on bucket, nil if it should not be set |
| expectSuccess bool |
| wantErrorCode int |
| wantErrorCodeGRPC codes.Code |
| }{ |
| { |
| desc: "no UserProject", |
| userProject: nil, |
| expectSuccess: false, // by the standard requester-pays rule |
| }, |
| { |
| desc: "user is an Editor on UserProject", |
| userProject: &secondaryProject, |
| expectSuccess: true, // by the standard requester-pays rule |
| }, |
| { |
| desc: "user is not an Editor on UserProject", |
| userProject: &mainProject, |
| expectSuccess: false, // we cannot use a project we don't have access to for billing |
| wantErrorCode: 403, |
| wantErrorCodeGRPC: codes.PermissionDenied, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| ctx, cancel := context.WithTimeout(ctx, 30*time.Second) |
| t.Cleanup(cancel) |
| |
| printTestCase := func() string { |
| userProject := "none" |
| if test.userProject != nil { |
| userProject = *test.userProject |
| } |
| return fmt.Sprintf("user: %s\n\t\tcontaining project: %s\n\t\tUserProject: %s", secondaryUserEmail, mainProject, userProject) |
| } |
| |
| checkforErrors := func(desc string, err error) { |
| errCode := extractErrCode(err) |
					if err != nil && test.expectSuccess {
						t.Errorf("%s: got unexpected error: %v\n\t\t%s", desc, err, printTestCase())
					} else if err == nil && !test.expectSuccess {
						t.Errorf("%s: got unexpected success\n\t\t%s", desc, printTestCase())
					} else if !test.expectSuccess && test.wantErrorCode != 0 {
						if (status.Code(err) != codes.OK && status.Code(err) != codes.Unknown && status.Code(err) != test.wantErrorCodeGRPC) || (errCode > 0 && errCode != test.wantErrorCode) {
							t.Errorf("%s: mismatched errors; want error code: %d or grpc error: %s, got error: %v\n\t\t%s\n",
								desc, test.wantErrorCode, test.wantErrorCodeGRPC, err, printTestCase())
						}
| } |
| } |
| |
| bucketName := prefix + uidSpace.New() |
| objectName := "acl-go-test" + uidSpaceObjects.New() |
| |
| setUpRequesterPaysBucket(ctx, t, bucketName, objectName, secondaryUserEmail) |
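// The helper creates a fresh requester-pays bucket containing objectName
// and grants secondaryUserEmail access to it, so each case runs against
// an isolated bucket.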
| |
| // Set up the bucket to use depending on the test case |
| bucket := client.Bucket(bucketName) |
| if test.userProject != nil { |
| bucket = bucket.UserProject(*test.userProject) |
| } |
| |
| // Get bucket attrs |
| attrs, err := bucket.Attrs(ctx) |
checkForErrors("get bucket attrs", err)
| if attrs != nil { |
| if got, want := attrs.RequesterPays, true; got != want { |
| t.Fatalf("attr.RequesterPays = %t, want %t", got, want) |
| } |
| } |
| |
| // Bucket ACL operations |
| entity := ACLEntity("domain-google.com") |
| |
checkForErrors("bucket acl set", bucket.ACL().Set(ctx, entity, RoleReader))
_, err = bucket.ACL().List(ctx)
checkForErrors("bucket acl list", err)
checkForErrors("bucket acl delete", bucket.ACL().Delete(ctx, entity))
| |
| // Object operations (except for delete) |
// Retry to account for propagation delay of the bucket metadata update
// (we granted secondaryUserEmail access to the bucket above).
| o := bucket.Object(objectName) |
| ctxWithTimeout, cancel := context.WithTimeout(ctx, time.Second*15) |
| defer cancel() |
| // Only retry when we expect success to avoid retrying |
| // when we know it will fail |
| if test.expectSuccess { |
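// retryOnTransient400and403 additionally treats early 400/403 responses
// as retryable, since the access grant above can take time to propagate.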
| o = o.Retryer(WithErrorFunc(retryOnTransient400and403)) |
| } |
checkForErrors("write object", writeObject(ctxWithTimeout, o, "text/plain", []byte("hello")))
_, err = readObject(ctx, bucket.Object(objectName))
checkForErrors("read object", err)
_, err = bucket.Object(objectName).Attrs(ctx)
checkForErrors("get object attrs", err)
_, err = bucket.Object(objectName).Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"})
checkForErrors("update object", err)
| |
| // Object ACL operations |
checkForErrors("object acl set", bucket.Object(objectName).ACL().Set(ctx, entity, RoleReader))
_, err = bucket.Object(objectName).ACL().List(ctx)
checkForErrors("object acl list", err)
checkForErrors("object acl delete", bucket.Object(objectName).ACL().Delete(ctx, entity))
| |
| // Default object ACL operations |
checkForErrors("default object acl set", bucket.DefaultObjectACL().Set(ctx, entity, RoleReader))
_, err = bucket.DefaultObjectACL().List(ctx)
checkForErrors("default object acl list", err)
checkForErrors("default object acl delete", bucket.DefaultObjectACL().Delete(ctx, entity))
| |
| // Copy |
| copyObj := bucket.Object("copy") |
| _, err = copyObj.CopierFrom(bucket.Object(objectName)).Run(ctx) |
checkForErrors("copy", err)
| // Delete "copy" object, if created |
| if err == nil { |
| t.Cleanup(func() { |
| if err := deleteObjectIfExists(copyObj, WithErrorFunc(retryOnTransient400and403)); err != nil { |
| t.Error(err) |
| } |
| }) |
| } |
| |
| // Compose |
| composeObj := bucket.Object("compose") |
| _, err = composeObj.ComposerFrom(bucket.Object(objectName), bucket.Object("copy")).Run(ctx) |
checkForErrors("compose", err)
| // Delete "compose" object, if created |
| if err == nil { |
| t.Cleanup(func() { |
| if err := deleteObjectIfExists(composeObj, WithErrorFunc(retryOnTransient400and403)); err != nil { |
| t.Error(err) |
| } |
| }) |
| } |
| |
| // Delete object |
checkForErrors("delete object", bucket.Object(objectName).Delete(ctx))
| }) |
| } |
| }, option.WithTokenSource(ts)) |
| } |
| |
| func TestIntegration_Notifications(t *testing.T) { |
| multiTransportTest(skipGRPC("notifications not implemented"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| |
| checkNotifications := func(msg string, want map[string]*Notification) { |
| got, err := bkt.Notifications(ctx) |
| if err != nil { |
| t.Fatal(err) |
| } |
| if diff := testutil.Diff(got, want); diff != "" { |
| t.Errorf("%s: got=-, want=+:\n%s", msg, diff) |
| } |
| } |
| checkNotifications("initial", map[string]*Notification{}) |
| |
| nArg := &Notification{ |
| TopicProjectID: testutil.ProjID(), |
| TopicID: "go-storage-notification-test", |
| PayloadFormat: NoPayload, |
| } |
| n, err := bkt.AddNotification(ctx, nArg) |
| if err != nil { |
| t.Fatal(err) |
| } |
| if n.ID == "" { |
| t.Fatal("expected created Notification to have non-empty ID") |
| } |
| nArg.ID = n.ID |
| if !testutil.Equal(n, nArg) { |
| t.Errorf("got %+v, want %+v", n, nArg) |
| } |
| checkNotifications("after add", map[string]*Notification{n.ID: n}) |
| |
| if err := bkt.DeleteNotification(ctx, n.ID); err != nil { |
| t.Fatal(err) |
| } |
| checkNotifications("after delete", map[string]*Notification{}) |
| }) |
| } |
| |
| func TestIntegration_PublicBucket(t *testing.T) { |
| // Confirm that an unauthenticated client can access a public bucket. |
| // See https://cloud.google.com/storage/docs/public-datasets/landsat |
| |
| multiTransportTest(skipGRPC("no public buckets for gRPC"), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| const landsatBucket = "gcp-public-data-landsat" |
| const landsatPrefix = "LC08/01/001/002/LC08_L1GT_001002_20160817_20170322_01_T2/" |
| const landsatObject = landsatPrefix + "LC08_L1GT_001002_20160817_20170322_01_T2_ANG.txt" |
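// The object size and count asserted below are fixed properties of this
// public dataset, as captured in the test recording.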
| |
| h := testHelper{t} |
| bkt := client.Bucket(landsatBucket) |
| obj := bkt.Object(landsatObject) |
| |
| // Read a public object. |
data := h.mustRead(obj)
if got, want := len(data), 117255; got != want {
t.Errorf("len(data) = %d, want %d", got, want)
| } |
| |
| // List objects in a public bucket. |
| iter := bkt.Objects(ctx, &Query{Prefix: landsatPrefix}) |
| gotCount := 0 |
| for { |
| _, err := iter.Next() |
| if err == iterator.Done { |
| break |
| } |
| if err != nil { |
| t.Fatal(err) |
| } |
| gotCount++ |
| } |
| if wantCount := 14; gotCount != wantCount { |
| t.Errorf("object count: got %d, want %d", gotCount, wantCount) |
| } |
| |
| errCode := func(err error) int { |
| var err2 *googleapi.Error |
if !errors.As(err, &err2) {
| return -1 |
| } |
| return err2.Code |
| } |
| |
| // Reading from or writing to a non-public bucket fails. |
| c := testConfig(ctx, t) |
| defer c.Close() |
| nonPublicObj := client.Bucket(bucket).Object("noauth") |
| // XML API calls return 403 but the JSON API returns 401. Either is |
| // acceptable for reads. |
| _, err := readObject(ctx, nonPublicObj) |
| if got := errCode(err); got != 403 && got != 401 { |
| t.Errorf("got code %d; want %v\nerror: %v", got, "401 or 403", err) |
| } |
| err = writeObject(ctx, nonPublicObj, "text/plain", []byte("b")) |
| if got, want := errCode(err), 401; got != want { |
| t.Errorf("got code %d; want %d\nerror: %v", got, want, err) |
| } |
| }, option.WithoutAuthentication()) |
| } |
| |
| func TestIntegration_PublicObject(t *testing.T) { |
| multiTransportTest(context.Background(), t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| publicObj := client.Bucket(bucket).Object("public-obj" + uidSpaceObjects.New()) |
| contents := randomContents() |
| |
| w := publicObj.Retryer(WithPolicy(RetryAlways)).NewWriter(ctx) |
| if _, err := w.Write(contents); err != nil { |
| t.Fatalf("writer.Write: %v", err) |
| } |
| if err := w.Close(); err != nil { |
| t.Errorf("writer.Close: %v", err) |
| } |
| |
| // Set object ACL to public read. |
| if err := publicObj.ACL().Set(ctx, AllUsers, RoleReader); err != nil { |
t.Fatalf("ACL().Set failed with %v", err)
| } |
| |
| // Create unauthenticated client. |
| publicClient, err := newTestClient(ctx, option.WithoutAuthentication()) |
| if err != nil { |
| t.Fatalf("newTestClient: %v", err) |
| } |
| |
| // Test can read public object. |
| publicObjUnauthenticated := publicClient.Bucket(bucket).Object(publicObj.ObjectName()) |
| data, err := readObject(context.Background(), publicObjUnauthenticated) |
| if err != nil { |
| t.Fatalf("readObject: %v", err) |
| } |
| |
| if !bytes.Equal(data, contents) { |
| t.Errorf("Public object's content: got %q, want %q", data, contents) |
| } |
| |
| // Test cannot write to read-only object without authentication. |
| wc := publicObjUnauthenticated.NewWriter(ctx) |
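// For content smaller than ChunkSize the writer buffers locally and sends
// the upload request only on Close, so the permission error surfaces there.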
| if _, err := wc.Write([]byte("hello")); err != nil { |
| t.Errorf("Write unexpectedly failed with %v", err) |
| } |
| if err = wc.Close(); err == nil { |
| t.Error("Close expected an error, found none") |
| } |
| }) |
| } |
| |
| func TestIntegration_ReadCRC(t *testing.T) { |
| // Test that the checksum is handled correctly when reading files. |
| // For gzipped files, see https://github.com/GoogleCloudPlatform/google-cloud-dotnet/issues/1641. |
| ctx := skipJSONReads(skipGRPC("transcoding not supported"), "https://github.com/googleapis/google-cloud-go/issues/7786") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| const ( |
| // This is an uncompressed file. |
| // See https://cloud.google.com/storage/docs/public-datasets/landsat |
| uncompressedBucket = "gcp-public-data-landsat" |
| uncompressedObject = "LC08/01/001/002/LC08_L1GT_001002_20160817_20170322_01_T2/LC08_L1GT_001002_20160817_20170322_01_T2_ANG.txt" |
| |
| gzippedObject = "gzipped-text.txt" |
| ) |
| |
| h := testHelper{t} |
| |
| // Create gzipped object. |
| var buf bytes.Buffer |
| zw := gzip.NewWriter(&buf) |
| zw.Name = gzippedObject |
| if _, err := zw.Write([]byte("gzipped object data")); err != nil { |
| t.Fatalf("creating gzip: %v", err) |
| } |
| if err := zw.Close(); err != nil { |
| t.Fatalf("closing gzip writer: %v", err) |
| } |
| w := client.Bucket(bucket).Object(gzippedObject).NewWriter(ctx) |
| w.ContentEncoding = "gzip" |
| w.ContentType = "text/plain" |
| h.mustWrite(w, buf.Bytes()) |
| |
| for _, test := range []struct { |
| desc string |
| obj *ObjectHandle |
| offset, length int64 |
| readCompressed bool // don't decompress a gzipped file |
| |
| wantErr bool |
| wantCheck bool // Should Reader try to check the CRC? |
| }{ |
| { |
| desc: "uncompressed, entire file", |
| obj: client.Bucket(uncompressedBucket).Object(uncompressedObject), |
| offset: 0, |
| length: -1, |
| readCompressed: false, |
| wantCheck: true, |
| }, |
| { |
| desc: "uncompressed, entire file, don't decompress", |
| obj: client.Bucket(uncompressedBucket).Object(uncompressedObject), |
| offset: 0, |
| length: -1, |
| readCompressed: true, |
| wantCheck: true, |
| }, |
| { |
| desc: "uncompressed, suffix", |
| obj: client.Bucket(uncompressedBucket).Object(uncompressedObject), |
| offset: 1, |
| length: -1, |
| readCompressed: false, |
| wantCheck: false, |
| }, |
| { |
| desc: "uncompressed, prefix", |
| obj: client.Bucket(uncompressedBucket).Object(uncompressedObject), |
| offset: 0, |
| length: 18, |
| readCompressed: false, |
| wantCheck: false, |
| }, |
| { |
| // When a gzipped file is unzipped on read, we can't verify the checksum |
| // because it was computed against the zipped contents. We can detect |
| // this case using http.Response.Uncompressed. |
| desc: "compressed, entire file, unzipped", |
| obj: client.Bucket(bucket).Object(gzippedObject), |
| offset: 0, |
| length: -1, |
| readCompressed: false, |
| wantCheck: false, |
| }, |
| { |
// When we read a gzipped file in its compressed form, it's like reading
// a regular file: the served content and the CRC match.
| desc: "compressed, entire file, read compressed", |
| obj: client.Bucket(bucket).Object(gzippedObject), |
| offset: 0, |
| length: -1, |
| readCompressed: true, |
| wantCheck: true, |
| }, |
| { |
| desc: "compressed, partial, server unzips", |
| obj: client.Bucket(bucket).Object(gzippedObject), |
| offset: 1, |
| length: 8, |
| readCompressed: false, |
| wantErr: true, // GCS can't serve part of a gzipped object |
| wantCheck: false, |
| }, |
| { |
| desc: "compressed, partial, read compressed", |
| obj: client.Bucket(bucket).Object(gzippedObject), |
| offset: 1, |
| length: 8, |
| readCompressed: true, |
| wantCheck: false, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| // Test both Read and WriteTo. |
| for _, c := range readCases { |
| t.Run(c.desc, func(t *testing.T) { |
| obj := test.obj.ReadCompressed(test.readCompressed) |
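// ReadCompressed(true) requests the stored (gzipped) bytes as-is rather
// than having the server decompress them.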
| r, err := obj.NewRangeReader(ctx, test.offset, test.length) |
| if err != nil { |
| if test.wantErr { |
| return |
| } |
| t.Fatalf("%s: %v", test.desc, err) |
| } |
| if got, want := r.checkCRC, test.wantCheck; got != want { |
| t.Errorf("%s, checkCRC: got %t, want %t", test.desc, got, want) |
| } |
| _, err = c.readFunc(r) |
| _ = r.Close() |
| if err != nil { |
| t.Fatalf("%s: %v", test.desc, err) |
| } |
| }) |
| |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_CancelWrite(t *testing.T) { |
| // Verify that canceling the writer's context immediately stops uploading an object |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket, _ string, client *Client) { |
| bkt := client.Bucket(bucket) |
| |
| cctx, cancel := context.WithCancel(ctx) |
| defer cancel() |
| obj := bkt.Object("cancel-write") |
| w := obj.NewWriter(cctx) |
| w.ChunkSize = googleapi.MinUploadChunkSize |
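// MinUploadChunkSize is the smallest chunk size the API accepts; buf below
// holds exactly one chunk's worth of data.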
| buf := make([]byte, w.ChunkSize) |
| // Write the first chunk. This is read in its entirety before sending the request |
| // (see google.golang.org/api/gensupport.PrepareUpload), so we expect it to return |
| // without error. |
| _, err := w.Write(buf) |
| if err != nil { |
| t.Fatal(err) |
| } |
| // Now cancel the context. |
| cancel() |
| // The next Write should return context.Canceled. |
| _, err = w.Write(buf) |
| if !errors.Is(err, context.Canceled) { |
| t.Fatalf("got %v, wanted context.Canceled", err) |
| } |
| // The Close should too. |
| err = w.Close() |
| if !errors.Is(err, context.Canceled) { |
| t.Fatalf("got %v, wanted context.Canceled", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateCORS(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| initialSettings := []CORS{ |
| { |
| MaxAge: time.Hour, |
| Methods: []string{"POST"}, |
| Origins: []string{"some-origin.com"}, |
| ResponseHeaders: []string{"foo-bar"}, |
| }, |
| } |
| |
| for _, test := range []struct { |
| desc string |
| input []CORS |
| want []CORS |
| }{ |
| { |
| desc: "set new values", |
| input: []CORS{ |
| { |
| MaxAge: time.Hour, |
| Methods: []string{"GET"}, |
| Origins: []string{"*"}, |
| ResponseHeaders: []string{"some-header"}, |
| }, |
| }, |
| want: []CORS{ |
| { |
| MaxAge: time.Hour, |
| Methods: []string{"GET"}, |
| Origins: []string{"*"}, |
| ResponseHeaders: []string{"some-header"}, |
| }, |
| }, |
| }, |
| { |
| desc: "set to empty to remove existing policies", |
| input: []CORS{}, |
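// A non-nil empty slice clears all existing CORS policies.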
| want: nil, |
| }, |
| { |
| desc: "do not set to keep existing policies", |
| input: nil, |
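// A nil slice leaves the existing CORS policies untouched.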
| want: []CORS{ |
| { |
| MaxAge: time.Hour, |
| Methods: []string{"POST"}, |
| Origins: []string{"some-origin.com"}, |
| ResponseHeaders: []string{"foo-bar"}, |
| }, |
| }, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{CORS: initialSettings}) |
| defer h.mustDeleteBucket(bkt) |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{CORS: test.input}, h.mustBucketAttrs(bkt).MetaGeneration) |
| attrs := h.mustBucketAttrs(bkt) |
| if diff := testutil.Diff(attrs.CORS, test.want); diff != "" { |
| t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateDefaultEventBasedHold(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{}) |
| defer h.mustDeleteBucket(bkt) |
| attrs := h.mustBucketAttrs(bkt) |
| if attrs.DefaultEventBasedHold != false { |
| t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, false) |
| } |
| |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{DefaultEventBasedHold: true}, attrs.MetaGeneration) |
| attrs = h.mustBucketAttrs(bkt) |
| if attrs.DefaultEventBasedHold != true { |
| t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, true) |
| } |
| |
| // Omitting it should leave the value unchanged. |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RequesterPays: true}, attrs.MetaGeneration) |
| attrs = h.mustBucketAttrs(bkt) |
| if attrs.DefaultEventBasedHold != true { |
| t.Errorf("got=%v, want=%v", attrs.DefaultEventBasedHold, true) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateEventBasedHold(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| obj := client.Bucket(bucket).Object("some-obj") |
| h.mustWrite(obj.NewWriter(ctx), randomContents()) |
| |
| defer func() { |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{EventBasedHold: false}, h.mustObjectAttrs(obj).Metageneration) |
| h.mustDeleteObject(obj) |
| }() |
| |
| attrs := h.mustObjectAttrs(obj) |
| if attrs.EventBasedHold != false { |
| t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, false) |
| } |
| |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{EventBasedHold: true}, attrs.Metageneration) |
| attrs = h.mustObjectAttrs(obj) |
| if attrs.EventBasedHold != true { |
| t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, true) |
| } |
| |
| // Omitting it should leave the value unchanged. |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{ContentType: "foo"}, attrs.Metageneration) |
| attrs = h.mustObjectAttrs(obj) |
| if attrs.EventBasedHold != true { |
| t.Fatalf("got=%v, want=%v", attrs.EventBasedHold, true) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateTemporaryHold(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| obj := client.Bucket(bucket).Object("updatetemporaryhold-obj") |
| h.mustWrite(obj.NewWriter(ctx), randomContents()) |
| |
| defer func() { |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{TemporaryHold: false}, h.mustObjectAttrs(obj).Metageneration) |
| h.mustDeleteObject(obj) |
| }() |
| |
| attrs := h.mustObjectAttrs(obj) |
| if attrs.TemporaryHold != false { |
| t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, false) |
| } |
| |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{TemporaryHold: true}, attrs.Metageneration) |
| attrs = h.mustObjectAttrs(obj) |
| if attrs.TemporaryHold != true { |
| t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, true) |
| } |
| |
| // Omitting it should leave the value unchanged. |
| h.mustUpdateObject(obj, ObjectAttrsToUpdate{ContentType: "foo"}, attrs.Metageneration) |
| attrs = h.mustObjectAttrs(obj) |
| if attrs.TemporaryHold != true { |
| t.Fatalf("got=%v, want=%v", attrs.TemporaryHold, true) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateRetentionExpirationTime(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}}) |
| obj := bkt.Object("some-obj") |
| h.mustWrite(obj.NewWriter(ctx), randomContents()) |
| |
| defer func() { |
| t.Helper() |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 0}}, h.mustBucketAttrs(bkt).MetaGeneration) |
| |
| // RetentionPeriod of less than a day is explicitly called out |
| // as best effort and not guaranteed, so let's log problems deleting |
| // objects instead of failing. |
| if err := obj.Delete(context.Background()); err != nil { |
| t.Logf("object delete: %v", err) |
| } |
| if err := bkt.Delete(context.Background()); err != nil { |
| t.Logf("bucket delete: %v", err) |
| } |
| }() |
| |
| attrs := h.mustObjectAttrs(obj) |
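// The server derives RetentionExpirationTime from the object's creation
// time plus the bucket's retention period.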
if attrs.RetentionExpirationTime.IsZero() {
t.Fatalf("got=%v, wanted a non-zero value", attrs.RetentionExpirationTime)
}
| }) |
| } |
| |
| func TestIntegration_CustomTime(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, bucket string, _ string, client *Client) { |
| h := testHelper{t} |
| |
| // Create object with CustomTime. |
| bkt := client.Bucket(bucket) |
| obj := bkt.Object("custom-time-obj") |
| w := obj.NewWriter(ctx) |
| ct := time.Date(2020, 8, 25, 12, 12, 12, 0, time.UTC) |
| w.ObjectAttrs.CustomTime = ct |
| h.mustWrite(w, randomContents()) |
| |
| // Validate that CustomTime has been set |
| checkCustomTime := func(want time.Time) error { |
| attrs, err := obj.Attrs(ctx) |
| if err != nil { |
return fmt.Errorf("failed to get object attrs: %w", err)
| } |
| if got := attrs.CustomTime; got != want { |
return fmt.Errorf("CustomTime not set correctly: got %+v, want %+v", got, want)
| } |
| return nil |
| } |
| |
| if err := checkCustomTime(ct); err != nil { |
| t.Fatalf("checking CustomTime: %v", err) |
| } |
| |
// Updating CustomTime to a later time should succeed.
| laterTime := ct.Add(10 * time.Hour) |
| if _, err := obj.Update(ctx, ObjectAttrsToUpdate{CustomTime: laterTime}); err != nil { |
| t.Fatalf("updating CustomTime: %v", err) |
| } |
| |
// Updating CustomTime to a time earlier than the current value should
// fail, since CustomTime can only move forward.
| earlierTime := ct.Add(5 * time.Hour) |
| if _, err := obj.Update(ctx, ObjectAttrsToUpdate{CustomTime: earlierTime}); err == nil { |
| t.Fatalf("backdating CustomTime: expected error, got none") |
| } |
| |
| // Zero value for CustomTime should be ignored. Set TemporaryHold so that |
| // we don't send an empty update request, which is invalid for gRPC. |
| if _, err := obj.Update(ctx, ObjectAttrsToUpdate{TemporaryHold: false}); err != nil { |
| t.Fatalf("empty update: %v", err) |
| } |
| if err := checkCustomTime(laterTime); err != nil { |
| t.Fatalf("after sending zero value: %v", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_UpdateRetentionPolicy(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| initial := &RetentionPolicy{RetentionPeriod: time.Minute} |
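// A non-nil empty RetentionPolicy removes the policy on update, while nil
// leaves it unchanged; the cases below exercise both.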
| |
| for _, test := range []struct { |
| desc string |
| input *RetentionPolicy |
| want *RetentionPolicy |
| }{ |
| { |
| desc: "update", |
| input: &RetentionPolicy{RetentionPeriod: time.Hour}, |
| want: &RetentionPolicy{RetentionPeriod: time.Hour}, |
| }, |
| { |
| desc: "update even with timestamp (EffectiveTime should be ignored)", |
| input: &RetentionPolicy{RetentionPeriod: time.Hour, EffectiveTime: time.Now()}, |
| want: &RetentionPolicy{RetentionPeriod: time.Hour}, |
| }, |
| { |
| desc: "remove", |
| input: &RetentionPolicy{}, |
| want: nil, |
| }, |
| { |
| desc: "remove even with timestamp (EffectiveTime should be ignored)", |
| input: &RetentionPolicy{EffectiveTime: time.Now().Add(time.Hour)}, |
| want: nil, |
| }, |
| { |
| desc: "ignore", |
| input: nil, |
| want: initial, |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| h := testHelper{t} |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: initial}) |
| defer h.mustDeleteBucket(bkt) |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: test.input}, h.mustBucketAttrs(bkt).MetaGeneration) |
| |
| attrs := h.mustBucketAttrs(bkt) |
| if attrs.RetentionPolicy != nil && attrs.RetentionPolicy.EffectiveTime.Unix() == 0 { |
| // Should be set by the server and parsed by the client |
| t.Fatal("EffectiveTime should be set, but it was not") |
| } |
| if diff := testutil.Diff(attrs.RetentionPolicy, test.want, cmpopts.IgnoreTypes(time.Time{})); diff != "" { |
| t.Errorf("input: %v\ngot=-, want=+:\n%s", test.input, diff) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_DeleteObjectInBucketWithRetentionPolicy(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: 25 * time.Hour}}) |
| defer h.mustDeleteBucket(bkt) |
| |
| o := bkt.Object("some-object") |
| if err := writeObject(ctx, o, "text/plain", []byte("hello world")); err != nil { |
| t.Fatal(err) |
| } |
| |
| if err := o.Delete(ctx); err == nil { |
t.Fatal("expected an error deleting an object in a bucket with a retention period, got nil")
| } |
| |
| // Remove the retention period |
| h.mustUpdateBucket(bkt, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{}}, h.mustBucketAttrs(bkt).MetaGeneration) |
| |
| // Delete with retry, as bucket metadata changes |
| // can take some time to propagate. |
| retry := func(err error) bool { return err != nil } |
| ctx, cancel := context.WithTimeout(ctx, time.Second*10) |
| defer cancel() |
| |
| o = o.Retryer(WithErrorFunc(retry), WithPolicy(RetryAlways)) |
| if err := o.Delete(ctx); err != nil { |
| t.Fatalf("object delete: %v", err) |
| } |
| }) |
| } |
| |
| func TestIntegration_LockBucket(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}}) |
| attrs := h.mustBucketAttrs(bkt) |
| if attrs.RetentionPolicy.IsLocked { |
| t.Fatal("Expected bucket to begin unlocked, but it was not") |
| } |
| err := bkt.If(BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx) |
| if err != nil { |
| t.Fatal("could not lock", err) |
| } |
| |
| attrs = h.mustBucketAttrs(bkt) |
| if !attrs.RetentionPolicy.IsLocked { |
| t.Fatal("Expected bucket to be locked, but it was not") |
| } |
| |
| _, err = bkt.Update(ctx, BucketAttrsToUpdate{RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour}}) |
| if err == nil { |
| t.Fatal("Expected error updating locked bucket, got nil") |
| } |
| }) |
| } |
| |
| func TestIntegration_LockBucket_MetagenerationRequired(t *testing.T) { |
| ctx := skipJSONReads(context.Background(), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| bkt := client.Bucket(prefix + uidSpace.New()) |
| h.mustCreate(bkt, testutil.ProjID(), &BucketAttrs{ |
| RetentionPolicy: &RetentionPolicy{RetentionPeriod: time.Hour * 25}, |
| }) |
| err := bkt.LockRetentionPolicy(ctx) |
| if err == nil { |
| t.Fatal("expected error locking bucket without metageneration condition, got nil") |
| } |
| }) |
| } |
| |
| func TestIntegration_BucketObjectRetention(t *testing.T) { |
| ctx := skipJSONReads(skipGRPC("not yet available in gRPC - b/308194853"), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| setTrue, setFalse := true, false |
| |
| for _, test := range []struct { |
| desc string |
| enable *bool |
| wantRetentionMode string |
| }{ |
| { |
| desc: "ObjectRetentionMode is not enabled by default", |
| wantRetentionMode: "", |
| }, |
| { |
| desc: "Enable retention", |
| enable: &setTrue, |
| wantRetentionMode: "Enabled", |
| }, |
| { |
| desc: "Set object retention to false", |
| enable: &setFalse, |
| wantRetentionMode: "", |
| }, |
| } { |
| t.Run(test.desc, func(t *testing.T) { |
| b := client.Bucket(prefix + uidSpace.New()) |
| if test.enable != nil { |
| b = b.SetObjectRetention(*test.enable) |
| } |
| |
| err := b.Create(ctx, testutil.ProjID(), nil) |
| if err != nil { |
| t.Fatalf("error creating bucket: %v", err) |
| } |
| t.Cleanup(func() { b.Delete(ctx) }) |
| |
| attrs, err := b.Attrs(ctx) |
| if err != nil { |
| t.Fatalf("b.Attrs: %v", err) |
| } |
| if got, want := attrs.ObjectRetentionMode, test.wantRetentionMode; got != want { |
| t.Errorf("expected ObjectRetentionMode to be %q, got %q", want, got) |
| } |
| }) |
| } |
| }) |
| } |
| |
| func TestIntegration_ObjectRetention(t *testing.T) { |
| ctx := skipJSONReads(skipGRPC("not yet available in gRPC - b/308194853"), "no reads in test") |
| multiTransportTest(ctx, t, func(t *testing.T, ctx context.Context, _ string, prefix string, client *Client) { |
| h := testHelper{t} |
| |
| b := client.Bucket(prefix + uidSpace.New()).SetObjectRetention(true) |
| |
| if err := b.Create(ctx, testutil.ProjID(), nil); err != nil { |
| t.Fatalf("error creating bucket: %v", err) |
| } |
| t.Cleanup(func() { h.mustDeleteBucket(b) }) |
| |
| retentionUnlocked := &ObjectRetention{ |
| Mode: "Unlocked", |
| RetainUntil: time.Now().Add(time.Minute * 20).Truncate(time.Second), |
| } |
| retentionUnlockedExtended := &ObjectRetention{ |
| Mode: "Unlocked", |
| RetainUntil: time.Now().Add(time.Hour).Truncate(time.Second), |
| } |
| |
| // Create an object with future retain until time |
| o := b.Object("retention-on-create" + uidSpaceObjects.New()) |
| w := o.NewWriter(ctx) |
| w.Retention = retentionUnlocked |
|