[artifactory] Special case disk.raw uploads
Artifacts named disk.raw are Google Compute images, and unfortunately
must be both tarred and gzipped to match the expected GCE format.
Bug: 9127
Change-Id: Ibcfc9adc11e3781533d14d52bd38e15bfa843b45
Reviewed-on: https://fuchsia-review.googlesource.com/c/fuchsia/+/442795
Commit-Queue: Anirudh Mathukumilli <rudymathu@google.com>
Reviewed-by: Ina Huh <ihuh@google.com>
Reviewed-by: Anthony Fandrianto <atyfto@google.com>
Testability-Review: Ina Huh <ihuh@google.com>
diff --git a/tools/artifactory/cmd/up.go b/tools/artifactory/cmd/up.go
index 5a74a81..7b47e9a 100644
--- a/tools/artifactory/cmd/up.go
+++ b/tools/artifactory/cmd/up.go
@@ -5,6 +5,7 @@
package main
import (
+ "archive/tar"
"bytes"
"compress/gzip"
"context"
@@ -228,7 +229,10 @@
}
// Sign the images for release builds.
- images := artifactory.ImageUploads(m, path.Join(buildsUUIDDir, imageDirName))
+ images, err := artifactory.ImageUploads(m, path.Join(buildsUUIDDir, imageDirName))
+ if err != nil {
+ return err
+ }
if pkey != nil {
images, err = artifactory.Sign(images, pkey)
if err != nil {
@@ -322,7 +326,7 @@
// Write writes the content of a reader to a sink object at the given name.
// If an object at that name does not exist, it will be created; else it
// will be overwritten.
- write(ctx context.Context, name string, reader io.Reader, compress bool, metadata map[string]string) error
+ write(ctx context.Context, upload *artifactory.Upload) error
}
// CloudSink is a GCS-backed data sink.
@@ -366,19 +370,30 @@
return n, err
}
-func (s *cloudSink) write(ctx context.Context, name string, reader io.Reader, compress bool, metadata map[string]string) error {
- obj := s.bucket.Object(name)
+func (s *cloudSink) write(ctx context.Context, upload *artifactory.Upload) error {
+ var reader io.Reader
+ if upload.Source != "" {
+ f, err := os.Open(upload.Source)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ reader = f
+ } else {
+ reader = bytes.NewBuffer(upload.Contents)
+ }
+ obj := s.bucket.Object(upload.Destination)
// Setting timeouts to fail fast on hung connections.
tctx, cancel := context.WithTimeout(ctx, perFileUploadTimeout)
defer cancel()
sw := obj.If(storage.Conditions{DoesNotExist: true}).NewWriter(tctx)
sw.ChunkSize = chunkSize
sw.ContentType = "application/octet-stream"
- if compress {
+ if upload.Compress {
sw.ContentEncoding = "gzip"
}
- if metadata != nil {
- sw.Metadata = metadata
+ if upload.Metadata != nil {
+ sw.Metadata = upload.Metadata
}
// We optionally compress on the fly, and calculate the MD5 on the
@@ -390,10 +405,19 @@
// Note that a gzip compressor would need to be closed before the storage
// writer that it wraps is.
h := &hasher{md5.New(), sw}
- var writeErr, zipErr error
- if compress {
+ var writeErr, tarErr, zipErr error
+ if upload.Compress {
gzw := gzip.NewWriter(h)
- _, writeErr = io.Copy(gzw, reader)
+ if upload.TarHeader != nil {
+ tw := tar.NewWriter(gzw)
+ writeErr = tw.WriteHeader(upload.TarHeader)
+ if writeErr == nil {
+ _, writeErr = io.Copy(tw, reader)
+ }
+ tarErr = tw.Close()
+ } else {
+ _, writeErr = io.Copy(gzw, reader)
+ }
zipErr = gzw.Close()
} else {
_, writeErr = io.Copy(h, reader)
@@ -405,12 +429,15 @@
// Note: consider an errorsmisc.FirstNonNil() helper if we see this logic again.
err := writeErr
if err == nil {
+ err = tarErr
+ }
+ if err == nil {
err = zipErr
}
if err == nil {
err = closeErr
}
- if err = checkGCSErr(ctx, err, name); err != nil {
+ if err = checkGCSErr(ctx, err, upload.Destination); err != nil {
return err
}
@@ -424,20 +451,20 @@
for {
attrs, err := obj.Attrs(ctx)
if err != nil {
- return fmt.Errorf("failed to confirm MD5 for %s due to: %w", name, err)
+ return fmt.Errorf("failed to confirm MD5 for %s due to: %w", upload.Destination, err)
}
if len(attrs.MD5) == 0 {
time.Sleep(t)
if t += t / 2; t > max {
t = max
}
- logger.Debugf(ctx, "waiting for MD5 for %s", name)
+ logger.Debugf(ctx, "waiting for MD5 for %s", upload.Destination)
continue
}
if !bytes.Equal(attrs.MD5, d) {
- return fmt.Errorf("MD5 mismatch for %s", name)
+ return fmt.Errorf("MD5 mismatch for %s", upload.Destination)
}
- logger.Infof(ctx, "Uploaded: %s", name)
+ logger.Infof(ctx, "Uploaded: %s", upload.Destination)
break
}
return nil
@@ -565,18 +592,7 @@
logger.Debugf(ctx, "object %q: attempting creation", upload.Destination)
if err := retry.Retry(ctx, retry.WithMaxAttempts(retry.NewConstantBackoff(uploadRetryBackoff), maxUploadAttempts), func() error {
- var src io.Reader
- if upload.Source != "" {
- f, err := os.Open(upload.Source)
- if err != nil {
- return err
- }
- defer f.Close()
- src = f
- } else {
- src = bytes.NewBuffer(upload.Contents)
- }
- if err := dest.write(ctx, upload.Destination, src, upload.Compress, upload.Metadata); err != nil {
+ if err := dest.write(ctx, &upload); err != nil {
return fmt.Errorf("%s: %w", upload.Destination, err)
}
return nil
diff --git a/tools/artifactory/cmd/up_test.go b/tools/artifactory/cmd/up_test.go
index f251f7b..1ba1793 100644
--- a/tools/artifactory/cmd/up_test.go
+++ b/tools/artifactory/cmd/up_test.go
@@ -5,6 +5,7 @@
package main
import (
+ "bytes"
"context"
"io"
"io/ioutil"
@@ -44,14 +45,25 @@
return true, nil
}
-func (s *memSink) write(ctx context.Context, name string, r io.Reader, _ bool, _ map[string]string) error {
+func (s *memSink) write(ctx context.Context, upload *artifactory.Upload) error {
s.mutex.Lock()
defer s.mutex.Unlock()
- content, err := ioutil.ReadAll(r)
+ var reader io.Reader
+ if upload.Source != "" {
+ f, err := os.Open(upload.Source)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ reader = f
+ } else {
+ reader = bytes.NewBuffer(upload.Contents)
+ }
+ content, err := ioutil.ReadAll(reader)
if err != nil {
return err
}
- s.contents[name] = content
+ s.contents[upload.Destination] = content
return nil
}
diff --git a/tools/artifactory/images.go b/tools/artifactory/images.go
index a2896b0..cf1de39 100644
--- a/tools/artifactory/images.go
+++ b/tools/artifactory/images.go
@@ -5,19 +5,32 @@
package artifactory
import (
+ "archive/tar"
+ "log"
+ "os"
"path"
"path/filepath"
"go.fuchsia.dev/fuchsia/tools/build"
)
+const (
+ // uefiImageName is the canonical name of an x64 UEFI image in the
+ // manifest.
+ uefiImageName = "uefi-disk"
+ // gceUploadName is the canonical name of the uploaded GCE image.
+ gceUploadName = "disk.tar.gz"
+ // gceImageName is the canonical expected name of a source image in GCE.
+ gceImageName = "disk.raw"
+)
+
// ImageUploads parses the image manifest located in the build and returns a
// list of Uploads for the images used for testing.
-func ImageUploads(mods *build.Modules, namespace string) []Upload {
+func ImageUploads(mods *build.Modules, namespace string) ([]Upload, error) {
return imageUploads(mods, namespace)
}
-func imageUploads(mods imgModules, namespace string) []Upload {
+func imageUploads(mods imgModules, namespace string) ([]Upload, error) {
manifestName := filepath.Base(mods.ImageManifest())
files := []Upload{
@@ -31,15 +44,36 @@
seen := make(map[string]struct{})
for _, img := range mods.Images() {
if _, ok := seen[img.Path]; !ok {
- files = append(files, Upload{
- Source: filepath.Join(mods.BuildDir(), img.Path),
- Destination: path.Join(namespace, img.Path),
- Compress: true,
- })
+ if img.Name == uefiImageName {
+ srcPath := filepath.Join(mods.BuildDir(), img.Path)
+ info, err := os.Stat(srcPath)
+ if err != nil {
+ log.Printf("failed to stat gce image on disk: %s", err)
+ continue
+ }
+ dest := filepath.Join(filepath.Dir(img.Path), gceUploadName)
+ files = append(files, Upload{
+ Source: srcPath,
+ Destination: path.Join(namespace, dest),
+ Compress: true,
+ TarHeader: &tar.Header{
+ Format: tar.FormatGNU,
+ Name: gceImageName,
+ Mode: 0666,
+ Size: info.Size(),
+ },
+ })
+ } else {
+ files = append(files, Upload{
+ Source: filepath.Join(mods.BuildDir(), img.Path),
+ Destination: path.Join(namespace, img.Path),
+ Compress: true,
+ })
+ }
seen[img.Path] = struct{}{}
}
}
- return files
+ return files, nil
}
type imgModules interface {
diff --git a/tools/artifactory/images_test.go b/tools/artifactory/images_test.go
index 0e47e33..de8800c 100644
--- a/tools/artifactory/images_test.go
+++ b/tools/artifactory/images_test.go
@@ -5,6 +5,10 @@
package artifactory
import (
+ "archive/tar"
+ "io"
+ "io/ioutil"
+ "os"
"path/filepath"
"reflect"
"testing"
@@ -14,11 +18,12 @@
// Implements imgModules
type mockModules struct {
- imgs []build.Image
+ imgs []build.Image
+ buildDir string
}
func (m mockModules) BuildDir() string {
- return "BUILD_DIR"
+ return m.buildDir
}
func (m mockModules) ImageManifest() string {
@@ -30,7 +35,29 @@
}
func TestImageUploads(t *testing.T) {
+ // Create a temporary disk.raw image.
+ dir, err := ioutil.TempDir("", "testBuildDir")
+ if err != nil {
+ t.Fatalf("failed to create fake build dir: %s", err)
+ }
+ defer os.RemoveAll(dir)
+ f, err := ioutil.TempFile(dir, "disk.raw")
+ if err != nil {
+ t.Fatalf("failed to create fake disk.raw: %s", err)
+ }
+ defer f.Close()
+ if _, err := io.WriteString(f, "Hello World!"); err != nil {
+ t.Fatalf("failed to write to fake disk.raw file: %s", err)
+ }
+ if err := f.Sync(); err != nil {
+ t.Fatalf("failed to sync fake disk.raw: %s", err)
+ }
+ info, err := f.Stat()
+ if err != nil {
+ t.Fatalf("failed to get file info for fake disk.raw: %s", err)
+ }
m := &mockModules{
+ buildDir: dir,
imgs: []build.Image{
{
PaveArgs: []string{"--bootloader"},
@@ -61,36 +88,54 @@
Path: "qemu-kernel.bin",
Type: "kernel",
},
+ {
+ Name: "uefi-disk",
+ Path: filepath.Base(f.Name()),
+ Type: "blk",
+ },
},
}
-
expected := []Upload{
{
Source: "BUILD_DIR/IMAGE_MANIFEST",
Destination: "namespace/IMAGE_MANIFEST",
},
{
- Source: filepath.Join("BUILD_DIR", "bootloader"),
+ Source: filepath.Join(dir, "bootloader"),
Destination: "namespace/bootloader",
Compress: true,
},
{
- Source: filepath.Join("BUILD_DIR", "zedboot.zbi"),
+ Source: filepath.Join(dir, "zedboot.zbi"),
Destination: "namespace/zedboot.zbi",
Compress: true,
},
{
- Source: filepath.Join("BUILD_DIR", "fuchsia.zbi"),
+ Source: filepath.Join(dir, "fuchsia.zbi"),
Destination: "namespace/fuchsia.zbi",
Compress: true,
},
{
- Source: filepath.Join("BUILD_DIR", "qemu-kernel.bin"),
+ Source: filepath.Join(dir, "qemu-kernel.bin"),
Destination: "namespace/qemu-kernel.bin",
Compress: true,
},
+ {
+ Source: f.Name(),
+ Destination: filepath.Join("namespace", gceUploadName),
+ Compress: true,
+ TarHeader: &tar.Header{
+ Format: tar.FormatGNU,
+ Name: gceImageName,
+ Mode: 0666,
+ Size: info.Size(),
+ },
+ },
}
- actual := imageUploads(m, "namespace")
+ actual, err := imageUploads(m, "namespace")
+ if err != nil {
+ t.Fatalf("imageUploads failed: %s", err)
+ }
if !reflect.DeepEqual(actual, expected) {
t.Fatalf("unexpected image uploads:\nexpected: %v\nactual: %v\n", expected, actual)
}
diff --git a/tools/artifactory/upload.go b/tools/artifactory/upload.go
index cf3593f..077d60e 100644
--- a/tools/artifactory/upload.go
+++ b/tools/artifactory/upload.go
@@ -4,6 +4,10 @@
package artifactory
+import (
+ "archive/tar"
+)
+
// Upload is a struct that contains source and destination paths to files to
// upload to GCS.
type Upload struct {
@@ -31,4 +35,8 @@
// Metadata contains the metadata to be uploaded with the file.
Metadata map[string]string
+
+ // TarHeader, if non-nil, indicates that the file should be wrapped in a
+ // tar archive using this header before compression is applied.
+ TarHeader *tar.Header
}