Add oc image append which adds layers to a schema1/2 image
This command can take zero or more gzipped layer tars (in Docker layer
format) and append them to an existing image or a scratch image and then
push the new image to a registry. Layers in the existing image are
pushed as well. The caller can mutate the provided config as it goes.
smarterclayton committed Jun 18, 2018
1 parent cdc531a commit aa7a362
Showing 10 changed files with 1,263 additions and 21 deletions.
24 changes: 24 additions & 0 deletions pkg/image/apis/image/docker10/conversion.go
@@ -0,0 +1,24 @@
package docker10

// Convert_DockerV1CompatibilityImage_to_DockerImageConfig takes the v1Compatibility image
// embedded in a Docker schema 1 manifest and converts it to a DockerImageConfig.
func Convert_DockerV1CompatibilityImage_to_DockerImageConfig(in *DockerV1CompatibilityImage, out *DockerImageConfig) error {
	*out = DockerImageConfig{
		ID:              in.ID,
		Parent:          in.Parent,
		Comment:         in.Comment,
		Created:         in.Created,
		Container:       in.Container,
		DockerVersion:   in.DockerVersion,
		Author:          in.Author,
		Architecture:    in.Architecture,
		Size:            in.Size,
		OS:              "linux",
		ContainerConfig: in.ContainerConfig,
	}
	if in.Config != nil {
		out.Config = &DockerConfig{}
		*out.Config = *in.Config
	}
	return nil
}
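
For orientation, here is a minimal sketch of how this helper might be exercised when reading a Docker schema 1 manifest: each history entry carries a v1Compatibility JSON blob that can be unmarshalled into a DockerV1CompatibilityImage and then converted. The wrapper function configFromV1Compatibility and the package name are illustrative assumptions, not part of this commit.

package example

import (
	"encoding/json"
	"fmt"

	"github.com/openshift/origin/pkg/image/apis/image/docker10"
)

// configFromV1Compatibility (hypothetical helper) parses a schema 1 v1Compatibility
// payload and converts it to the config representation used elsewhere in this commit.
func configFromV1Compatibility(v1CompatibilityJSON []byte) (*docker10.DockerImageConfig, error) {
	compat := &docker10.DockerV1CompatibilityImage{}
	if err := json.Unmarshal(v1CompatibilityJSON, compat); err != nil {
		return nil, fmt.Errorf("invalid v1Compatibility payload: %v", err)
	}
	config := &docker10.DockerImageConfig{}
	if err := docker10.Convert_DockerV1CompatibilityImage_to_DockerImageConfig(compat, config); err != nil {
		return nil, err
	}
	return config, nil
}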
356 changes: 356 additions & 0 deletions pkg/image/dockerlayer/add/add.go
@@ -0,0 +1,356 @@
package add

import (
	"compress/gzip"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"runtime"
	"time"

	"github.com/docker/distribution"
	"github.com/docker/distribution/manifest/schema2"
	digest "github.com/opencontainers/go-digest"

	"github.com/openshift/origin/pkg/image/apis/image/docker10"
	"github.com/openshift/origin/pkg/image/dockerlayer"
)

// get base manifest
// check that I can access base layers
// find the input file (assume I can stream)
// start a streaming upload of the layer to the remote registry, while calculating digests
// get back the final digest
// build the new image manifest and config.json
// upload config.json
// upload the rest of the layers
// tag the image

const (
	// dockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
	dockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
	// dockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
	dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
)

// DigestCopy reads all of src into dst, where src is a gzipped stream. It returns the
// sha256 sum of the uncompressed layer contents (the layerDigest, used as the diffID) and
// the sha256 sum of the compressed stream as uploaded (the blobDigest), or an error. If
// the gzip header carries a modification time it is returned as well.
// TODO: use configurable digests
func DigestCopy(dst io.ReaderFrom, src io.Reader) (layerDigest, blobDigest digest.Digest, modTime *time.Time, size int64, err error) {
	algo := digest.Canonical
	// calculate the blob digest as the sha256 sum of the uploaded contents
	blobhash := algo.Hash()
	// calculate the diffID as the sha256 sum of the layer contents
	pr, pw := io.Pipe()
	layerhash := algo.Hash()
	ch := make(chan error)
	go func() {
		defer close(ch)
		gr, err := gzip.NewReader(pr)
		if err != nil {
			ch <- fmt.Errorf("unable to create gzip reader layer upload: %v", err)
			return
		}
		if !gr.Header.ModTime.IsZero() {
			modTime = &gr.Header.ModTime
		}
		_, err = io.Copy(layerhash, gr)
		ch <- err
	}()

	n, err := dst.ReadFrom(io.TeeReader(src, io.MultiWriter(blobhash, pw)))
	if err != nil {
		return "", "", nil, 0, fmt.Errorf("unable to upload new layer (%d): %v", n, err)
	}
	if err := pw.Close(); err != nil {
		return "", "", nil, 0, fmt.Errorf("unable to complete writing diffID: %v", err)
	}
	if err := <-ch; err != nil {
		return "", "", nil, 0, fmt.Errorf("unable to calculate layer diffID: %v", err)
	}

	layerDigest = digest.NewDigestFromBytes(algo, layerhash.Sum(make([]byte, 0, layerhash.Size())))
	blobDigest = digest.NewDigestFromBytes(algo, blobhash.Sum(make([]byte, 0, blobhash.Size())))
	return layerDigest, blobDigest, modTime, n, nil
}

// NewEmptyConfig returns a minimal image config suitable for building an image from scratch.
func NewEmptyConfig() *docker10.DockerImageConfig {
	config := &docker10.DockerImageConfig{
		DockerVersion: "",
		// Created must be non-zero
		Created:      (time.Time{}).Add(1 * time.Second),
		OS:           runtime.GOOS,
		Architecture: runtime.GOARCH,
	}
	return config
}

// AddScratchLayerToConfig records the canonical empty (scratch) gzipped layer in the config
// and returns its descriptor so it can be referenced from a manifest.
func AddScratchLayerToConfig(config *docker10.DockerImageConfig) distribution.Descriptor {
	layer := distribution.Descriptor{
		MediaType: dockerV2Schema2LayerMediaType,
		Digest:    digest.Digest(dockerlayer.GzippedEmptyLayerDigest),
		Size:      int64(len(dockerlayer.GzippedEmptyLayer)),
	}
	AddLayerToConfig(config, layer, dockerlayer.EmptyLayerDiffID)
	return layer
}

// AddLayerToConfig appends the layer's diffID to the config's rootfs and adds the layer
// size to the config's total size.
func AddLayerToConfig(config *docker10.DockerImageConfig, layer distribution.Descriptor, diffID string) {
	if config.RootFS == nil {
		config.RootFS = &docker10.DockerConfigRootFS{Type: "layers"}
	}
	config.RootFS.DiffIDs = append(config.RootFS.DiffIDs, diffID)
	config.Size += layer.Size
}

// UploadSchema2Config recalculates the image size from the provided layers, uploads the
// marshalled config to the blob store, and returns a schema 2 manifest referencing both.
func UploadSchema2Config(ctx context.Context, blobs distribution.BlobService, config *docker10.DockerImageConfig, layers []distribution.Descriptor) (*schema2.DeserializedManifest, error) {
	// ensure the image size is correct before persisting
	config.Size = 0
	for _, layer := range layers {
		config.Size += layer.Size
	}
	configJSON, err := json.Marshal(config)
	if err != nil {
		return nil, err
	}
	return putSchema2ImageConfig(ctx, blobs, dockerV2Schema2ConfigMediaType, configJSON, layers)
}

// putSchema2ImageConfig uploads the provided configJSON to the blob store and returns the
// generated manifest for the requested image.
func putSchema2ImageConfig(ctx context.Context, blobs distribution.BlobService, mediaType string, configJSON []byte, layers []distribution.Descriptor) (*schema2.DeserializedManifest, error) {
	b := schema2.NewManifestBuilder(blobs, mediaType, configJSON)
	for _, layer := range layers {
		if err := b.AppendReference(layer); err != nil {
			return nil, err
		}
	}
	m, err := b.Build(ctx)
	if err != nil {
		return nil, err
	}
	manifest, ok := m.(*schema2.DeserializedManifest)
	if !ok {
		return nil, fmt.Errorf("unable to turn %T into a DeserializedManifest, unable to store image", m)
	}
	return manifest, nil
}

/*
func (r *InstantiateREST) completeInstantiate(ctx apirequest.Context, tag string, target *imageapi.ImageStream, imageInstantiate *imageapi.ImageStreamTagInstantiate, layerBody io.Reader, mediaType string) (runtime.Object, error) {
// TODO: load this from the default registry function
insecure := true
ref, u, err := registryTarget(target, r.defaultRegistry)
if err != nil {
return nil, err
}
// verify the user has access to the From image, if any is specified
baseImageName, baseImageRepository, err := r.resolveTagInstantiateToImage(ctx, target, imageInstantiate)
if err != nil {
return nil, err
}
// no layer, so we load our base image (if necessary)
var created time.Time
var baseImage *imageapi.Image
var sourceRepo distribution.Repository
if len(baseImageName) > 0 {
image, err := r.imageRegistry.GetImage(ctx, baseImageName, &metav1.GetOptions{})
if err != nil {
return nil, err
}
baseImage = image
sourceRepo, err = r.repository.Repository(ctx, u, baseImageRepository, insecure)
if err != nil {
return nil, errors.NewInternalError(fmt.Errorf("could not contact integrated registry: %v", err))
}
glog.V(4).Infof("Using base image for instantiate of tag %s: %s from %s", imageInstantiate.Name, baseImageName, baseImageRepository)
created = image.DockerImageMetadata.Created.Time
}
imageRepository := imageapi.DockerImageReference{Namespace: ref.Namespace, Name: ref.Name}.Exact()
repo, err := r.repository.Repository(ctx, u, imageRepository, insecure)
if err != nil {
return nil, errors.NewInternalError(fmt.Errorf("could not contact integrated registry: %v", err))
}
var imageLayer *imageapi.ImageLayer
var imageLayerDiffID digest.Digest
if layerBody != nil {
desc, diffID, modTime, err := uploadLayer(ctx, layerBody, repo, mediaType)
if err != nil {
return nil, errors.NewInternalError(fmt.Errorf("unable to upload new image layer: %v", err))
}
imageLayer = &imageapi.ImageLayer{
Name: desc.Digest.String(),
LayerSize: desc.Size,
MediaType: mediaType,
}
imageLayerDiffID = diffID
if modTime != nil && created.Before(*modTime) {
created = *modTime
}
}
target, image, err := instantiateImage(
ctx, r.gr,
repo, sourceRepo, r.imageStreamRegistry, r.imageRegistry,
target, baseImage, imageInstantiate, created,
imageLayer, imageLayerDiffID,
*ref,
)
if err != nil {
glog.V(4).Infof("Failed cloning into tag %s: %v", imageInstantiate.Name, err)
return nil, err
}
return newISTag(tag, target, image, false)
}
// instantiateImage assembles the new image, saves it to the registry, then saves an image and tags the
// image stream.
func instantiateImage(
ctx apirequest.Context, gr schema.GroupResource,
repo, sourceRepo distribution.Repository,
base *docker10.DockerImageConfig,
layer *imageapi.ImageLayer, diffID digest.Digest,
imageReference imageapi.DockerImageReference,
) (*imageapi.ImageStream, *imageapi.Image, error) {
// create a new config.json representing the image
imageConfig := *base
imageConfig.Size = 0
imageConfig.RootFS = &docker10.DockerConfigRootFS{Type: "layers"}
// TODO: resolve
// History []DockerConfigHistory
// OSVersion string
// OSFeatures []string
layers, err := calculateUpdatedImageConfig(ctx, &imageConfig, base, layer, diffID, sourceRepo)
if err != nil {
return nil, nil, errors.NewInternalError(fmt.Errorf("unable to generate a new image configuration: %v", err))
}
configJSON, err := json.Marshal(&imageConfig)
if err != nil {
return nil, nil, errors.NewInternalError(fmt.Errorf("unable to marshal the new image config.json: %v", err))
}
// generate a manifest for that config.json
glog.V(5).Infof("Saving layer %s onto %q with configJSON:\n%s", diffID, imageInstantiate.Name, configJSON)
blobs := repo.Blobs(ctx)
image, err := importer.SerializeImageAsSchema2Manifest(ctx, blobs, configJSON, layers)
if err != nil {
return nil, nil, errors.NewInternalError(fmt.Errorf("unable to generate a new image manifest: %v", err))
}
// create the manifest as an image
imageReference.ID = image.Name
image.DockerImageReference = imageReference.Exact()
if err := images.CreateImage(ctx, image); err != nil && !errors.IsAlreadyExists(err) {
return nil, nil, err
}
return stream, image, err
}
// calculateUpdatedImageConfig generates a new image config.json with the provided info.
func calculateUpdatedImageConfig(
ctx apirequest.Context,
imageConfig *imageapi.DockerImageConfig,
base *imageapi.Image,
layer *imageapi.ImageLayer,
diffID digest.Digest,
sourceRepo distribution.Repository,
) ([]imageapi.ImageLayer, error) {
var layers []imageapi.ImageLayer
// initialize with the base
if base != nil {
layers = append(layers, base.DockerImageLayers...)
for i := range layers {
imageConfig.Size += layers[i].LayerSize
}
// need to look up the rootFS
manifests, err := sourceRepo.Manifests(ctx)
if err != nil {
return nil, err
}
m, err := manifests.Get(ctx, digest.Digest(base.Name))
if err != nil {
return nil, err
}
var contents []byte
switch t := m.(type) {
case *schema2.DeserializedManifest:
if t.Config.MediaType != manifest.DockerV2Schema2ConfigMediaType {
return nil, fmt.Errorf("unrecognized config: %s", t.Config.MediaType)
}
contents, err = sourceRepo.Blobs(ctx).Get(ctx, t.Config.Digest)
if err != nil {
return nil, fmt.Errorf("unreadable config %s: %v", t.Config.Digest, err)
}
existingImageConfig := &imageapi.DockerImageConfig{}
if err := json.Unmarshal(contents, existingImageConfig); err != nil {
return nil, fmt.Errorf("manifest unreadable %s: %v", base.Name, err)
}
if existingImageConfig.RootFS == nil || existingImageConfig.RootFS.Type != "layers" {
return nil, fmt.Errorf("unable to find rootFs description from base image %s", base.Name)
}
imageConfig.OS = existingImageConfig.OS
imageConfig.Architecture = existingImageConfig.Architecture
imageConfig.OSFeatures = existingImageConfig.OSFeatures
imageConfig.OSVersion = existingImageConfig.OSVersion
imageConfig.RootFS.DiffIDs = existingImageConfig.RootFS.DiffIDs
case *schema1.SignedManifest:
digest := digest.FromBytes(t.Canonical)
contents, err = sourceRepo.Blobs(ctx).Get(ctx, digest)
if err != nil {
return nil, fmt.Errorf("unreadable config %s: %v", digest, err)
}
for _, layer := range t.FSLayers {
imageConfig.RootFS.DiffIDs = append(imageConfig.RootFS.DiffIDs, layer.BlobSum.String())
}
default:
return nil, fmt.Errorf("unrecognized manifest: %T", m)
}
}
// add the optional layer if provided
if layer != nil {
// the layer goes at the front - the most recent image is always first
layers = append(layers, *layer)
imageConfig.Size += layer.LayerSize
imageConfig.RootFS.DiffIDs = append(imageConfig.RootFS.DiffIDs, diffID.String())
}
// add the scratch layer in if no other layers exist
if len(layers) == 0 {
layers = append(layers, imageapi.ImageLayer{
Name: dockerlayer.GzippedEmptyLayerDigest.String(),
LayerSize: int64(len(dockerlayer.GzippedEmptyLayer)),
MediaType: manifest.DockerV2Schema2LayerMediaType,
})
imageConfig.RootFS.DiffIDs = append(imageConfig.RootFS.DiffIDs, dockerlayer.EmptyLayerDiffID.String())
imageConfig.Size += layers[0].LayerSize
}
// the metav1 serialization of zero is not parseable by the Docker daemon, therefore
// we must store a zero+1 value
if imageConfig.Created.IsZero() {
imageConfig.Created = metav1.Time{imageConfig.Created.Add(1 * time.Second)}
}
return layers, nil
}
*/
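
To tie the pieces together, below is a rough sketch of the flow outlined in the comments at the top of add.go: stream one gzipped layer into a repository while computing its digests, record it in a fresh config (which the caller may mutate before upload, as the commit message notes), upload config.json, and tag the resulting schema 2 manifest. The appendLayer helper, the package name, and the abbreviated error handling are illustrative assumptions; obtaining the distribution.Repository client is out of scope here.

package example

import (
	"context"
	"fmt"
	"io"

	"github.com/docker/distribution"

	"github.com/openshift/origin/pkg/image/dockerlayer/add"
)

// appendLayer (hypothetical helper) pushes a single gzipped layer plus a new config and
// manifest to repo, tagging the result. It mirrors the steps listed at the top of add.go.
func appendLayer(ctx context.Context, repo distribution.Repository, tag string, gzippedLayer io.Reader) error {
	blobs := repo.Blobs(ctx)

	// start a streaming upload of the layer to the remote registry, calculating digests as we go
	bw, err := blobs.Create(ctx)
	if err != nil {
		return err
	}
	layerDigest, blobDigest, _, size, err := add.DigestCopy(bw, gzippedLayer)
	if err != nil {
		return err
	}
	desc, err := bw.Commit(ctx, distribution.Descriptor{Digest: blobDigest, Size: size})
	if err != nil {
		return err
	}
	desc.MediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"

	// build the new config.json, referencing the uploaded layer by its diffID
	config := add.NewEmptyConfig()
	add.AddLayerToConfig(config, desc, layerDigest.String())
	// the caller can mutate the config before it is persisted, e.g. to record an author
	config.Author = "example"

	// upload config.json and assemble the schema 2 manifest
	manifest, err := add.UploadSchema2Config(ctx, blobs, config, []distribution.Descriptor{desc})
	if err != nil {
		return err
	}

	// tag the image
	manifests, err := repo.Manifests(ctx)
	if err != nil {
		return err
	}
	if _, err := manifests.Put(ctx, manifest, distribution.WithTag(tag)); err != nil {
		return err
	}
	fmt.Printf("tagged %s with layer %s\n", tag, desc.Digest)
	return nil
}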