Mirror of https://github.com/microsoft/docker.git
Change push to use manifest builder
Currently this always uses the schema1 manifest builder. Later, it will be changed to attempt schema2 first, and fall back when necessary.

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Parent: c168a0059f
Commit: f33fa1b8d3
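For orientation, here is a condensed sketch of the builder-based flow this commit introduces in pushV2Tag, adapted from the diff below; the surrounding setup (p.repo, p.config.TrustKey, img, ref, tag, and the uploaded layer descriptors) is assumed to exist as in the changed code.

	// Sketch only, adapted from the pushV2Tag changes in this commit.
	builder := schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), tag, img.RawJSON())

	// descriptors is in reverse order; iterate backwards so references are
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return err
		}
	}

	// Build produces a signed schema1 manifest; per the commit message, a later
	// change is expected to try schema2 first and fall back to this path.
	manifest, err := builder.Build(ctx)
	if err != nil {
		return err
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		_, err = manSvc.Put(ctx, manifest, client.WithTag(tagged.Tag()))
	} else {
		_, err = manSvc.Put(ctx, manifest)
	}
	return err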
@@ -7,7 +7,6 @@ import (
	"io"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"

@@ -77,7 +76,6 @@ func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *reg
			endpoint: endpoint,
			repoInfo: repoInfo,
			config: imagePushConfig,
			layersPushed: pushMap{layersPushed: make(map[digest.Digest]bool)},
		}, nil
	case registry.APIVersion1:
		return &v1Pusher{

@@ -1,22 +1,18 @@
package distribution

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/image"
	"github.com/docker/docker/image/v1"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"

@@ -43,31 +39,34 @@ type v2Pusher struct {
	config *ImagePushConfig
	repo distribution.Repository

	// pushState is state built by the Download functions.
	pushState pushState
}

type pushState struct {
	sync.Mutex
	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool

	// layersPushed is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers.
	layersPushed pushMap
}

type pushMap struct {
	sync.Mutex
	layersPushed map[digest.Digest]bool
}

func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.confirmedV2}
		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if registry.ContinueOnError(err) {
			return fallbackError{err: err, confirmedV2: p.confirmedV2}
			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
		}
	}
	return err

@@ -134,18 +133,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
	descriptorTemplate := v2PushDescriptor{
		blobSumService: p.blobSumService,
		repo: p.repo,
		layersPushed: &p.layersPushed,
		confirmedV2: &p.confirmedV2,
	}

	// Push empty layer if necessary
	for _, h := range img.History {
		if h.EmptyLayer {
			descriptor := descriptorTemplate
			descriptor.layer = layer.EmptyLayer
			descriptors = []xfer.UploadDescriptor{&descriptor}
			break
		}
		pushState: &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.

@@ -157,8 +145,7 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
		l = l.Parent()
	}

	fsLayers, err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput)
	if err != nil {
	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

@@ -166,18 +153,22 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tag = tagged.Tag()
	}
	m, err := CreateV2Manifest(p.repo.Name(), tag, img, fsLayers)
	builder := schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), tag, img.RawJSON())

	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return err
		}
	}

	manifest, err := builder.Build(ctx)
	if err != nil {
		return err
	}

	logrus.Infof("Signed manifest for %s using daemon's key: %s", ref.String(), p.config.TrustKey.KeyID())
	signed, err := schema1.Sign(m, p.config.TrustKey)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(signed, ref)
	manifestDigest, manifestSize, err := digestFromManifest(manifest.(*schema1.SignedManifest), ref)
	if err != nil {
		return err
	}

@@ -194,7 +185,12 @@ func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Associat
	if err != nil {
		return err
	}
	_, err = manSvc.Put(ctx, signed)

	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		_, err = manSvc.Put(ctx, manifest, client.WithTag(tagged.Tag()))
	} else {
		_, err = manSvc.Put(ctx, manifest)
	}
	// FIXME create a tag
	return err
}

@@ -203,8 +199,7 @@ type v2PushDescriptor struct {
	layer layer.Layer
	blobSumService *metadata.BlobSumService
	repo distribution.Repository
	layersPushed *pushMap
	confirmedV2 *bool
	pushState *pushState
}

func (pd *v2PushDescriptor) Key() string {

@@ -219,25 +214,38 @@ func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}

func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	diffID := pd.DiffID()

	logrus.Debugf("Pushing layer: %s", diffID)
	pd.pushState.Lock()
	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return nil
	}
	pd.pushState.Unlock()

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
	if err == nil {
		dgst, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.layersPushed)
		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return "", retryOnError(err)
			return retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			return dgst, nil
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

@@ -245,13 +253,13 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
	// Send the layer
	layerUpload, err := bs.Create(ctx)
	if err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return "", xfer.DoNotRetry{Err: err}
		return xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort

@@ -267,177 +275,62 @@ func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.
	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return "", retryOnError(err)
		return retryOnError(err)
	}

	// If Commit succeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	*pd.confirmedV2 = true

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
		return "", xfer.DoNotRetry{Err: err}
		return xfer.DoNotRetry{Err: err}
	}

	pd.layersPushed.Lock()
	pd.layersPushed.layersPushed[pushDigest] = true
	pd.layersPushed.Unlock()
	pd.pushState.Lock()

	return pushDigest, nil
	// If Commit succeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
		Digest: pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size: nn,
	}

	pd.pushState.Unlock()

	return nil
}

func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	// Not necessary to lock pushStatus because this is always
	// called after all the mutation in pushStatus.
	// By the time this function is called, every layer will have
	// an entry in remoteLayers.
	return pd.pushState.remoteLayers[pd.DiffID()]
}

// blobSumAlreadyExists checks if the registry already know about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the known digest and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, layersPushed *pushMap) (digest.Digest, bool, error) {
	layersPushed.Lock()
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, dgst := range blobsums {
		if layersPushed.layersPushed[dgst] {
			// it is already known that the push is not needed and
			// therefore doing a stat is unnecessary
			layersPushed.Unlock()
			return dgst, true, nil
		}
	}
	layersPushed.Unlock()

	for _, dgst := range blobsums {
		_, err := repo.Blobs(ctx).Stat(ctx, dgst)
		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			return dgst, true, nil
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return "", false, err
			return distribution.Descriptor{}, false, err
		}
	}
	return "", false, nil
}

// CreateV2Manifest creates a V2 manifest from an image config and set of
// FSLayer digests.
// FIXME: This should be moved to the distribution repo, since it will also
// be useful for converting new manifests to the old format.
func CreateV2Manifest(name, tag string, img *image.Image, fsLayers map[layer.DiffID]digest.Digest) (*schema1.Manifest, error) {
	if len(img.History) == 0 {
		return nil, errors.New("empty history when trying to create V2 manifest")
	}

	// Generate IDs for each layer
	// For non-top-level layers, create fake V1Compatibility strings that
	// fit the format and don't collide with anything else, but don't
	// result in runnable images on their own.
	type v1Compatibility struct {
		ID string `json:"id"`
		Parent string `json:"parent,omitempty"`
		Comment string `json:"comment,omitempty"`
		Created time.Time `json:"created"`
		ContainerConfig struct {
			Cmd []string
		} `json:"container_config,omitempty"`
		ThrowAway bool `json:"throwaway,omitempty"`
	}

	fsLayerList := make([]schema1.FSLayer, len(img.History))
	history := make([]schema1.History, len(img.History))

	parent := ""
	layerCounter := 0
	for i, h := range img.History {
		if i == len(img.History)-1 {
			break
		}

		var diffID layer.DiffID
		if h.EmptyLayer {
			diffID = layer.EmptyLayer.DiffID()
		} else {
			if len(img.RootFS.DiffIDs) <= layerCounter {
				return nil, errors.New("too many non-empty layers in History section")
			}
			diffID = img.RootFS.DiffIDs[layerCounter]
			layerCounter++
		}

		fsLayer, present := fsLayers[diffID]
		if !present {
			return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
		}
		dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent))
		v1ID := dgst.Hex()

		v1Compatibility := v1Compatibility{
			ID: v1ID,
			Parent: parent,
			Comment: h.Comment,
			Created: h.Created,
		}
		v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy}
		if h.EmptyLayer {
			v1Compatibility.ThrowAway = true
		}
		jsonBytes, err := json.Marshal(&v1Compatibility)
		if err != nil {
			return nil, err
		}

		reversedIndex := len(img.History) - i - 1
		history[reversedIndex].V1Compatibility = string(jsonBytes)
		fsLayerList[reversedIndex] = schema1.FSLayer{BlobSum: fsLayer}

		parent = v1ID
	}

	latestHistory := img.History[len(img.History)-1]

	var diffID layer.DiffID
	if latestHistory.EmptyLayer {
		diffID = layer.EmptyLayer.DiffID()
	} else {
		if len(img.RootFS.DiffIDs) <= layerCounter {
			return nil, errors.New("too many non-empty layers in History section")
		}
		diffID = img.RootFS.DiffIDs[layerCounter]
	}
	fsLayer, present := fsLayers[diffID]
	if !present {
		return nil, fmt.Errorf("missing layer in CreateV2Manifest: %s", diffID.String())
	}

	fsLayerList[0] = schema1.FSLayer{BlobSum: fsLayer}
	dgst := digest.FromBytes([]byte(fsLayer.Hex() + " " + parent + " " + string(img.RawJSON())))

	// Top-level v1compatibility string should be a modified version of the
	// image config.
	transformedConfig, err := v1.MakeV1ConfigFromConfig(img, dgst.Hex(), parent, latestHistory.EmptyLayer)
	if err != nil {
		return nil, err
	}

	history[0].V1Compatibility = string(transformedConfig)

	// windows-only baselayer setup
	if err := setupBaseLayer(history, *img.RootFS); err != nil {
		return nil, err
	}

	return &schema1.Manifest{
		Versioned: manifest.Versioned{
			SchemaVersion: 1,
		},
		Name: name,
		Tag: tag,
		Architecture: img.Architecture,
		FSLayers: fsLayerList,
		History: history,
	}, nil
	return distribution.Descriptor{}, false, nil
}

@@ -1,176 +0,0 @@
package distribution

import (
	"reflect"
	"testing"

	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
	"github.com/docker/docker/layer"
)

func TestCreateV2Manifest(t *testing.T) {
	imgJSON := `{
	"architecture": "amd64",
	"config": {
		"AttachStderr": false,
		"AttachStdin": false,
		"AttachStdout": false,
		"Cmd": [
			"/bin/sh",
			"-c",
			"echo hi"
		],
		"Domainname": "",
		"Entrypoint": null,
		"Env": [
			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"derived=true",
			"asdf=true"
		],
		"Hostname": "23304fc829f9",
		"Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
		"Labels": {},
		"OnBuild": [],
		"OpenStdin": false,
		"StdinOnce": false,
		"Tty": false,
		"User": "",
		"Volumes": null,
		"WorkingDir": ""
	},
	"container": "e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001",
	"container_config": {
		"AttachStderr": false,
		"AttachStdin": false,
		"AttachStdout": false,
		"Cmd": [
			"/bin/sh",
			"-c",
			"#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"
		],
		"Domainname": "",
		"Entrypoint": null,
		"Env": [
			"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
			"derived=true",
			"asdf=true"
		],
		"Hostname": "23304fc829f9",
		"Image": "sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246",
		"Labels": {},
		"OnBuild": [],
		"OpenStdin": false,
		"StdinOnce": false,
		"Tty": false,
		"User": "",
		"Volumes": null,
		"WorkingDir": ""
	},
	"created": "2015-11-04T23:06:32.365666163Z",
	"docker_version": "1.9.0-dev",
	"history": [
		{
			"created": "2015-10-31T22:22:54.690851953Z",
			"created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
		},
		{
			"created": "2015-10-31T22:22:55.613815829Z",
			"created_by": "/bin/sh -c #(nop) CMD [\"sh\"]"
		},
		{
			"created": "2015-11-04T23:06:30.934316144Z",
			"created_by": "/bin/sh -c #(nop) ENV derived=true",
			"empty_layer": true
		},
		{
			"created": "2015-11-04T23:06:31.192097572Z",
			"created_by": "/bin/sh -c #(nop) ENV asdf=true",
			"empty_layer": true
		},
		{
			"created": "2015-11-04T23:06:32.083868454Z",
			"created_by": "/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"
		},
		{
			"created": "2015-11-04T23:06:32.365666163Z",
			"created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]",
			"empty_layer": true
		}
	],
	"os": "linux",
	"rootfs": {
		"diff_ids": [
			"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
			"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
			"sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
		],
		"type": "layers"
	}
}`

	// To fill in rawJSON
	img, err := image.NewFromJSON([]byte(imgJSON))
	if err != nil {
		t.Fatalf("json decoding failed: %v", err)
	}

	fsLayers := map[layer.DiffID]digest.Digest{
		layer.DiffID("sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1"): digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
		layer.DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"): digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
		layer.DiffID("sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"): digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
	}

	manifest, err := CreateV2Manifest("testrepo", "testtag", img, fsLayers)
	if err != nil {
		t.Fatalf("CreateV2Manifest returned error: %v", err)
	}

	if manifest.Versioned.SchemaVersion != 1 {
		t.Fatal("SchemaVersion != 1")
	}
	if manifest.Name != "testrepo" {
		t.Fatal("incorrect name in manifest")
	}
	if manifest.Tag != "testtag" {
		t.Fatal("incorrect tag in manifest")
	}
	if manifest.Architecture != "amd64" {
		t.Fatal("incorrect arch in manifest")
	}

	expectedFSLayers := []schema1.FSLayer{
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:b4ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")},
		{BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")},
	}

	if len(manifest.FSLayers) != len(expectedFSLayers) {
		t.Fatalf("wrong number of FSLayers: %d", len(manifest.FSLayers))
	}
	if !reflect.DeepEqual(manifest.FSLayers, expectedFSLayers) {
		t.Fatal("wrong FSLayers list")
	}

	expectedV1Compatibility := []string{
`{"architecture":"amd64","config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","echo hi"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"container":"e91032eb0403a61bfe085ff5a5a48e3659e5a6deae9f4d678daa2ae399d5a001","container_config":{"AttachStderr":false,"AttachStdin":false,"AttachStdout":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"/bin/sh\" \"-c\" \"echo hi\"]"],"Domainname":"","Entrypoint":null,"Env":["PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin","derived=true","asdf=true"],"Hostname":"23304fc829f9","Image":"sha256:4ab15c48b859c2920dd5224f92aabcd39a52794c5b3cf088fb3bbb438756c246","Labels":{},"OnBuild":[],"OpenStdin":false,"StdinOnce":false,"Tty":false,"User":"","Volumes":null,"WorkingDir":""},"created":"2015-11-04T23:06:32.365666163Z","docker_version":"1.9.0-dev","id":"d728140d3fd23dfcac505954af0b2224b3579b177029eded62916579eb19ac64","os":"linux","parent":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","throwaway":true}`,
`{"id":"0594e66a9830fa5ba73b66349eb221ea4beb6bac8d2148b90a0f371f8d67bcd5","parent":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","created":"2015-11-04T23:06:32.083868454Z","container_config":{"Cmd":["/bin/sh -c dd if=/dev/zero of=/file bs=1024 count=1024"]}}`,
`{"id":"39bc0dbed47060dd8952b048e73744ae471fe50354d2c267d308292c53b83ce1","parent":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","created":"2015-11-04T23:06:31.192097572Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV asdf=true"]},"throwaway":true}`,
`{"id":"875d7f206c023dc979e1677567a01364074f82b61e220c9b83a4610170490381","parent":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","created":"2015-11-04T23:06:30.934316144Z","container_config":{"Cmd":["/bin/sh -c #(nop) ENV derived=true"]},"throwaway":true}`,
`{"id":"9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e","parent":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:55.613815829Z","container_config":{"Cmd":["/bin/sh -c #(nop) CMD [\"sh\"]"]}}`,
`{"id":"3690474eb5b4b26fdfbd89c6e159e8cc376ca76ef48032a30fa6aafd56337880","created":"2015-10-31T22:22:54.690851953Z","container_config":{"Cmd":["/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"]}}`,
	}

	if len(manifest.History) != len(expectedV1Compatibility) {
		t.Fatalf("wrong number of history entries: %d", len(manifest.History))
	}
	for i := range expectedV1Compatibility {
		if manifest.History[i].V1Compatibility != expectedV1Compatibility[i] {
			t.Fatalf("wrong V1Compatibility %d. expected:\n%s\ngot:\n%s", i, expectedV1Compatibility[i], manifest.History[i].V1Compatibility)
		}
	}
}

@@ -1,12 +0,0 @@
// +build !windows

package distribution

import (
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
)

func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
	return nil
}

@@ -1,28 +0,0 @@
// +build windows

package distribution

import (
	"encoding/json"

	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/docker/image"
)

func setupBaseLayer(history []schema1.History, rootFS image.RootFS) error {
	var v1Config map[string]*json.RawMessage
	if err := json.Unmarshal([]byte(history[len(history)-1].V1Compatibility), &v1Config); err != nil {
		return err
	}
	baseID, err := json.Marshal(rootFS.BaseLayerID())
	if err != nil {
		return err
	}
	v1Config["parent"] = (*json.RawMessage)(&baseID)
	configJSON, err := json.Marshal(v1Config)
	if err != nil {
		return err
	}
	history[len(history)-1].V1Compatibility = string(configJSON)
	return nil
}

@@ -5,7 +5,6 @@ import (
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution/digest"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/progress"
	"golang.org/x/net/context"

@@ -30,7 +29,6 @@ type uploadTransfer struct {
	Transfer

	diffID layer.DiffID
	digest digest.Digest
	err error
}

@@ -43,16 +41,15 @@ type UploadDescriptor interface {
	// DiffID should return the DiffID for this layer.
	DiffID() layer.DiffID
	// Upload is called to perform the Upload.
	Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error)
	Upload(ctx context.Context, progressOutput progress.Output) error
}

// Upload is a blocking function which ensures the listed layers are present on
// the remote registry. It uses the string returned by the Key method to
// deduplicate uploads.
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) (map[layer.DiffID]digest.Digest, error) {
func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error {
	var (
		uploads []*uploadTransfer
		digests = make(map[layer.DiffID]digest.Digest)
		dedupDescriptors = make(map[string]struct{})
	)

@@ -74,16 +71,15 @@ func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescri
	for _, upload := range uploads {
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
			return ctx.Err()
		case <-upload.Transfer.Done():
			if upload.err != nil {
				return nil, upload.err
				return upload.err
			}
			digests[upload.diffID] = upload.digest
		}
	}

	return digests, nil
	return nil
}

func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc {

@@ -109,9 +105,8 @@ func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFun

	retries := 0
	for {
		digest, err := descriptor.Upload(u.Transfer.Context(), progressOutput)
		err := descriptor.Upload(u.Transfer.Context(), progressOutput)
		if err == nil {
			u.digest = digest
			break
		}

@@ -36,12 +36,12 @@ func (u *mockUploadDescriptor) DiffID() layer.DiffID {
}

// Upload is called to perform the upload.
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (digest.Digest, error) {
func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	if u.currentUploads != nil {
		defer atomic.AddInt32(u.currentUploads, -1)

		if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency {
			return "", errors.New("concurrency limit exceeded")
			return errors.New("concurrency limit exceeded")
		}
	}

@@ -49,7 +49,7 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre
	for i := int64(0); i <= 10; i++ {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
			return ctx.Err()
		case <-time.After(10 * time.Millisecond):
			progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10})
		}

@@ -57,12 +57,10 @@ func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progre

	if u.simulateRetries != 0 {
		u.simulateRetries--
		return "", errors.New("simulating retry")
		return errors.New("simulating retry")
	}

	// For the mock implementation, use SHA256(DiffID) as the returned
	// digest.
	return digest.FromBytes([]byte(u.diffID.String())), nil
	return nil
}

func uploadDescriptors(currentUploads *int32) []UploadDescriptor {

@@ -101,26 +99,13 @@ func TestSuccessfulUpload(t *testing.T) {
	var currentUploads int32
	descriptors := uploadDescriptors(&currentUploads)

	digests, err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan))
	if err != nil {
		t.Fatalf("upload error: %v", err)
	}

	close(progressChan)
	<-progressDone

	if len(digests) != len(expectedDigests) {
		t.Fatal("wrong number of keys in digests map")
	}

	for key, val := range expectedDigests {
		if digests[key] != val {
			t.Fatalf("mismatch in digest array for key %v (expected %v, got %v)", key, val, digests[key])
		}
		if receivedProgress[key.String()] != 10 {
			t.Fatalf("missing or wrong progress output for %v", key)
		}
	}
}

func TestCancelledUpload(t *testing.T) {

@@ -143,7 +128,7 @@ func TestCancelledUpload(t *testing.T) {
	}()

	descriptors := uploadDescriptors(nil)
	_, err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan))
	if err != context.Canceled {
		t.Fatal("expected upload to be cancelled")
	}