package distribution

import (
	"fmt"
	"io"
	"sync"

	"github.com/Sirupsen/logrus"
	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/distribution/manifest/schema2"
	"github.com/docker/distribution/registry/client"
	"github.com/docker/docker/distribution/metadata"
	"github.com/docker/docker/distribution/xfer"
	"github.com/docker/docker/layer"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/reference"
	"github.com/docker/docker/registry"
	"golang.org/x/net/context"
)

// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
	Tag    string
	Digest digest.Digest
	Size   int
}
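
// v2Pusher pushes one or more tagged images to a repository on a registry
// that speaks the v2 protocol.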
type v2Pusher struct {
	blobSumService *metadata.BlobSumService
	ref            reference.Named
	endpoint       registry.APIEndpoint
	repoInfo       *registry.RepositoryInfo
	config         *ImagePushConfig
	repo           distribution.Repository

	// pushState is state built by the Upload functions.
	pushState pushState
}
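
// pushState is state shared between the layer upload descriptors of a single
// push. The embedded mutex protects the fields below.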
type pushState struct {
	sync.Mutex

	// remoteLayers is the set of layers known to exist on the remote side.
	// This avoids redundant queries when pushing multiple tags that
	// involve the same layers. It is also used to fill in digest and size
	// information when building the manifest.
	remoteLayers map[layer.DiffID]distribution.Descriptor

	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
	confirmedV2 bool
}
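
// Push attempts to push the configured reference to the registry's v2
// endpoint. Errors that permit falling back to another endpoint or to the
// v1 protocol are wrapped in fallbackError.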
func (p *v2Pusher) Push(ctx context.Context) (err error) {
	p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor)

	p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull")
	if err != nil {
		logrus.Debugf("Error getting v2 registry: %v", err)
		return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
	}

	if err = p.pushV2Repository(ctx); err != nil {
		if registry.ContinueOnError(err) {
			return fallbackError{err: err, confirmedV2: p.pushState.confirmedV2}
		}
	}
	return err
}
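
// pushV2Repository resolves the references to push (either the single
// requested tag, or every tag known for the repository) and pushes each one
// in turn.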
func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) {
	var associations []reference.Association
	if _, isTagged := p.ref.(reference.NamedTagged); isTagged {
		imageID, err := p.config.ReferenceStore.Get(p.ref)
		if err != nil {
			return fmt.Errorf("tag does not exist: %s", p.ref.String())
		}

		associations = []reference.Association{
			{
				Ref:     p.ref,
				ImageID: imageID,
			},
		}
	} else {
		// Push all tags
		associations = p.config.ReferenceStore.ReferencesByName(p.ref)
	}
	if err != nil {
		return fmt.Errorf("error getting tags for %s: %s", p.repoInfo.Name(), err)
	}
	if len(associations) == 0 {
		return fmt.Errorf("no tags to push for %s", p.repoInfo.Name())
	}

	for _, association := range associations {
		if err := p.pushV2Tag(ctx, association); err != nil {
			return err
		}
	}

	return nil
}
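
// pushV2Tag uploads the layers belonging to a single tagged image, builds a
// schema1 manifest referencing them, and pushes that manifest to the
// registry.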
func (p *v2Pusher) pushV2Tag(ctx context.Context, association reference.Association) error {
	ref := association.Ref
	logrus.Debugf("Pushing repository: %s", ref.String())

	img, err := p.config.ImageStore.Get(association.ImageID)
	if err != nil {
		return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err)
	}

	var l layer.Layer

	topLayerID := img.RootFS.ChainID()
	if topLayerID == "" {
		l = layer.EmptyLayer
	} else {
		l, err = p.config.LayerStore.Get(topLayerID)
		if err != nil {
			return fmt.Errorf("failed to get top layer from image: %v", err)
		}
		defer layer.ReleaseAndLog(p.config.LayerStore, l)
	}

	var descriptors []xfer.UploadDescriptor

	descriptorTemplate := v2PushDescriptor{
		blobSumService: p.blobSumService,
		repo:           p.repo,
		pushState:      &p.pushState,
	}

	// Loop bounds condition is to avoid pushing the base layer on Windows.
	for i := 0; i < len(img.RootFS.DiffIDs); i++ {
		descriptor := descriptorTemplate
		descriptor.layer = l
		descriptors = append(descriptors, &descriptor)

		l = l.Parent()
	}

	if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil {
		return err
	}

	var tag string
	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		tag = tagged.Tag()
	}
	builder := schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, p.repo.Name(), tag, img.RawJSON())

	// descriptors is in reverse order; iterate backwards to get references
	// appended in the right order.
	for i := len(descriptors) - 1; i >= 0; i-- {
		if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil {
			return err
		}
	}

	manifest, err := builder.Build(ctx)
	if err != nil {
		return err
	}

	manifestDigest, manifestSize, err := digestFromManifest(manifest.(*schema1.SignedManifest), ref)
	if err != nil {
		return err
	}
	if manifestDigest != "" {
		if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
			progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", tagged.Tag(), manifestDigest, manifestSize)
			// Signal digest to the trust client so it can sign the
			// push, if appropriate.
			progress.Aux(p.config.ProgressOutput, PushResult{Tag: tagged.Tag(), Digest: manifestDigest, Size: manifestSize})
		}
	}

	manSvc, err := p.repo.Manifests(ctx)
	if err != nil {
		return err
	}

	if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		_, err = manSvc.Put(ctx, manifest, client.WithTag(tagged.Tag()))
	} else {
		_, err = manSvc.Put(ctx, manifest)
	}
	// FIXME create a tag
	return err
}
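
// v2PushDescriptor describes a single layer to be uploaded; it implements
// the xfer.UploadDescriptor interface used by the upload manager.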
type v2PushDescriptor struct {
	layer          layer.Layer
	blobSumService *metadata.BlobSumService
	repo           distribution.Repository
	pushState      *pushState
}
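
// Key returns a unique identifier for the layer upload, combining the
// destination repository name with the layer's DiffID.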
func (pd *v2PushDescriptor) Key() string {
	return "v2push:" + pd.repo.Name() + " " + pd.layer.DiffID().String()
}
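
// ID returns the truncated DiffID used to label progress output for this
// layer.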
func (pd *v2PushDescriptor) ID() string {
	return stringid.TruncateID(pd.layer.DiffID().String())
}
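
// DiffID returns the DiffID of the layer being pushed.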
func (pd *v2PushDescriptor) DiffID() layer.DiffID {
	return pd.layer.DiffID()
}
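
// Upload pushes the layer blob to the registry unless it is already known to
// exist there, and records the resulting descriptor in pushState.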
func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) error {
	diffID := pd.DiffID()

	pd.pushState.Lock()
	if _, ok := pd.pushState.remoteLayers[diffID]; ok {
		// it is already known that the push is not needed and
		// therefore doing a stat is unnecessary
		pd.pushState.Unlock()
		progress.Update(progressOutput, pd.ID(), "Layer already exists")
		return nil
	}
	pd.pushState.Unlock()

	// Do we have any blobsums associated with this layer's DiffID?
	possibleBlobsums, err := pd.blobSumService.GetBlobSums(diffID)
	if err == nil {
		descriptor, exists, err := blobSumAlreadyExists(ctx, possibleBlobsums, pd.repo, pd.pushState)
		if err != nil {
			progress.Update(progressOutput, pd.ID(), "Image push failed")
			return retryOnError(err)
		}
		if exists {
			progress.Update(progressOutput, pd.ID(), "Layer already exists")
			pd.pushState.Lock()
			pd.pushState.remoteLayers[diffID] = descriptor
			pd.pushState.Unlock()
			return nil
		}
	}

	logrus.Debugf("Pushing layer: %s", diffID)

	// if digest was empty or not saved, or if blob does not exist on the remote repository,
	// then push the blob.
	bs := pd.repo.Blobs(ctx)

	// Send the layer
	layerUpload, err := bs.Create(ctx)
	if err != nil {
		return retryOnError(err)
	}
	defer layerUpload.Close()

	arch, err := pd.layer.TarStream()
	if err != nil {
		return xfer.DoNotRetry{Err: err}
	}

	// don't care if this fails; best effort
	size, _ := pd.layer.DiffSize()

	reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing")
	defer reader.Close()
	compressedReader := compress(reader)

	digester := digest.Canonical.New()
	tee := io.TeeReader(compressedReader, digester.Hash())

	nn, err := layerUpload.ReadFrom(tee)
	compressedReader.Close()
	if err != nil {
		return retryOnError(err)
	}

	pushDigest := digester.Digest()
	if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil {
		return retryOnError(err)
	}

	logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn)
	progress.Update(progressOutput, pd.ID(), "Pushed")

	// Cache mapping from this layer's DiffID to the blobsum
	if err := pd.blobSumService.Add(diffID, pushDigest); err != nil {
		return xfer.DoNotRetry{Err: err}
	}

	pd.pushState.Lock()

	// If Commit succeeded, that's an indication that the remote registry
	// speaks the v2 protocol.
	pd.pushState.confirmedV2 = true

	pd.pushState.remoteLayers[diffID] = distribution.Descriptor{
		Digest:    pushDigest,
		MediaType: schema2.MediaTypeLayer,
		Size:      nn,
	}

	pd.pushState.Unlock()

	return nil
}
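
// Descriptor returns the distribution.Descriptor recorded for this layer
// during Upload.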
func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor {
	// There is no need to lock pushState here, because Descriptor is only
	// called after all mutation of pushState has completed. By that time,
	// every layer has an entry in remoteLayers.
	return pd.pushState.remoteLayers[pd.DiffID()]
}

// blobSumAlreadyExists checks if the registry already knows about any of the
// blobsums passed in the "blobsums" slice. If it finds one that the registry
// knows about, it returns the corresponding descriptor and "true".
func blobSumAlreadyExists(ctx context.Context, blobsums []digest.Digest, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) {
	for _, dgst := range blobsums {
		descriptor, err := repo.Blobs(ctx).Stat(ctx, dgst)
		switch err {
		case nil:
			descriptor.MediaType = schema2.MediaTypeLayer
			return descriptor, true, nil
		case distribution.ErrBlobUnknown:
			// nop
		default:
			return distribution.Descriptor{}, false, err
		}
	}
	return distribution.Descriptor{}, false, nil
}