Fix layer compression regression

PR #15493 removed compression of layers when pushing them to a V2
registry. This makes layer uploads larger than they should be.

This commit restores the compression. It uses an io.Pipe to turn the
gzip compressor's output Writer into a Reader, so that the ReadFrom
method can be used on the BlobWriter (which is very important for
avoiding many PATCH requests per layer).

Fixes #17209
Fixes #17038

Signed-off-by: Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Lehmann, 2015-10-20 16:05:21 -07:00
Parent: c516aa645e
Commit: 4dce280d96
1 changed file with 23 additions and 5 deletions


@@ -1,6 +1,7 @@
 package graph
 
 import (
+	"compress/gzip"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -236,11 +237,8 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
 	}
 	defer layerUpload.Close()
 
-	digester := digest.Canonical.New()
-	tee := io.TeeReader(arch, digester.Hash())
-
 	reader := progressreader.New(progressreader.Config{
-		In:        ioutil.NopCloser(tee), // we'll take care of close here.
+		In:        ioutil.NopCloser(arch), // we'll take care of close here.
 		Out:       out,
 		Formatter: p.sf,
 
@@ -254,8 +252,28 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
 		Action:   "Pushing",
 	})
 
+	digester := digest.Canonical.New()
+	// HACK: The MultiWriter doesn't write directly to layerUpload because
+	// we must make sure the ReadFrom is used, not Write. Using Write would
+	// send a PATCH request for every Write call.
+	pipeReader, pipeWriter := io.Pipe()
+	compressor := gzip.NewWriter(io.MultiWriter(pipeWriter, digester.Hash()))
+
+	go func() {
+		_, err := io.Copy(compressor, reader)
+		if err == nil {
+			err = compressor.Close()
+		}
+		if err != nil {
+			pipeWriter.CloseWithError(err)
+		} else {
+			pipeWriter.Close()
+		}
+	}()
+
 	out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
-	nn, err := io.Copy(layerUpload, reader)
+	nn, err := layerUpload.ReadFrom(pipeReader)
+	pipeReader.Close()
 	if err != nil {
 		return "", err
 	}
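
For illustration, the pipe-and-ReadFrom pattern in this diff can be run
standalone. The sketch below is not Docker code: a bytes.Buffer stands in
for the registry BlobWriter, and a short string stands in for the layer
tar stream. It shows the same flow as the patch: gzip output fans out to
the pipe and to a digest, a goroutine drives the compressor, and the
consumer drains everything with a single ReadFrom instead of many small
Write calls (which, on the real BlobWriter, would each cost a PATCH
request).

package main

import (
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Stand-in for the uncompressed layer tar stream ("reader" in the patch).
	src := strings.NewReader("layer tar stream would go here")

	digester := sha256.New()
	pipeReader, pipeWriter := io.Pipe()
	// Fan compressed output out to the pipe (for the consumer) and to the
	// hash, so the digest covers the compressed blob.
	compressor := gzip.NewWriter(io.MultiWriter(pipeWriter, digester))

	go func() {
		_, err := io.Copy(compressor, src)
		if err == nil {
			// Close flushes the gzip trailer; it must happen before the
			// pipe is closed or the stream is truncated.
			err = compressor.Close()
		}
		if err != nil {
			pipeWriter.CloseWithError(err) // propagate the error to the reader side
		} else {
			pipeWriter.Close()
		}
	}()

	// Stand-in for the BlobWriter: ReadFrom drains the pipe in large reads
	// instead of being driven by per-chunk Write calls.
	var blob bytes.Buffer
	nn, err := blob.ReadFrom(pipeReader)
	if err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Printf("uploaded %d compressed bytes, digest sha256:%x\n", nn, digester.Sum(nil))
}

Note the ordering in the goroutine: compressor.Close() runs before
pipeWriter.Close(), because closing the gzip writer is what emits the
final trailer bytes; closing the pipe first would hand the consumer a
truncated, undecompressable stream.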