Merge pull request #18409 from tonistiigi/fix-sha-prefix-inspect

Vendor distribution and fix inspect by sha256 prefix
Doug Davis 2015-12-03 20:59:47 -05:00
Parents 4e6bea5964 61d6240069
Commit c80d03db77
13 changed files with 152 additions and 84 deletions

View file

@@ -40,7 +40,7 @@ clone git github.com/hashicorp/consul v0.5.2
clone git github.com/boltdb/bolt v1.1.0
# get graph and distribution packages
clone git github.com/docker/distribution c6c9194e9c6097f84b0ff468a741086ff7704aa3
clone git github.com/docker/distribution 568bf038af6d65b376165d02886b1c7fcaef1f61
clone git github.com/vbatts/tar-split v0.9.11
clone git github.com/docker/notary 45de2828b5e0083bfb4e9a5a781eddb05e2ef9d0

View file

@@ -342,3 +342,17 @@ func (s *DockerSuite) TestInspectJSONFields(c *check.C) {
c.Assert(err, check.IsNil)
c.Assert(out, checker.Equals, "[]\n")
}
func (s *DockerSuite) TestInspectByPrefix(c *check.C) {
id, err := inspectField("busybox", "Id")
c.Assert(err, checker.IsNil)
c.Assert(id, checker.HasPrefix, "sha256:")
id2, err := inspectField(id[:10], "Id")
c.Assert(err, checker.IsNil)
c.Assert(id, checker.Equals, id2)
id3, err := inspectField(strings.TrimPrefix(id, "sha256:")[:10], "Id")
c.Assert(err, checker.IsNil)
c.Assert(id, checker.Equals, id3)
}
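The new test checks that the same image resolves from three forms of its ID: the full "sha256:"-prefixed ID, a truncated ID that keeps the prefix, and a truncated bare hex string. As a rough sketch only (not the daemon's actual lookup code, and the image ID below is made up), prefix resolution along those lines could look like this:

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// resolveByPrefix is a hypothetical helper: it returns the single image ID
// matching the given prefix, whether or not the prefix carries the
// "sha256:" scheme, and rejects ambiguous or unknown prefixes.
func resolveByPrefix(ids []string, prefix string) (string, error) {
	trimmed := strings.TrimPrefix(prefix, "sha256:")
	var match string
	for _, id := range ids {
		if strings.HasPrefix(strings.TrimPrefix(id, "sha256:"), trimmed) {
			if match != "" {
				return "", errors.New("prefix is ambiguous")
			}
			match = id
		}
	}
	if match == "" {
		return "", errors.New("no such id")
	}
	return match, nil
}

func main() {
	ids := []string{"sha256:3a093384ac306cbac30b67f1585e12b30ab1a899374dabc3170b9bca246f1444"}
	// Full ID, truncated ID with the scheme, and truncated bare hex.
	for _, p := range []string{ids[0], ids[0][:17], "3a093384ac"} {
		id, err := resolveByPrefix(ids, p)
		fmt.Println(id, err)
	}
}
```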

View file

@@ -1,4 +1,5 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Aaron Vinson <avinson.public@gmail.com>
Adam Enger <adamenger@gmail.com>
Adrian Mouat <adrian.mouat@gmail.com>
Ahmet Alp Balkan <ahmetalpbalkan@gmail.com>
@@ -10,6 +11,7 @@ Andy Goldstein <agoldste@redhat.com>
Anton Tiurin <noxiouz@yandex.ru>
Antonio Mercado <amercado@thinknode.com>
Arnaud Porterie <arnaud.porterie@docker.com>
Arthur Baars <arthur@semmle.com>
Ayose Cazorla <ayosec@gmail.com>
BadZen <dave.trombley@gmail.com>
Ben Firshman <ben@firshman.co.uk>
@@ -25,15 +27,20 @@ David Lawrence <david.lawrence@docker.com>
David Verhasselt <david@crowdway.com>
David Xia <dxia@spotify.com>
davidli <wenquan.li@hp.com>
Dejan Golja <dejan@golja.org>
Derek McGowan <derek@mcgstyle.net>
Diogo Mónica <diogo.monica@gmail.com>
Donald Huang <don.hcd@gmail.com>
Doug Davis <dug@us.ibm.com>
Florentin Raud <florentin.raud@gmail.com>
Frederick F. Kautz IV <fkautz@alumni.cmu.edu>
harche <harche@users.noreply.github.com>
Henri Gomez <henri.gomez@gmail.com>
Hu Keping <hukeping@huawei.com>
Hua Wang <wanghua.humble@gmail.com>
Ian Babrou <ibobrik@gmail.com>
Jack Griffin <jackpg14@gmail.com>
Jason Freidman <jason.freidman@gmail.com>
Jeff Nickoloff <jeff@allingeek.com>
Jessie Frazelle <jfrazelle@users.noreply.github.com>
Jianqing Wang <tsing@jianqing.org>
@@ -44,15 +51,19 @@ Julien Fernandez <julien.fernandez@gmail.com>
Kelsey Hightower <kelsey.hightower@gmail.com>
Kenneth Lim <kennethlimcp@gmail.com>
Li Yi <denverdino@gmail.com>
Louis Kottmann <louis.kottmann@gmail.com>
Luke Carpenter <x@rubynerd.net>
Mary Anthony <mary@docker.com>
Matt Bentley <mbentley@mbentley.net>
Matt Moore <mattmoor@google.com>
Matt Robenolt <matt@ydekproductions.com>
Michael Prokop <mika@grml.org>
Miquel Sabaté <msabate@suse.com>
moxiegirl <mary@docker.com>
Nathan Sullivan <nathan@nightsys.net>
nevermosby <robolwq@qq.com>
Nghia Tran <tcnghia@gmail.com>
Nuutti Kotivuori <nuutti.kotivuori@poplatek.fi>
Oilbeater <liumengxinfly@gmail.com>
Olivier Gambier <olivier@docker.com>
Olivier Jacques <olivier.jacques@hp.com>
@@ -60,20 +71,27 @@ Patrick Devine <patrick.devine@docker.com>
Philip Misiowiec <philip@atlashealth.com>
Richard Scothern <richard.scothern@docker.com>
Sebastiaan van Stijn <github@gone.nl>
Sharif Nassar <mrwacky42@users.noreply.github.com>
Sharif Nassar <sharif@mrwacky.com>
Shawn Falkner-Horine <dreadpirateshawn@gmail.com>
Shreyas Karnik <karnik.shreyas@gmail.com>
Simon Thulbourn <simon+github@thulbourn.com>
Spencer Rinehart <anubis@overthemonkey.com>
Stephen J Day <stephen.day@docker.com>
Sungho Moon <sungho.moon@navercorp.com>
Sven Dowideit <SvenDowideit@home.org.au>
Sylvain Baubeau <sbaubeau@redhat.com>
tgic <farmer1992@gmail.com>
Thomas Sjögren <konstruktoid@users.noreply.github.com>
Tianon Gravi <admwiggin@gmail.com>
Tibor Vass <teabee89@gmail.com>
Troels Thomsen <troels@thomsen.io>
Vincent Batts <vbatts@redhat.com>
Vincent Demeester <vincent@sbr.pm>
Vincent Giersch <vincent.giersch@ovh.net>
Vincent Giersch <vincent@giersch.fr>
W. Trevor King <wking@tremily.us>
xg.song <xg.song@venusource.com>
xiekeyang <xiekeyang@huawei.com>
Yann ROBERT <yann.robert@anantaplex.fr>
yuzou <zouyu7@huawei.com>

View file

@@ -1,4 +1,8 @@
Solomon Hykes <solomon@docker.com> (@shykes)
Olivier Gambier <olivier@docker.com> (@dmp42)
Sam Alba <sam@docker.com> (@samalba)
Stephen Day <stephen.day@docker.com> (@stevvooe)
Derek McGowan <derek@mcgstyle.net> (@dmcgowan)
Richard Scothern <richard.scothern@gmail.com> (@richardscothern)
Aaron Lehmann <aaron.lehmann@docker.com> (@aaronlehmann)

View file

@@ -11,7 +11,7 @@ ifeq (${DISABLE_OPTIMIZATION},true)
VERSION:="$(VERSION)-noopt"
endif
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version $(VERSION)"
GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)"
.PHONY: clean all fmt vet lint build test binaries
.DEFAULT: default

View file

@@ -3,12 +3,15 @@
The Docker toolset to pack, ship, store, and deliver content.
This repository's main product is the Docker Registry 2.0 implementation
for storing and distributing Docker images. It supersedes the [docker/docker-
registry](https://github.com/docker/docker-registry) project with a new API
design, focused around security and performance.
for storing and distributing Docker images. It supersedes the
[docker/docker-registry](https://github.com/docker/docker-registry)
project with a new API design, focused around security and performance.
<img src="https://www.docker.com/sites/default/files/oyster-registry-3.png" width=200px/>
[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master)
[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution)
This repository contains the following components:
|**Component** |Description |

View file

@@ -8,19 +8,15 @@ machine:
- ceph osd pool create docker-distribution 1
post:
# Install many go versions
# - gvm install go1.3.3 -B --name=old
- gvm install go1.4.2 -B --name=stable
# - gvm install tip --name=bleed
# go
- gvm install go1.5 --prefer-binary --name=stable
environment:
# Convenient shortcuts to "common" locations
CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME
BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME
# Trick circle brainflat "no absolute path" behavior
BASE_OLD: ../../../$HOME/.gvm/pkgsets/old/global/$BASE_DIR
BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR
# BASE_BLEED: ../../../$HOME/.gvm/pkgsets/bleed/global/$BASE_DIR
DOCKER_BUILDTAGS: "include_rados include_oss include_gcs"
# Workaround Circle parsing dumb bugs and/or YAML wonkyness
CIRCLE_PAIN: "mode: set"
@@ -34,37 +30,22 @@ machine:
dependencies:
pre:
# Copy the code to the gopath of all go versions
# - >
# gvm use old &&
# mkdir -p "$(dirname $BASE_OLD)" &&
# cp -R "$CHECKOUT" "$BASE_OLD"
- >
gvm use stable &&
mkdir -p "$(dirname $BASE_STABLE)" &&
cp -R "$CHECKOUT" "$BASE_STABLE"
# - >
# gvm use bleed &&
# mkdir -p "$(dirname $BASE_BLEED)" &&
# cp -R "$CHECKOUT" "$BASE_BLEED"
override:
# Install dependencies for every copied clone/go version
# - gvm use old && go get github.com/tools/godep:
# pwd: $BASE_OLD
- gvm use stable && go get github.com/tools/godep:
pwd: $BASE_STABLE
# - gvm use bleed && go get github.com/tools/godep:
# pwd: $BASE_BLEED
post:
# For the stable go version, additionally install linting tools
- >
gvm use stable &&
go get github.com/axw/gocov/gocov github.com/golang/lint/golint
# Disabling goveralls for now
# go get github.com/axw/gocov/gocov github.com/mattn/goveralls github.com/golang/lint/golint
@@ -73,7 +54,6 @@ test:
# Output the go versions we are going to test
# - gvm use old && go version
- gvm use stable && go version
# - gvm use bleed && go version
# First thing: build everything. This will catch compile errors, and it's
# also necessary for go vet to work properly (see #807).
@@ -84,7 +64,7 @@ test:
- gvm use stable && test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)":
pwd: $BASE_STABLE
# VET
# VET
- gvm use stable && go vet ./...:
pwd: $BASE_STABLE
@@ -93,30 +73,22 @@ test:
pwd: $BASE_STABLE
override:
# Test every version we have (but stable)
# - gvm use old; godep go test -test.v -test.short ./...:
# timeout: 600
# pwd: $BASE_OLD
# - gvm use bleed; go test -test.v -test.short ./...:
# timeout: 600
# pwd: $BASE_BLEED
# Test stable, and report
# Preset the goverall report file
- echo "$CIRCLE_PAIN" > ~/goverage.report
- gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
pwd: $BASE_STABLE
- gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
timeout: 600
pwd: $BASE_STABLE
# - echo "$CIRCLE_PAIN" > ~/goverage.report
- gvm use stable; go list ./... | xargs -L 1 -I{} rm -f $GOPATH/src/{}/coverage.out:
pwd: $BASE_STABLE
- gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/{}/coverage.out {}:
timeout: 600
pwd: $BASE_STABLE
post:
# Aggregate and report to coveralls
- gvm use stable; go list ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
- gvm use stable; go list -tags "$DOCKER_BUILDTAGS" ./... | xargs -L 1 -I{} cat "$GOPATH/src/{}/coverage.out" | grep -v "$CIRCLE_PAIN" >> ~/goverage.report:
pwd: $BASE_STABLE
# - gvm use stable; goveralls -service circleci -coverprofile=/home/ubuntu/goverage.report -repotoken $COVERALLS_TOKEN:
# pwd: $BASE_STABLE
## Notes
# Disabled coveralls reporting: build breaking sending coverage data to coveralls

View file

@@ -58,6 +58,9 @@ var (
// ErrDigestInvalidFormat returned when digest format invalid.
ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format")
// ErrDigestInvalidLength returned when digest has invalid length.
ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length")
// ErrDigestUnsupported returned when the digest algorithm is unsupported.
ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm")
)
@@ -126,8 +129,11 @@ func (d Digest) Validate() error {
return ErrDigestInvalidFormat
}
switch Algorithm(s[:i]) {
switch algorithm := Algorithm(s[:i]); algorithm {
case SHA256, SHA384, SHA512:
if algorithm.Size()*2 != len(s[i+1:]) {
return ErrDigestInvalidLength
}
break
default:
return ErrDigestUnsupported
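The added length check compares the hex portion against the algorithm's output size, so a truncated digest no longer passes Validate. That matters for the prefix-inspect fix above: a short string such as "sha256:3a093384ac" (a hypothetical prefix) must be treated as a prefix to match, not as a complete digest. A minimal stand-alone illustration of the sha256 case, assuming the 32-byte output size:

```go
// Illustrative only: mirrors the sha256 branch of the length check added
// above. A full digest carries 32*2 = 64 hex characters after "sha256:".
package main

import (
	"fmt"
	"strings"
)

func validateLength(d string) error {
	i := strings.Index(d, ":")
	if i < 0 {
		return fmt.Errorf("invalid checksum digest format")
	}
	if d[:i] == "sha256" && len(d[i+1:]) != 32*2 {
		return fmt.Errorf("invalid checksum digest length")
	}
	return nil
}

func main() {
	fmt.Println(validateLength("sha256:3a093384ac"))                 // invalid checksum digest length
	fmt.Println(validateLength("sha256:" + strings.Repeat("a", 64))) // <nil>
}
```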

View file

@@ -54,6 +54,15 @@ func (a Algorithm) String() string {
return string(a)
}
// Size returns number of bytes returned by the hash.
func (a Algorithm) Size() int {
h, ok := algorithms[a]
if !ok {
return 0
}
return h.Size()
}
// Set implemented to allow use of Algorithm as a command line flag.
func (a *Algorithm) Set(value string) error {
if value == "" {
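For reference, a stand-alone sketch of the lookup that Size performs, using the standard library's crypto.Hash values rather than the package's own algorithms table (names here are illustrative):

```go
package main

import (
	"crypto"
	"fmt"
)

var algorithms = map[string]crypto.Hash{
	"sha256": crypto.SHA256,
	"sha384": crypto.SHA384,
	"sha512": crypto.SHA512,
}

// size returns the hash size in bytes, or 0 for an unknown algorithm,
// which callers such as Validate treat as unsupported.
func size(alg string) int {
	h, ok := algorithms[alg]
	if !ok {
		return 0
	}
	return h.Size()
}

func main() {
	fmt.Println(size("sha256")) // 32
	fmt.Println(size("md5"))    // 0
}
```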

View file

@@ -89,3 +89,14 @@ type ErrManifestBlobUnknown struct {
func (err ErrManifestBlobUnknown) Error() string {
return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}
// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
Name string
Reason error
}
func (err ErrManifestNameInvalid) Error() string {
return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}

View file

@@ -7,7 +7,7 @@
//
// // repository.go
// repository := hostname ['/' component]+
// hostname := hostcomponent [':' port-number]
// hostname := hostcomponent [':' port-number]
// component := subcomponent [separator subcomponent]*
// subcomponent := alpha-numeric ['-'* alpha-numeric]*
// hostcomponent := [hostpart '.']* hostpart
@@ -24,7 +24,7 @@
// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
// digest-algorithm-separator := /[+.-_]/
// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
// digest-hex := /[0-9a-fA-F]{32,}/ ; Atleast 128 bit digest value
// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
package reference
import (

View file

@@ -391,17 +391,18 @@ func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) {
}
func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) {
stat, err := bs.statter.Stat(ctx, dgst)
blobURL, err := bs.ub.BuildBlobURL(bs.name, dgst)
if err != nil {
return nil, err
}
blobURL, err := bs.ub.BuildBlobURL(bs.name, stat.Digest)
if err != nil {
return nil, err
}
return transport.NewHTTPReadSeeker(bs.client, blobURL, stat.Size), nil
return transport.NewHTTPReadSeeker(bs.client, blobURL,
func(resp *http.Response) error {
if resp.StatusCode == http.StatusNotFound {
return distribution.ErrBlobUnknown
}
return handleErrorResponse(resp)
}), nil
}
func (bs *blobs) ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error {
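Open no longer issues an up-front Stat to learn the blob size; instead it hands NewHTTPReadSeeker an error handler that maps a 404 on the eventual GET to ErrBlobUnknown. A simplified sketch of that handler pattern, with hypothetical helper names (openBlob, errBlobUnknown) and an httptest server standing in for a registry:

```go
package main

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

var errBlobUnknown = errors.New("unknown blob")

// openBlob mirrors the shape of the new call path in simplified form: the
// caller supplies a handler that turns an error response into a typed
// error, instead of pre-computing the blob size.
func openBlob(client *http.Client, url string, errorHandler func(*http.Response) error) (io.ReadCloser, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode > 399 {
		defer resp.Body.Close()
		if errorHandler != nil {
			return nil, errorHandler(resp)
		}
		return nil, fmt.Errorf("unexpected status: %v", resp.Status)
	}
	return resp.Body, nil
}

func main() {
	// A registry stand-in that knows no blobs at all.
	srv := httptest.NewServer(http.NotFoundHandler())
	defer srv.Close()

	_, err := openBlob(srv.Client(), srv.URL+"/v2/foo/blobs/sha256:deadbeef", func(resp *http.Response) error {
		if resp.StatusCode == http.StatusNotFound {
			return errBlobUnknown
		}
		return fmt.Errorf("unexpected status: %v", resp.Status)
	})
	fmt.Println(err) // unknown blob
}
```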

View file

@@ -2,11 +2,9 @@ package transport
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
)
@@ -21,11 +19,11 @@ type ReadSeekCloser interface {
// request. When seeking and starting a read from a non-zero offset
// the a "Range" header will be added which sets the offset.
// TODO(dmcgowan): Move this into a separate utility package
func NewHTTPReadSeeker(client *http.Client, url string, size int64) ReadSeekCloser {
func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
return &httpReadSeeker{
client: client,
url: url,
size: size,
client: client,
url: url,
errorHandler: errorHandler,
}
}
@@ -33,12 +31,26 @@ type httpReadSeeker struct {
client *http.Client
url string
// errorHandler creates an error from an unsuccessful HTTP response.
// This allows the error to be created with the HTTP response body
// without leaking the body through a returned error.
errorHandler func(*http.Response) error
size int64
rc io.ReadCloser // remote read closer
brd *bufio.Reader // internal buffered io
offset int64
err error
// rc is the remote read closer.
rc io.ReadCloser
// brd is a buffer for internal buffered io.
brd *bufio.Reader
// readerOffset tracks the offset as of the last read.
readerOffset int64
// seekOffset allows Seek to override the offset. Seek changes
// seekOffset instead of changing readOffset directly so that
// connection resets can be delayed and possibly avoided if the
// seek is undone (i.e. seeking to the end and then back to the
// beginning).
seekOffset int64
err error
}
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
@@ -46,16 +58,29 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
return 0, hrs.err
}
// If we seeked to a different position, we need to reset the
// connection. This logic is here instead of Seek so that if
// a seek is undone before the next read, the connection doesn't
// need to be closed and reopened. A common example of this is
// seeking to the end to determine the length, and then seeking
// back to the original position.
if hrs.readerOffset != hrs.seekOffset {
hrs.reset()
}
hrs.readerOffset = hrs.seekOffset
rd, err := hrs.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
hrs.offset += int64(n)
hrs.seekOffset += int64(n)
hrs.readerOffset += int64(n)
// Simulate io.EOF error if we reach filesize.
if err == nil && hrs.offset >= hrs.size {
if err == nil && hrs.size >= 0 && hrs.readerOffset >= hrs.size {
err = io.EOF
}
@@ -67,13 +92,20 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
return 0, hrs.err
}
var err error
newOffset := hrs.offset
_, err := hrs.reader()
if err != nil {
return 0, err
}
newOffset := hrs.seekOffset
switch whence {
case os.SEEK_CUR:
newOffset += int64(offset)
case os.SEEK_END:
if hrs.size < 0 {
return 0, errors.New("content length not known")
}
newOffset = hrs.size + int64(offset)
case os.SEEK_SET:
newOffset = int64(offset)
@@ -82,15 +114,10 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
if newOffset < 0 {
err = errors.New("cannot seek to negative position")
} else {
if hrs.offset != newOffset {
hrs.reset()
}
// No problems, set the offset.
hrs.offset = newOffset
hrs.seekOffset = newOffset
}
return hrs.offset, err
return hrs.seekOffset, err
}
func (hrs *httpReadSeeker) Close() error {
@@ -130,17 +157,12 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
return hrs.brd, nil
}
// If the offset is great than or equal to size, return a empty, noop reader.
if hrs.offset >= hrs.size {
return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
}
req, err := http.NewRequest("GET", hrs.url, nil)
if err != nil {
return nil, err
}
if hrs.offset > 0 {
if hrs.readerOffset > 0 {
// TODO(stevvooe): Get this working correctly.
// If we are at different offset, issue a range request from there.
@@ -158,8 +180,16 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
// import
if resp.StatusCode >= 200 && resp.StatusCode <= 399 {
hrs.rc = resp.Body
if resp.StatusCode == http.StatusOK {
hrs.size = resp.ContentLength
} else {
hrs.size = -1
}
} else {
defer resp.Body.Close()
if hrs.errorHandler != nil {
return nil, hrs.errorHandler(resp)
}
return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status)
}
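The reworked seeker keeps two offsets: Seek only records seekOffset, and the connection is reset lazily on the next Read if the position actually moved. That avoids re-opening the connection for the common pattern of seeking to the end to learn the length and then seeking back before reading. A minimal sketch of that caller-side pattern against a plain io.ReadSeeker (illustrative only):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// readAllWithLength learns the stream length via Seek, undoes the seek, and
// then reads everything. With a lazy seeker like the one above, only the
// Read after the final Seek touches the network.
func readAllWithLength(rs io.ReadSeeker) ([]byte, int64, error) {
	size, err := rs.Seek(0, io.SeekEnd)
	if err != nil {
		return nil, 0, err
	}
	// Undo the seek; a lazy implementation avoids reopening anything here.
	if _, err := rs.Seek(0, io.SeekStart); err != nil {
		return nil, 0, err
	}
	var buf bytes.Buffer
	if _, err := io.Copy(&buf, rs); err != nil {
		return nil, 0, err
	}
	return buf.Bytes(), size, nil
}

func main() {
	data, size, err := readAllWithLength(strings.NewReader("hello blob"))
	fmt.Println(string(data), size, err) // hello blob 10 <nil>
}
```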