Merge pull request #32677 from tonistiigi/builder-remote-context-4

Implement long running interactive session and sending build context incrementally
Victor Vieux 2017-06-22 14:38:47 -07:00 committed by GitHub
Parents f88626b270 8f68adfaf0
Commit 050c1bb17b
112 changed files: 8684 additions and 246 deletions


@ -4,11 +4,11 @@ import (
"fmt"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/image"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/net/context"
@ -20,16 +20,21 @@ type ImageComponent interface {
TagImageWithReference(image.ID, string, reference.Named) error
}
// Builder defines interface for running a build
type Builder interface {
Build(context.Context, backend.BuildConfig) (*builder.Result, error)
}
// Backend provides build functionality to the API router
type Backend struct {
manager *dockerfile.BuildManager
builder Builder
fsCache *fscache.FSCache
imageComponent ImageComponent
}
// NewBackend creates a new build backend from components
func NewBackend(components ImageComponent, builderBackend builder.Backend, idMappings *idtools.IDMappings) *Backend {
manager := dockerfile.NewBuildManager(builderBackend, idMappings)
return &Backend{imageComponent: components, manager: manager}
func NewBackend(components ImageComponent, builder Builder, fsCache *fscache.FSCache) (*Backend, error) {
return &Backend{imageComponent: components, builder: builder, fsCache: fsCache}, nil
}
// Build builds an image from a Source
@ -40,7 +45,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
return "", err
}
build, err := b.manager.Build(ctx, config)
build, err := b.builder.Build(ctx, config)
if err != nil {
return "", err
}
@ -58,6 +63,15 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string
return imageID, err
}
// PruneCache removes all cached build sources
func (b *Backend) PruneCache(ctx context.Context) (*types.BuildCachePruneReport, error) {
size, err := b.fsCache.Prune()
if err != nil {
return nil, errors.Wrap(err, "failed to prune build cache")
}
return &types.BuildCachePruneReport{SpaceReclaimed: size}, nil
}
func squashBuild(build *builder.Result, imageComponent ImageComponent) (string, error) {
var fromID string
if build.FromImage != nil {


@ -1,6 +1,7 @@
package build
import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/backend"
"golang.org/x/net/context"
)
@ -10,6 +11,9 @@ type Backend interface {
// Build a Docker image returning the id of the image
// TODO: make this return a reference instead of string
Build(context.Context, backend.BuildConfig) (string, error)
// Prune build cache
PruneCache(context.Context) (*types.BuildCachePruneReport, error)
}
type experimentalProvider interface {


@ -24,5 +24,6 @@ func (r *buildRouter) Routes() []router.Route {
func (r *buildRouter) initRoutes() {
r.routes = []router.Route{
router.NewPostRoute("/build", r.postBuild, router.WithCancel),
router.NewPostRoute("/build/prune", r.postPrune, router.WithCancel),
}
}


@ -127,10 +127,19 @@ func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBui
}
options.CacheFrom = cacheFrom
}
options.SessionID = r.FormValue("session")
return options, nil
}
func (br *buildRouter) postPrune(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
report, err := br.backend.PruneCache(ctx)
if err != nil {
return err
}
return httputils.WriteJSON(w, http.StatusOK, report)
}
func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
var (
notVerboseBuffer = bytes.NewBuffer(nil)


@ -0,0 +1,12 @@
package session
import (
"net/http"
"golang.org/x/net/context"
)
// Backend abstracts a session receiver from an HTTP request.
type Backend interface {
HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error
}


@ -0,0 +1,29 @@
package session
import "github.com/docker/docker/api/server/router"
// sessionRouter is a router to talk with the session controller
type sessionRouter struct {
backend Backend
routes []router.Route
}
// NewRouter initializes a new session router
func NewRouter(b Backend) router.Router {
r := &sessionRouter{
backend: b,
}
r.initRoutes()
return r
}
// Routes returns the available routes to the session controller
func (r *sessionRouter) Routes() []router.Route {
return r.routes
}
func (r *sessionRouter) initRoutes() {
r.routes = []router.Route{
router.Experimental(router.NewPostRoute("/session", r.startSession)),
}
}
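
For orientation, anything implementing the one-method `Backend` interface from the previous file can be mounted here; in the daemon that role is played by the session manager from `client/session`. A stub sketch (the import path is inferred from the package layout, and `stubBackend` is purely illustrative):

```
package main

import (
	"net/http"

	"github.com/docker/docker/api/server/router/session"
	"golang.org/x/net/context"
)

// stubBackend is an illustrative Backend; the daemon plugs in the real
// session manager here instead.
type stubBackend struct{}

func (stubBackend) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
	// A real backend hijacks the connection and speaks gRPC over it.
	w.WriteHeader(http.StatusNotImplemented)
	return nil
}

func main() {
	r := session.NewRouter(stubBackend{})
	_ = r.Routes() // one experimental POST /session route, as registered above
}
```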


@ -0,0 +1,16 @@
package session
import (
"net/http"
apierrors "github.com/docker/docker/api/errors"
"golang.org/x/net/context"
)
func (sr *sessionRouter) startSession(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
err := sr.backend.HandleHTTPRequest(ctx, w, r)
if err != nil {
return apierrors.NewBadRequestError(err)
}
return nil
}


@ -2,6 +2,7 @@ package system
import (
"github.com/docker/docker/api/server/router"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/daemon/cluster"
)
@ -11,13 +12,15 @@ type systemRouter struct {
backend Backend
cluster *cluster.Cluster
routes []router.Route
builder *fscache.FSCache
}
// NewRouter initializes a new system router
func NewRouter(b Backend, c *cluster.Cluster) router.Router {
func NewRouter(b Backend, c *cluster.Cluster, fscache *fscache.FSCache) router.Router {
r := &systemRouter{
backend: b,
cluster: c,
builder: fscache,
}
r.routes = []router.Route{


@ -17,6 +17,7 @@ import (
timetypes "github.com/docker/docker/api/types/time"
"github.com/docker/docker/api/types/versions"
"github.com/docker/docker/pkg/ioutils"
pkgerrors "github.com/pkg/errors"
"golang.org/x/net/context"
)
@ -75,6 +76,11 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter,
if err != nil {
return err
}
builderSize, err := s.builder.DiskUsage()
if err != nil {
return pkgerrors.Wrap(err, "error getting build cache usage")
}
du.BuilderSize = builderSize
return httputils.WriteJSON(w, http.StatusOK, du)
}


@ -4745,6 +4745,27 @@ paths:
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Image"]
/build/prune:
post:
summary: "Delete builder cache"
produces:
- "application/json"
operationId: "BuildPrune"
responses:
200:
description: "No error"
schema:
type: "object"
properties:
SpaceReclaimed:
description: "Disk space reclaimed in bytes"
type: "integer"
format: "int64"
500:
description: "Server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Image"]
/images/create:
post:
summary: "Create an image"
@ -8481,3 +8502,43 @@ paths:
type: "string"
required: true
tags: ["Distribution"]
/session:
post:
summary: "Initialize interactive session"
description: |
Start a new interactive session with a server. A session allows the server to call back to the client for advanced capabilities.
### Hijacking
This endpoint hijacks the HTTP connection and upgrades it to an HTTP/2 transport that allows the client to expose gRPC services on that connection.
For example, the client sends this request to upgrade the connection:
```
POST /session HTTP/1.1
Upgrade: h2c
Connection: Upgrade
```
The Docker daemon will respond with a `101 UPGRADED` response followed by the raw stream:
```
HTTP/1.1 101 UPGRADED
Connection: Upgrade
Upgrade: h2c
```
operationId: "Session"
produces:
- "application/vnd.docker.raw-stream"
responses:
101:
description: "no error, hijacking successful"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
500:
description: "server error"
schema:
$ref: "#/definitions/ErrorResponse"
tags: ["Session"]


@ -7,7 +7,7 @@ import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
units "github.com/docker/go-units"
)
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
@ -178,6 +178,7 @@ type ImageBuildOptions struct {
SecurityOpt []string
ExtraHosts []string // List of extra hosts
Target string
SessionID string
// TODO @jhowardmsft LCOW Support: This will require extending to include
// `Platform string`, but is omitted for now as it's hard-coded temporarily


@ -489,10 +489,11 @@ type Runtime struct {
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
BuilderSize int64
}
// ContainersPruneReport contains the response for Engine API:
@ -516,6 +517,12 @@ type ImagesPruneReport struct {
SpaceReclaimed uint64
}
// BuildCachePruneReport contains the response for Engine API:
// POST "/build/prune"
type BuildCachePruneReport struct {
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {


@ -7,6 +7,7 @@ import (
"io/ioutil"
"runtime"
"strings"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/api/types"
@ -15,7 +16,9 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/dockerfile/command"
"github.com/docker/docker/builder/dockerfile/parser"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/client/session"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/idtools"
@ -40,20 +43,33 @@ var validCommitCommands = map[string]bool{
"workdir": true,
}
// SessionGetter is an object used to get access to a session by uuid
type SessionGetter interface {
Get(ctx context.Context, uuid string) (session.Caller, error)
}
// BuildManager is shared across all Builder objects
type BuildManager struct {
archiver *archive.Archiver
backend builder.Backend
pathCache pathCache // TODO: make this persistent
sg SessionGetter
fsCache *fscache.FSCache
}
// NewBuildManager creates a BuildManager
func NewBuildManager(b builder.Backend, idMappings *idtools.IDMappings) *BuildManager {
return &BuildManager{
func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
bm := &BuildManager{
backend: b,
pathCache: &syncmap.Map{},
sg: sg,
archiver: chrootarchive.NewArchiver(idMappings),
fsCache: fsCache,
}
if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
return nil, err
}
return bm, nil
}
// Build starts a new build from a BuildConfig
@ -67,13 +83,13 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
if err != nil {
return nil, err
}
if source != nil {
defer func() {
defer func() {
if source != nil {
if err := source.Close(); err != nil {
logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err)
}
}()
}
}
}()
// TODO @jhowardmsft LCOW support - this will require rework to allow both linux and Windows simultaneously.
// This is an interim solution to hardcode to linux if LCOW is turned on.
@ -84,6 +100,15 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
}
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
if src, err := bm.initializeClientSession(ctx, cancel, config.Options); err != nil {
return nil, err
} else if src != nil {
source = src
}
builderOptions := builderOptions{
Options: config.Options,
ProgressWriter: config.ProgressWriter,
@ -96,6 +121,40 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) (
return newBuilder(ctx, builderOptions).build(source, dockerfile)
}
func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
if options.SessionID == "" || bm.sg == nil {
return nil, nil
}
logrus.Debug("client is session enabled")
ctx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
defer cancelCtx()
c, err := bm.sg.Get(ctx, options.SessionID)
if err != nil {
return nil, err
}
go func() {
<-c.Context().Done()
cancel()
}()
if options.RemoteContext == remotecontext.ClientSessionRemote {
st := time.Now()
csi, err := NewClientSessionSourceIdentifier(ctx, bm.sg,
options.SessionID, []string{"/"})
if err != nil {
return nil, err
}
src, err := bm.fsCache.SyncFrom(ctx, csi)
if err != nil {
return nil, err
}
logrus.Debugf("sync-time: %v", time.Since(st))
return src, nil
}
return nil, nil
}
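
As the function above shows, the session is only consulted when the build request carries a `session` ID, and the context is synced through the cache only when the remote context is the `client-session` marker (see the `Detect` change further down, which then parses the request body as the Dockerfile itself). A hedged sketch of the request a client would send, with placeholder values for the socket path, session ID, and tag:

```
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock") // assumed socket path
			},
		},
	}

	q := url.Values{}
	q.Set("session", "<session-id>")  // ID of a previously established /session
	q.Set("remote", "client-session") // sync the context over the session transport
	q.Set("t", "example:latest")      // image tag, placeholder

	// With remote=client-session the daemon parses the body as the Dockerfile
	// itself; the rest of the context is pulled through the session.
	dockerfile := "FROM busybox\nCOPY . /src\n"
	resp, err := client.Post("http://docker/build?"+q.Encode(), "text/plain", strings.NewReader(dockerfile))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(out))
}
```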
// builderOptions are the dependencies required by the builder
type builderOptions struct {
Options *types.ImageBuildOptions


@ -0,0 +1,78 @@
package dockerfile
import (
"time"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/client/session"
"github.com/docker/docker/client/session/filesync"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
const sessionConnectTimeout = 5 * time.Second
// ClientSessionTransport is a transport for copying files from docker client
// to the daemon.
type ClientSessionTransport struct{}
// NewClientSessionTransport returns new ClientSessionTransport instance
func NewClientSessionTransport() *ClientSessionTransport {
return &ClientSessionTransport{}
}
// Copy data from a remote to a destination directory.
func (cst *ClientSessionTransport) Copy(ctx context.Context, id fscache.RemoteIdentifier, dest string, cu filesync.CacheUpdater) error {
csi, ok := id.(*ClientSessionSourceIdentifier)
if !ok {
return errors.New("invalid identifier for client session")
}
return filesync.FSSync(ctx, csi.caller, filesync.FSSendRequestOpt{
SrcPaths: csi.srcPaths,
DestDir: dest,
CacheUpdater: cu,
})
}
// ClientSessionSourceIdentifier is an identifier that can be used for requesting
// files from remote client
type ClientSessionSourceIdentifier struct {
srcPaths []string
caller session.Caller
sharedKey string
uuid string
}
// NewClientSessionSourceIdentifier returns new ClientSessionSourceIdentifier instance
func NewClientSessionSourceIdentifier(ctx context.Context, sg SessionGetter, uuid string, sources []string) (*ClientSessionSourceIdentifier, error) {
csi := &ClientSessionSourceIdentifier{
uuid: uuid,
srcPaths: sources,
}
caller, err := sg.Get(ctx, uuid)
if err != nil {
return nil, errors.Wrapf(err, "failed to get session for %s", uuid)
}
csi.caller = caller
return csi, nil
}
// Transport returns transport identifier for remote identifier
func (csi *ClientSessionSourceIdentifier) Transport() string {
return remotecontext.ClientSessionRemote
}
// SharedKey returns shared key for remote identifier. Shared key is used
// for finding the base for a repeated transfer.
func (csi *ClientSessionSourceIdentifier) SharedKey() string {
return csi.caller.SharedKey()
}
// Key returns unique key for remote identifier. Requests with same key return
// same data.
func (csi *ClientSessionSourceIdentifier) Key() string {
return csi.uuid
}


@ -158,7 +158,7 @@ func executeTestCase(t *testing.T, testCase dispatchTestCase) {
}
}()
context, err := remotecontext.MakeTarSumContext(tarStream)
context, err := remotecontext.FromArchive(tarStream)
if err != nil {
t.Fatalf("Error when creating tar context: %s", err)

builder/fscache/fscache.go (new file)

@ -0,0 +1,602 @@
package fscache
import (
"encoding/json"
"os"
"path/filepath"
"sort"
"sync"
"time"
"github.com/Sirupsen/logrus"
"github.com/boltdb/bolt"
"github.com/docker/docker/builder"
"github.com/docker/docker/builder/remotecontext"
"github.com/docker/docker/client/session/filesync"
"github.com/docker/docker/pkg/directory"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
"golang.org/x/net/context"
"golang.org/x/sync/singleflight"
)
const dbFile = "fscache.db"
const cacheKey = "cache"
const metaKey = "meta"
// Backend is a backing implementation for FSCache
type Backend interface {
Get(id string) (string, error)
Remove(id string) error
}
// FSCache allows syncing remote resources to cached snapshots
type FSCache struct {
opt Opt
transports map[string]Transport
mu sync.Mutex
g singleflight.Group
store *fsCacheStore
}
// Opt defines options for initializing FSCache
type Opt struct {
Backend Backend
Root string // for storing local metadata
GCPolicy GCPolicy
}
// GCPolicy defines policy for garbage collection
type GCPolicy struct {
MaxSize uint64
MaxKeepDuration time.Duration
}
// NewFSCache returns new FSCache object
func NewFSCache(opt Opt) (*FSCache, error) {
store, err := newFSCacheStore(opt)
if err != nil {
return nil, err
}
return &FSCache{
store: store,
opt: opt,
transports: make(map[string]Transport),
}, nil
}
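
A minimal sketch of wiring the cache together, using the naive directory backend added later in this PR; the paths and GC limits are arbitrary examples, not the daemon's defaults:

```
package main

import (
	"log"
	"time"

	"github.com/docker/docker/builder/fscache"
)

func main() {
	// The naive backend further down in this PR just allocates directories
	// under the given root; paths and limits here are arbitrary examples.
	opt := fscache.Opt{
		Backend: fscache.NewNaiveCacheBackend("/var/lib/docker/builder/fscache-data"),
		Root:    "/var/lib/docker/builder", // fscache.db with cache metadata lives here
		GCPolicy: fscache.GCPolicy{
			MaxSize:         512 * 1024 * 1024, // evict idle sources beyond ~512MB
			MaxKeepDuration: 48 * time.Hour,    // or older than two days
		},
	}
	cache, err := fscache.NewFSCache(opt)
	if err != nil {
		log.Fatal(err)
	}
	defer cache.Close()
	// Transports are registered by name before SyncFrom can use them; the
	// build manager registers the "client-session" transport the same way.
}
```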
// Transport defines a method for syncing remote data to FSCache
type Transport interface {
Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error
}
// RemoteIdentifier identifies a transfer request
type RemoteIdentifier interface {
Key() string
SharedKey() string
Transport() string
}
// RegisterTransport registers a new transport method
func (fsc *FSCache) RegisterTransport(id string, transport Transport) error {
fsc.mu.Lock()
defer fsc.mu.Unlock()
if _, ok := fsc.transports[id]; ok {
return errors.Errorf("transport %v already exists", id)
}
fsc.transports[id] = transport
return nil
}
// SyncFrom returns a source based on a remote identifier
func (fsc *FSCache) SyncFrom(ctx context.Context, id RemoteIdentifier) (builder.Source, error) { // cacheOpt
transportID := id.Transport()
fsc.mu.Lock()
transport, ok := fsc.transports[id.Transport()]
if !ok {
fsc.mu.Unlock()
return nil, errors.Errorf("invalid transport %s", trasportID)
}
logrus.Debugf("SyncFrom %s %s", id.Key(), id.SharedKey())
fsc.mu.Unlock()
sourceRef, err, _ := fsc.g.Do(id.Key(), func() (interface{}, error) {
var sourceRef *cachedSourceRef
sourceRef, err := fsc.store.Get(id.Key())
if err == nil {
return sourceRef, nil
}
// check for unused shared cache
sharedKey := id.SharedKey()
if sharedKey != "" {
r, err := fsc.store.Rebase(sharedKey, id.Key())
if err == nil {
sourceRef = r
}
}
if sourceRef == nil {
var err error
sourceRef, err = fsc.store.New(id.Key(), sharedKey)
if err != nil {
return nil, errors.Wrap(err, "failed to create remote context")
}
}
if err := syncFrom(ctx, sourceRef, transport, id); err != nil {
sourceRef.Release()
return nil, err
}
if err := sourceRef.resetSize(-1); err != nil {
return nil, err
}
return sourceRef, nil
})
if err != nil {
return nil, err
}
ref := sourceRef.(*cachedSourceRef)
if ref.src == nil { // failsafe
return nil, errors.Errorf("invalid empty pull")
}
wc := &wrappedContext{Source: ref.src, closer: func() error {
ref.Release()
return nil
}}
return wc, nil
}
// DiskUsage reports how much data is allocated by the cache
func (fsc *FSCache) DiskUsage() (int64, error) {
return fsc.store.DiskUsage()
}
// Prune allows manually cleaning up the cache
func (fsc *FSCache) Prune() (uint64, error) {
return fsc.store.Prune()
}
// Close stops the gc and closes the persistent db
func (fsc *FSCache) Close() error {
return fsc.store.Close()
}
func syncFrom(ctx context.Context, cs *cachedSourceRef, transport Transport, id RemoteIdentifier) (retErr error) {
src := cs.src
if src == nil {
src = remotecontext.NewCachableSource(cs.Dir())
}
if !cs.cached {
if err := cs.storage.db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(id.Key()))
dt := b.Get([]byte(cacheKey))
if dt != nil {
if err := src.UnmarshalBinary(dt); err != nil {
return err
}
} else {
return errors.Wrap(src.Scan(), "failed to scan cache records")
}
return nil
}); err != nil {
return err
}
}
dc := &detectChanges{f: src.HandleChange}
// todo: probably send a bucket to `Copy` and let it return source
// but need to make sure that tx is safe
if err := transport.Copy(ctx, id, cs.Dir(), dc); err != nil {
return errors.Wrapf(err, "failed to copy to %s", cs.Dir())
}
if !dc.supported {
if err := src.Scan(); err != nil {
return errors.Wrap(err, "failed to scan cache records after transfer")
}
}
cs.cached = true
cs.src = src
return cs.storage.db.Update(func(tx *bolt.Tx) error {
dt, err := src.MarshalBinary()
if err != nil {
return err
}
b := tx.Bucket([]byte(id.Key()))
return b.Put([]byte(cacheKey), dt)
})
}
type fsCacheStore struct {
root string
mu sync.Mutex
sources map[string]*cachedSource
db *bolt.DB
fs Backend
gcTimer *time.Timer
gcPolicy GCPolicy
}
// CachePolicy defines policy for keeping a resource in cache
type CachePolicy struct {
Priority int
LastUsed time.Time
}
func defaultCachePolicy() CachePolicy {
return CachePolicy{Priority: 10, LastUsed: time.Now()}
}
func newFSCacheStore(opt Opt) (*fsCacheStore, error) {
if err := os.MkdirAll(opt.Root, 0700); err != nil {
return nil, err
}
p := filepath.Join(opt.Root, dbFile)
db, err := bolt.Open(p, 0600, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to open database file %s", p)
}
s := &fsCacheStore{db: db, sources: make(map[string]*cachedSource), fs: opt.Backend, gcPolicy: opt.GCPolicy}
db.View(func(tx *bolt.Tx) error {
return tx.ForEach(func(name []byte, b *bolt.Bucket) error {
dt := b.Get([]byte(metaKey))
if dt == nil {
return nil
}
var sm sourceMeta
if err := json.Unmarshal(dt, &sm); err != nil {
return err
}
dir, err := s.fs.Get(sm.BackendID)
if err != nil {
return err // TODO: handle gracefully
}
source := &cachedSource{
refs: make(map[*cachedSourceRef]struct{}),
id: string(name),
dir: dir,
sourceMeta: sm,
storage: s,
}
s.sources[string(name)] = source
return nil
})
})
s.gcTimer = s.startPeriodicGC(5 * time.Minute)
return s, nil
}
func (s *fsCacheStore) startPeriodicGC(interval time.Duration) *time.Timer {
var t *time.Timer
t = time.AfterFunc(interval, func() {
if err := s.GC(); err != nil {
logrus.Errorf("build gc error: %v", err)
}
t.Reset(interval)
})
return t
}
func (s *fsCacheStore) Close() error {
s.gcTimer.Stop()
return s.db.Close()
}
func (s *fsCacheStore) New(id, sharedKey string) (*cachedSourceRef, error) {
s.mu.Lock()
defer s.mu.Unlock()
var ret *cachedSource
if err := s.db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte(id))
if err != nil {
return err
}
backendID := stringid.GenerateRandomID()
dir, err := s.fs.Get(backendID)
if err != nil {
return err
}
source := &cachedSource{
refs: make(map[*cachedSourceRef]struct{}),
id: id,
dir: dir,
sourceMeta: sourceMeta{
BackendID: backendID,
SharedKey: sharedKey,
CachePolicy: defaultCachePolicy(),
},
storage: s,
}
dt, err := json.Marshal(source.sourceMeta)
if err != nil {
return err
}
if err := b.Put([]byte(metaKey), dt); err != nil {
return err
}
s.sources[id] = source
ret = source
return nil
}); err != nil {
return nil, err
}
return ret.getRef(), nil
}
func (s *fsCacheStore) Rebase(sharedKey, newid string) (*cachedSourceRef, error) {
s.mu.Lock()
defer s.mu.Unlock()
var ret *cachedSource
for id, snap := range s.sources {
if snap.SharedKey == sharedKey && len(snap.refs) == 0 {
if err := s.db.Update(func(tx *bolt.Tx) error {
if err := tx.DeleteBucket([]byte(id)); err != nil {
return err
}
b, err := tx.CreateBucket([]byte(newid))
if err != nil {
return err
}
snap.id = newid
snap.CachePolicy = defaultCachePolicy()
dt, err := json.Marshal(snap.sourceMeta)
if err != nil {
return err
}
if err := b.Put([]byte(metaKey), dt); err != nil {
return err
}
delete(s.sources, id)
s.sources[newid] = snap
return nil
}); err != nil {
return nil, err
}
ret = snap
break
}
}
if ret == nil {
return nil, errors.Errorf("no candidate for rebase")
}
return ret.getRef(), nil
}
func (s *fsCacheStore) Get(id string) (*cachedSourceRef, error) {
s.mu.Lock()
defer s.mu.Unlock()
src, ok := s.sources[id]
if !ok {
return nil, errors.Errorf("not found")
}
return src.getRef(), nil
}
// DiskUsage reports how much data is allocated by the cache
func (s *fsCacheStore) DiskUsage() (int64, error) {
s.mu.Lock()
defer s.mu.Unlock()
var size int64
for _, snap := range s.sources {
if len(snap.refs) == 0 {
ss, err := snap.getSize()
if err != nil {
return 0, err
}
size += ss
}
}
return size, nil
}
// Prune allows manually cleaning up the cache
func (s *fsCacheStore) Prune() (uint64, error) {
s.mu.Lock()
defer s.mu.Unlock()
var size uint64
for id, snap := range s.sources {
if len(snap.refs) == 0 {
ss, err := snap.getSize()
if err != nil {
return size, err
}
if err := s.delete(id); err != nil {
return size, errors.Wrapf(err, "failed to delete %s", id)
}
size += uint64(ss)
}
}
return size, nil
}
// GC runs a garbage collector on FSCache
func (s *fsCacheStore) GC() error {
s.mu.Lock()
defer s.mu.Unlock()
var size uint64
cutoff := time.Now().Add(-s.gcPolicy.MaxKeepDuration)
var blacklist []*cachedSource
for id, snap := range s.sources {
if len(snap.refs) == 0 {
if cutoff.After(snap.CachePolicy.LastUsed) {
if err := s.delete(id); err != nil {
return errors.Wrapf(err, "failed to delete %s", id)
}
} else {
ss, err := snap.getSize()
if err != nil {
return err
}
size += uint64(ss)
blacklist = append(blacklist, snap)
}
}
}
sort.Sort(sortableCacheSources(blacklist))
for _, snap := range blacklist {
if size <= s.gcPolicy.MaxSize {
break
}
ss, err := snap.getSize()
if err != nil {
return err
}
if err := s.delete(snap.id); err != nil {
return errors.Wrapf(err, "failed to delete %s", snap.id)
}
size -= uint64(ss)
}
return nil
}
// keep mu while calling this
func (s *fsCacheStore) delete(id string) error {
src, ok := s.sources[id]
if !ok {
return nil
}
if len(src.refs) > 0 {
return errors.Errorf("can't delete %s because it has active references", id)
}
delete(s.sources, id)
if err := s.db.Update(func(tx *bolt.Tx) error {
return tx.DeleteBucket([]byte(id))
}); err != nil {
return err
}
if err := s.fs.Remove(src.BackendID); err != nil {
return err
}
return nil
}
type sourceMeta struct {
SharedKey string
BackendID string
CachePolicy CachePolicy
Size int64
}
type cachedSource struct {
sourceMeta
refs map[*cachedSourceRef]struct{}
id string
dir string
src *remotecontext.CachableSource
storage *fsCacheStore
cached bool // keep track if cache is up to date
}
type cachedSourceRef struct {
*cachedSource
}
func (cs *cachedSource) Dir() string {
return cs.dir
}
// hold storage lock before calling
func (cs *cachedSource) getRef() *cachedSourceRef {
ref := &cachedSourceRef{cachedSource: cs}
cs.refs[ref] = struct{}{}
return ref
}
// hold storage lock before calling
func (cs *cachedSource) getSize() (int64, error) {
if cs.sourceMeta.Size < 0 {
ss, err := directory.Size(cs.dir)
if err != nil {
return 0, err
}
if err := cs.resetSize(ss); err != nil {
return 0, err
}
return ss, nil
}
return cs.sourceMeta.Size, nil
}
func (cs *cachedSource) resetSize(val int64) error {
cs.sourceMeta.Size = val
return cs.saveMeta()
}
func (cs *cachedSource) saveMeta() error {
return cs.storage.db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte(cs.id))
dt, err := json.Marshal(cs.sourceMeta)
if err != nil {
return err
}
return b.Put([]byte(metaKey), dt)
})
}
func (csr *cachedSourceRef) Release() error {
csr.cachedSource.storage.mu.Lock()
defer csr.cachedSource.storage.mu.Unlock()
delete(csr.cachedSource.refs, csr)
if len(csr.cachedSource.refs) == 0 {
go csr.cachedSource.storage.GC()
}
return nil
}
type detectChanges struct {
f fsutil.ChangeFunc
supported bool
}
func (dc *detectChanges) HandleChange(kind fsutil.ChangeKind, path string, fi os.FileInfo, err error) error {
if dc == nil {
return nil
}
return dc.f(kind, path, fi, err)
}
func (dc *detectChanges) MarkSupported(v bool) {
if dc == nil {
return
}
dc.supported = v
}
type wrappedContext struct {
builder.Source
closer func() error
}
func (wc *wrappedContext) Close() error {
if err := wc.Source.Close(); err != nil {
return err
}
return wc.closer()
}
type sortableCacheSources []*cachedSource
// Len is the number of elements in the collection.
func (s sortableCacheSources) Len() int {
return len(s)
}
// Less reports whether the element with
// index i should sort before the element with index j.
func (s sortableCacheSources) Less(i, j int) bool {
return s[i].CachePolicy.LastUsed.Before(s[j].CachePolicy.LastUsed)
}
// Swap swaps the elements with indexes i and j.
func (s sortableCacheSources) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}


@ -0,0 +1,131 @@
package fscache
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/docker/docker/client/session/filesync"
"github.com/stretchr/testify/assert"
"golang.org/x/net/context"
)
func TestFSCache(t *testing.T) {
tmpDir, err := ioutil.TempDir("", "fscache")
assert.Nil(t, err)
defer os.RemoveAll(tmpDir)
backend := NewNaiveCacheBackend(filepath.Join(tmpDir, "backend"))
opt := Opt{
Root: tmpDir,
Backend: backend,
GCPolicy: GCPolicy{MaxSize: 15, MaxKeepDuration: time.Hour},
}
fscache, err := NewFSCache(opt)
assert.Nil(t, err)
defer fscache.Close()
err = fscache.RegisterTransport("test", &testTransport{})
assert.Nil(t, err)
src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
assert.Nil(t, err)
dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data")
// same id doesn't recalculate anything
src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
assert.Nil(t, err)
assert.Equal(t, src1.Root(), src2.Root())
dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data")
assert.Nil(t, src2.Close())
src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
assert.Nil(t, err)
assert.NotEqual(t, src1.Root(), src3.Root())
dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data2")
s, err := fscache.DiskUsage()
assert.Nil(t, err)
assert.Equal(t, s, int64(0))
assert.Nil(t, src3.Close())
s, err = fscache.DiskUsage()
assert.Nil(t, err)
assert.Equal(t, s, int64(5))
// a new upload with the same shared key should overwrite
src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
assert.Nil(t, err)
assert.NotEqual(t, src1.Root(), src3.Root())
dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3"))
assert.Nil(t, err)
assert.Equal(t, string(dt), "data3")
assert.Equal(t, src4.Root(), src3.Root())
assert.Nil(t, src4.Close())
s, err = fscache.DiskUsage()
assert.Nil(t, err)
assert.Equal(t, s, int64(10))
// this one goes over the GC limit
src5, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo4", "datadata", "baz"})
assert.Nil(t, err)
assert.Nil(t, src5.Close())
// GC happens async
time.Sleep(100 * time.Millisecond)
// only last insertion after GC
s, err = fscache.DiskUsage()
assert.Nil(t, err)
assert.Equal(t, s, int64(8))
// prune deletes everything
released, err := fscache.Prune()
assert.Nil(t, err)
assert.Equal(t, released, uint64(8))
s, err = fscache.DiskUsage()
assert.Nil(t, err)
assert.Equal(t, s, int64(0))
}
type testTransport struct {
}
func (t *testTransport) Copy(ctx context.Context, id RemoteIdentifier, dest string, cs filesync.CacheUpdater) error {
testid := id.(*testIdentifier)
return ioutil.WriteFile(filepath.Join(dest, testid.filename), []byte(testid.data), 0600)
}
type testIdentifier struct {
filename string
data string
sharedKey string
}
func (t *testIdentifier) Key() string {
return t.filename
}
func (t *testIdentifier) SharedKey() string {
return t.sharedKey
}
func (t *testIdentifier) Transport() string {
return "test"
}


@ -0,0 +1,28 @@
package fscache
import (
"os"
"path/filepath"
"github.com/pkg/errors"
)
// NewNaiveCacheBackend is a basic backend implementation for fscache
func NewNaiveCacheBackend(root string) Backend {
return &naiveCacheBackend{root: root}
}
type naiveCacheBackend struct {
root string
}
func (tcb *naiveCacheBackend) Get(id string) (string, error) {
d := filepath.Join(tcb.root, id)
if err := os.MkdirAll(d, 0700); err != nil {
return "", errors.Wrapf(err, "failed to create tmp dir for %s", d)
}
return d, nil
}
func (tcb *naiveCacheBackend) Remove(id string) error {
return errors.WithStack(os.RemoveAll(filepath.Join(tcb.root, id)))
}


@ -0,0 +1,128 @@
package remotecontext
import (
"io"
"os"
"path/filepath"
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum"
"github.com/pkg/errors"
)
type archiveContext struct {
root string
sums tarsum.FileInfoSums
}
func (c *archiveContext) Close() error {
return os.RemoveAll(c.root)
}
func convertPathError(err error, cleanpath string) error {
if err, ok := err.(*os.PathError); ok {
err.Path = cleanpath
return err
}
return err
}
type modifiableContext interface {
builder.Source
// Remove deletes the entry specified by `path`.
// It is usual for directory entries to delete all their subentries.
Remove(path string) error
}
// FromArchive returns a build source from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func FromArchive(tarStream io.Reader) (builder.Source, error) {
root, err := ioutils.TempDir("", "docker-builder")
if err != nil {
return nil, err
}
tsc := &archiveContext{root: root}
// Make sure we clean-up upon error. In the happy case the caller
// is expected to manage the clean-up
defer func() {
if err != nil {
tsc.Close()
}
}()
decompressedStream, err := archive.DecompressStream(tarStream)
if err != nil {
return nil, err
}
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
if err != nil {
return nil, err
}
err = chrootarchive.Untar(sum, root, nil)
if err != nil {
return nil, err
}
tsc.sums = sum.GetSums()
return tsc, nil
}
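
A hedged usage sketch of `FromArchive`: build a one-file tar in memory, turn it into a build source, and look up the per-file hash. Only the standard library and APIs shown in this PR are used; `chrootarchive` re-execs the current binary, hence the `reexec.Init` call, mirroring this package's tests.

```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"log"

	"github.com/docker/docker/builder/remotecontext"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// chrootarchive re-execs the current binary to do the untar, so the
	// reexec handlers must be initialized first (as in this package's tests).
	if reexec.Init() {
		return
	}

	// Assemble a tiny build context as an uncompressed tar stream in memory.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	dockerfile := []byte("FROM busybox\n")
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	// Extract to a temporary directory and compute per-file tarsums.
	src, err := remotecontext.FromArchive(buf)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close() // removes the temporary directory again

	h, err := src.Hash("Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("root:", src.Root())
	fmt.Println("hash:", h)
}
```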
func (c *archiveContext) Root() string {
return c.root
}
func (c *archiveContext) Remove(path string) error {
_, fullpath, err := normalize(path, c.root)
if err != nil {
return err
}
return os.RemoveAll(fullpath)
}
func (c *archiveContext) Hash(path string) (string, error) {
cleanpath, fullpath, err := normalize(path, c.root)
if err != nil {
return "", err
}
rel, err := filepath.Rel(c.root, fullpath)
if err != nil {
return "", convertPathError(err, cleanpath)
}
// Use the checksum of the followed path (not the possible symlink) because
// this is the file that is actually copied.
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
return tsInfo.Sum(), nil
}
// We set sum to path by default for the case where GetFile returns nil.
// The usual case is if relative path is empty.
return path, nil // backwards compat TODO: see if really needed
}
func normalize(path, root string) (cleanPath, fullPath string, err error) {
cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
if err != nil {
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
}
if _, err := os.Lstat(fullPath); err != nil {
return "", "", errors.WithStack(convertPathError(err, path))
}
return
}


@ -19,6 +19,9 @@ import (
"github.com/pkg/errors"
)
// ClientSessionRemote is the identifier for the client-session context transport
const ClientSessionRemote = "client-session"
// Detect returns a context and dockerfile from remote location or local
// archive. progressReader is only used if remoteURL is actually a URL
// (not empty, and not a Git endpoint).
@ -29,6 +32,12 @@ func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *pars
switch {
case remoteURL == "":
remote, dockerfile, err = newArchiveRemote(config.Source, dockerfilePath)
case remoteURL == ClientSessionRemote:
res, err := parser.Parse(config.Source)
if err != nil {
return nil, nil, err
}
return nil, res, nil
case urlutil.IsGitURL(remoteURL):
remote, dockerfile, err = newGitRemote(remoteURL, dockerfilePath)
case urlutil.IsURL(remoteURL):
@ -41,7 +50,7 @@ func Detect(config backend.BuildConfig) (remote builder.Source, dockerfile *pars
func newArchiveRemote(rc io.ReadCloser, dockerfilePath string) (builder.Source, *parser.Result, error) {
defer rc.Close()
c, err := MakeTarSumContext(rc)
c, err := FromArchive(rc)
if err != nil {
return nil, nil, err
}


@ -12,10 +12,21 @@ import (
// NewFileHash returns new hash that is used for the builder cache keys
func NewFileHash(path, name string, fi os.FileInfo) (hash.Hash, error) {
hdr, err := archive.FileInfoHeader(path, name, fi)
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return nil, err
}
}
hdr, err := archive.FileInfoHeader(name, fi, link)
if err != nil {
return nil, err
}
if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return nil, err
}
tsh := &tarsumHash{hdr: hdr, Hash: sha256.New()}
tsh.Reset() // initialize header
return tsh, nil


@ -0,0 +1,3 @@
package remotecontext
//go:generate protoc --gogoslick_out=. tarsum.proto


@ -25,5 +25,5 @@ func MakeGitContext(gitURL string) (builder.Source, error) {
c.Close()
os.RemoveAll(root)
}()
return MakeTarSumContext(c)
return FromArchive(c)
}


@ -43,7 +43,7 @@ func (c *lazySource) Hash(path string) (string, error) {
fi, err := os.Lstat(fullPath)
if err != nil {
return "", err
return "", errors.WithStack(err)
}
relPath, err := Rel(c.root, fullPath)


@ -28,7 +28,7 @@ var mimeRe = regexp.MustCompile(acceptableRemoteMIME)
//
// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected
// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not).
// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned.
// In either case, an (assumed) tar stream is passed to FromArchive whose result is returned.
func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (builder.Source, error) {
f, err := GetWithStatusError(remoteURL)
if err != nil {
@ -63,7 +63,7 @@ func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.
// Pass through - this is a pre-packaged context, presumably
// with a Dockerfile with the right name inside it.
return MakeTarSumContext(contextReader)
return FromArchive(contextReader)
}
// GetWithStatusError does an http.Get() and returns an error if the


@ -212,26 +212,13 @@ func TestMakeRemoteContext(t *testing.T) {
t.Fatal("Remote context should not be nil")
}
tarSumCtx, ok := remoteContext.(*tarSumContext)
if !ok {
t.Fatal("Cast error, remote context should be casted to tarSumContext")
h, err := remoteContext.Hash(builder.DefaultDockerfileName)
if err != nil {
t.Fatalf("failed to compute hash %s", err)
}
fileInfoSums := tarSumCtx.sums
if fileInfoSums.Len() != 1 {
t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len())
}
fileInfo := fileInfoSums.GetFile(builder.DefaultDockerfileName)
if fileInfo == nil {
t.Fatalf("There should be file named %s in fileInfoSums", builder.DefaultDockerfileName)
}
if fileInfo.Pos() != 0 {
t.Fatalf("File %s should have position 0, got %d", builder.DefaultDockerfileName, fileInfo.Pos())
if expected, actual := "7b6b6b66bee9e2102fbdc2228be6c980a2a23adf371962a37286a49f7de0f7cc", h; expected != actual {
t.Fatalf("There should be file named %s %s in fileInfoSums", expected, actual)
}
}


@ -1,128 +1,174 @@
package remotecontext
import (
"io"
"fmt"
"os"
"path/filepath"
"sync"
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/symlink"
"github.com/docker/docker/pkg/tarsum"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
)
type tarSumContext struct {
type hashed interface {
Hash() string
}
// CachableSource is a source that contains cache records for its contents
type CachableSource struct {
mu sync.Mutex
root string
sums tarsum.FileInfoSums
tree *iradix.Tree
txn *iradix.Txn
}
func (c *tarSumContext) Close() error {
return os.RemoveAll(c.root)
// NewCachableSource creates new CachableSource
func NewCachableSource(root string) *CachableSource {
ts := &CachableSource{
tree: iradix.New(),
root: root,
}
return ts
}
func convertPathError(err error, cleanpath string) error {
if err, ok := err.(*os.PathError); ok {
err.Path = cleanpath
// MarshalBinary marshals current cache information to a byte array
func (cs *CachableSource) MarshalBinary() ([]byte, error) {
b := TarsumBackup{Hashes: make(map[string]string)}
root := cs.getRoot()
root.Walk(func(k []byte, v interface{}) bool {
b.Hashes[string(k)] = v.(*fileInfo).sum
return false
})
return b.Marshal()
}
// UnmarshalBinary decodes cache information from the presented byte array
func (cs *CachableSource) UnmarshalBinary(data []byte) error {
var b TarsumBackup
if err := b.Unmarshal(data); err != nil {
return err
}
return err
}
type modifiableContext interface {
builder.Source
// Remove deletes the entry specified by `path`.
// It is usual for directory entries to delete all its subentries.
Remove(path string) error
}
// MakeTarSumContext returns a build Context from a tar stream.
//
// It extracts the tar stream to a temporary folder that is deleted as soon as
// the Context is closed.
// As the extraction happens, a tarsum is calculated for every file, and the set of
// all those sums then becomes the source of truth for all operations on this Context.
//
// Closing tarStream has to be done by the caller.
func MakeTarSumContext(tarStream io.Reader) (builder.Source, error) {
root, err := ioutils.TempDir("", "docker-builder")
if err != nil {
return nil, err
txn := iradix.New().Txn()
for p, v := range b.Hashes {
txn.Insert([]byte(p), &fileInfo{sum: v})
}
cs.mu.Lock()
defer cs.mu.Unlock()
cs.tree = txn.Commit()
return nil
}
tsc := &tarSumContext{root: root}
// Make sure we clean-up upon error. In the happy case the caller
// is expected to manage the clean-up
defer func() {
// Scan rescans the cache information from the file system
func (cs *CachableSource) Scan() error {
lc, err := NewLazySource(cs.root)
if err != nil {
return err
}
txn := iradix.New().Txn()
err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error {
if err != nil {
tsc.Close()
return errors.Wrapf(err, "failed to walk %s", path)
}
}()
decompressedStream, err := archive.DecompressStream(tarStream)
if err != nil {
return nil, err
}
sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1)
if err != nil {
return nil, err
}
err = chrootarchive.Untar(sum, root, nil)
if err != nil {
return nil, err
}
tsc.sums = sum.GetSums()
return tsc, nil
}
func (c *tarSumContext) Root() string {
return c.root
}
func (c *tarSumContext) Remove(path string) error {
_, fullpath, err := normalize(path, c.root)
rel, err := Rel(cs.root, path)
if err != nil {
return err
}
h, err := lc.Hash(rel)
if err != nil {
return err
}
txn.Insert([]byte(rel), &fileInfo{sum: h})
return nil
})
if err != nil {
return err
}
return os.RemoveAll(fullpath)
cs.mu.Lock()
defer cs.mu.Unlock()
cs.tree = txn.Commit()
return nil
}
func (c *tarSumContext) Hash(path string) (string, error) {
cleanpath, fullpath, err := normalize(path, c.root)
if err != nil {
return "", err
// HandleChange notifies the source about a modification operation
func (cs *CachableSource) HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
cs.mu.Lock()
if cs.txn == nil {
cs.txn = cs.tree.Txn()
}
if kind == fsutil.ChangeKindDelete {
cs.txn.Delete([]byte(p))
cs.mu.Unlock()
return
}
rel, err := filepath.Rel(c.root, fullpath)
if err != nil {
return "", convertPathError(err, cleanpath)
h, ok := fi.(hashed)
if !ok {
cs.mu.Unlock()
return errors.Errorf("invalid fileinfo: %s", p)
}
// Use the checksum of the followed path(not the possible symlink) because
// this is the file that is actually copied.
if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil {
return tsInfo.Sum(), nil
hfi := &fileInfo{
sum: h.Hash(),
}
// We set sum to path by default for the case where GetFile returns nil.
// The usual case is if relative path is empty.
return path, nil // backwards compat TODO: see if really needed
cs.txn.Insert([]byte(p), hfi)
cs.mu.Unlock()
return nil
}
func normalize(path, root string) (cleanPath, fullPath string, err error) {
cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
if err != nil {
return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
func (cs *CachableSource) getRoot() *iradix.Node {
cs.mu.Lock()
if cs.txn != nil {
cs.tree = cs.txn.Commit()
cs.txn = nil
}
if _, err := os.Lstat(fullPath); err != nil {
t := cs.tree
cs.mu.Unlock()
return t.Root()
}
// Close closes the source
func (cs *CachableSource) Close() error {
return nil
}
func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) {
cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root)
if err != nil {
return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath)
}
_, err = os.Lstat(fullpath)
if err != nil {
return "", "", convertPathError(err, path)
}
return
}
// Hash returns a hash for a single file in the source
func (cs *CachableSource) Hash(path string) (string, error) {
n := cs.getRoot()
sum := ""
// TODO: check this for symlinks
v, ok := n.Get([]byte(path))
if !ok {
sum = path
} else {
sum = v.(*fileInfo).sum
}
return sum, nil
}
// Root returns a root directory for the source
func (cs *CachableSource) Root() string {
return cs.root
}
type fileInfo struct {
sum string
}
func (fi *fileInfo) Hash() string {
return fi.sum
}
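
A small sketch of how `CachableSource` is meant to be driven, mirroring what `fscache.syncFrom` does: scan a directory, hash a file, persist the records, and load them back without rescanning the disk. The directory path is a placeholder.

```
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/builder/remotecontext"
)

func main() {
	// Build cache records for an existing directory (placeholder path).
	src := remotecontext.NewCachableSource("/tmp/build-context")
	if err := src.Scan(); err != nil {
		log.Fatal(err)
	}

	h, err := src.Hash("Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("hash:", h)

	// The records can be serialized (this is what fscache stores in bolt)
	// and loaded into a fresh source without touching the disk again.
	data, err := src.MarshalBinary()
	if err != nil {
		log.Fatal(err)
	}
	restored := remotecontext.NewCachableSource("/tmp/build-context")
	if err := restored.UnmarshalBinary(data); err != nil {
		log.Fatal(err)
	}
	h2, err := restored.Hash("Dockerfile")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("restored hash:", h2)
}
```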


@ -0,0 +1,525 @@
// Code generated by protoc-gen-gogo.
// source: tarsum.proto
// DO NOT EDIT!
/*
Package remotecontext is a generated protocol buffer package.
It is generated from these files:
tarsum.proto
It has these top-level messages:
TarsumBackup
*/
package remotecontext
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import strings "strings"
import reflect "reflect"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type TarsumBackup struct {
Hashes map[string]string `protobuf:"bytes,1,rep,name=Hashes" json:"Hashes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (m *TarsumBackup) Reset() { *m = TarsumBackup{} }
func (*TarsumBackup) ProtoMessage() {}
func (*TarsumBackup) Descriptor() ([]byte, []int) { return fileDescriptorTarsum, []int{0} }
func (m *TarsumBackup) GetHashes() map[string]string {
if m != nil {
return m.Hashes
}
return nil
}
func init() {
proto.RegisterType((*TarsumBackup)(nil), "remotecontext.TarsumBackup")
}
func (this *TarsumBackup) Equal(that interface{}) bool {
if that == nil {
if this == nil {
return true
}
return false
}
that1, ok := that.(*TarsumBackup)
if !ok {
that2, ok := that.(TarsumBackup)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
if this == nil {
return true
}
return false
} else if this == nil {
return false
}
if len(this.Hashes) != len(that1.Hashes) {
return false
}
for i := range this.Hashes {
if this.Hashes[i] != that1.Hashes[i] {
return false
}
}
return true
}
func (this *TarsumBackup) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&remotecontext.TarsumBackup{")
keysForHashes := make([]string, 0, len(this.Hashes))
for k, _ := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
mapStringForHashes := "map[string]string{"
for _, k := range keysForHashes {
mapStringForHashes += fmt.Sprintf("%#v: %#v,", k, this.Hashes[k])
}
mapStringForHashes += "}"
if this.Hashes != nil {
s = append(s, "Hashes: "+mapStringForHashes+",\n")
}
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringTarsum(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func (m *TarsumBackup) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *TarsumBackup) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Hashes) > 0 {
for k, _ := range m.Hashes {
dAtA[i] = 0xa
i++
v := m.Hashes[k]
mapSize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
i = encodeVarintTarsum(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintTarsum(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
dAtA[i] = 0x12
i++
i = encodeVarintTarsum(dAtA, i, uint64(len(v)))
i += copy(dAtA[i:], v)
}
}
return i, nil
}
func encodeFixed64Tarsum(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Tarsum(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintTarsum(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *TarsumBackup) Size() (n int) {
var l int
_ = l
if len(m.Hashes) > 0 {
for k, v := range m.Hashes {
_ = k
_ = v
mapEntrySize := 1 + len(k) + sovTarsum(uint64(len(k))) + 1 + len(v) + sovTarsum(uint64(len(v)))
n += mapEntrySize + 1 + sovTarsum(uint64(mapEntrySize))
}
}
return n
}
func sovTarsum(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozTarsum(x uint64) (n int) {
return sovTarsum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *TarsumBackup) String() string {
if this == nil {
return "nil"
}
keysForHashes := make([]string, 0, len(this.Hashes))
for k, _ := range this.Hashes {
keysForHashes = append(keysForHashes, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
mapStringForHashes := "map[string]string{"
for _, k := range keysForHashes {
mapStringForHashes += fmt.Sprintf("%v: %v,", k, this.Hashes[k])
}
mapStringForHashes += "}"
s := strings.Join([]string{`&TarsumBackup{`,
`Hashes:` + mapStringForHashes + `,`,
`}`,
}, "")
return s
}
func valueToStringTarsum(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *TarsumBackup) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: TarsumBackup: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: TarsumBackup: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Hashes", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthTarsum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthTarsum
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Hashes == nil {
m.Hashes = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTarsum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthTarsum
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Hashes[mapkey] = mapvalue
} else {
var mapvalue string
m.Hashes[mapkey] = mapvalue
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTarsum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthTarsum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipTarsum(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthTarsum
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowTarsum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipTarsum(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthTarsum = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowTarsum = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("tarsum.proto", fileDescriptorTarsum) }
var fileDescriptorTarsum = []byte{
// 196 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x49, 0x2c, 0x2a,
0x2e, 0xcd, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x2d, 0x4a, 0xcd, 0xcd, 0x2f, 0x49,
0x4d, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0x51, 0xea, 0x62, 0xe4, 0xe2, 0x09, 0x01, 0xcb, 0x3b,
0x25, 0x26, 0x67, 0x97, 0x16, 0x08, 0xd9, 0x73, 0xb1, 0x79, 0x24, 0x16, 0x67, 0xa4, 0x16, 0x4b,
0x30, 0x2a, 0x30, 0x6b, 0x70, 0x1b, 0xa9, 0xeb, 0xa1, 0x68, 0xd0, 0x43, 0x56, 0xac, 0x07, 0x51,
0xe9, 0x9a, 0x57, 0x52, 0x54, 0x19, 0x04, 0xd5, 0x26, 0x65, 0xc9, 0xc5, 0x8d, 0x24, 0x2c, 0x24,
0xc0, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0x62, 0x0a, 0x89,
0x70, 0xb1, 0x96, 0x25, 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b,
0x46, 0x27, 0x9d, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xb1,
0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7,
0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c, 0x1f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c,
0xc7, 0x90, 0xc4, 0x06, 0xf6, 0x90, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0x57, 0x7d, 0x3f,
0xe0, 0x00, 0x00, 0x00,
}

Просмотреть файл

@ -0,0 +1,7 @@
syntax = "proto3";
package remotecontext; // no namespace because only used internally
message TarsumBackup {
map<string, string> Hashes = 1;
}
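For illustration only (this helper is not part of the change): the generated TarsumBackup type carries the per-file hash map that the Unmarshal code above decodes, and the gogo-generated Marshal/Unmarshal pair can round-trip it roughly like this, assuming the generated code lives in a builder/remotecontext package as the proto's package line suggests.
package remotecontext

// roundTripHashes is a hypothetical helper showing how the cached tarsum hashes
// could be serialized for a backup file and restored again.
func roundTripHashes(hashes map[string]string) (map[string]string, error) {
	in := &TarsumBackup{Hashes: hashes}
	data, err := in.Marshal() // generated by protoc-gen-gogo, shown above
	if err != nil {
		return nil, err
	}
	out := &TarsumBackup{}
	if err := out.Unmarshal(data); err != nil {
		return nil, err
	}
	return out.Hashes, nil
}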

Просмотреть файл

@ -9,6 +9,7 @@ import (
"github.com/docker/docker/builder"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/reexec"
"github.com/pkg/errors"
)
const (
@ -22,24 +23,22 @@ func init() {
func TestCloseRootDirectory(t *testing.T) {
contextDir, err := ioutil.TempDir("", "builder-tarsum-test")
defer os.RemoveAll(contextDir)
if err != nil {
t.Fatalf("Error with creating temporary directory: %s", err)
}
tarsum := &tarSumContext{root: contextDir}
err = tarsum.Close()
src := makeTestArchiveContext(t, contextDir)
err = src.Close()
if err != nil {
t.Fatalf("Error while executing Close: %s", err)
}
_, err = os.Stat(contextDir)
_, err = os.Stat(src.Root())
if !os.IsNotExist(err) {
t.Fatal("Directory should not exist at this point")
defer os.RemoveAll(contextDir)
}
}
@ -49,7 +48,7 @@ func TestHashFile(t *testing.T) {
createTestTempFile(t, contextDir, filename, contents, 0755)
tarSum := makeTestTarsumContext(t, contextDir)
tarSum := makeTestArchiveContext(t, contextDir)
sum, err := tarSum.Hash(filename)
@ -80,7 +79,7 @@ func TestHashSubdir(t *testing.T) {
testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0755)
tarSum := makeTestTarsumContext(t, contextDir)
tarSum := makeTestArchiveContext(t, contextDir)
relativePath, err := filepath.Rel(contextDir, testFilename)
@ -109,11 +108,9 @@ func TestStatNotExisting(t *testing.T) {
contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
defer cleanup()
tarSum := &tarSumContext{root: contextDir}
_, err := tarSum.Hash("not-existing")
if !os.IsNotExist(err) {
src := makeTestArchiveContext(t, contextDir)
_, err := src.Hash("not-existing")
if !os.IsNotExist(errors.Cause(err)) {
t.Fatalf("This file should not exist: %s", err)
}
}
@ -130,30 +127,31 @@ func TestRemoveDirectory(t *testing.T) {
t.Fatalf("Error when getting relative path: %s", err)
}
tarSum := &tarSumContext{root: contextDir}
src := makeTestArchiveContext(t, contextDir)
tarSum := src.(modifiableContext)
err = tarSum.Remove(relativePath)
if err != nil {
t.Fatalf("Error when executing Remove: %s", err)
}
_, err = os.Stat(contextSubdir)
_, err = src.Hash(contextSubdir)
if !os.IsNotExist(err) {
if !os.IsNotExist(errors.Cause(err)) {
t.Fatal("Directory should not exist at this point")
}
}
func makeTestTarsumContext(t *testing.T, dir string) builder.Source {
func makeTestArchiveContext(t *testing.T, dir string) builder.Source {
tarStream, err := archive.Tar(dir, archive.Uncompressed)
if err != nil {
t.Fatalf("error: %s", err)
}
defer tarStream.Close()
tarSum, err := MakeTarSumContext(tarStream)
tarSum, err := FromArchive(tarStream)
if err != nil {
t.Fatalf("Error when executing MakeTarSumContext: %s", err)
t.Fatalf("Error when executing FromArchive: %s", err)
}
return tarSum
}

30
client/build_prune.go Normal file
Просмотреть файл

@ -0,0 +1,30 @@
package client
import (
"encoding/json"
"fmt"
"github.com/docker/docker/api/types"
"golang.org/x/net/context"
)
// BuildCachePrune requests the daemon to delete unused cache data
func (cli *Client) BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error) {
if err := cli.NewVersionError("1.31", "build prune"); err != nil {
return nil, err
}
report := types.BuildCachePruneReport{}
serverResp, err := cli.post(ctx, "/build/prune", nil, nil, nil)
if err != nil {
return nil, err
}
defer ensureReaderClosed(serverResp)
if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
return nil, fmt.Errorf("Error retrieving disk usage: %v", err)
}
return &report, nil
}
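A minimal sketch of how a caller might use the new endpoint, assuming a client built with NewEnvClient and a daemon that speaks API 1.31 or later (error handling kept short):
package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	report, err := cli.BuildCachePrune(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// SpaceReclaimed reports how many bytes of cached build sources were removed.
	fmt.Printf("reclaimed %d bytes of build cache\n", report.SpaceReclaimed)
}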

Просмотреть файл

@ -1,11 +1,9 @@
package client
import (
"bytes"
"bufio"
"crypto/tls"
"errors"
"fmt"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
@ -16,6 +14,7 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/tlsconfig"
"github.com/docker/go-connections/sockets"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
@ -48,49 +47,12 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu
}
req = cli.addHeaders(req, headers)
req.Host = cli.addr
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", "tcp")
conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
}
return types.HijackedResponse{}, err
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := conn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
clientconn := httputil.NewClientConn(conn, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
resp, err := clientconn.Do(req)
conn, err := cli.setupHijackConn(req, "tcp")
if err != nil {
return types.HijackedResponse{}, err
}
defer resp.Body.Close()
switch resp.StatusCode {
case http.StatusOK, http.StatusSwitchingProtocols:
rwc, br := clientconn.Hijack()
return types.HijackedResponse{Conn: rwc, Reader: br}, err
}
errbody, err := ioutil.ReadAll(resp.Body)
if err != nil {
return types.HijackedResponse{}, err
}
return types.HijackedResponse{}, fmt.Errorf("Error response from daemon: %s", bytes.TrimSpace(errbody))
return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err
}
func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
@ -189,3 +151,56 @@ func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) {
}
return net.Dial(proto, addr)
}
func (cli *Client) setupHijackConn(req *http.Request, proto string) (net.Conn, error) {
req.Host = cli.addr
req.Header.Set("Connection", "Upgrade")
req.Header.Set("Upgrade", proto)
conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport))
if err != nil {
return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
}
// When we set up a TCP connection for hijack, there could be long periods
// of inactivity (a long running command with no output) that in certain
// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
// state. Setting TCP KeepAlive on the socket connection will prohibit
// ECONNTIMEOUT unless the socket connection truly is broken
if tcpConn, ok := conn.(*net.TCPConn); ok {
tcpConn.SetKeepAlive(true)
tcpConn.SetKeepAlivePeriod(30 * time.Second)
}
clientconn := httputil.NewClientConn(conn, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
resp, err := clientconn.Do(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusSwitchingProtocols {
resp.Body.Close()
return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
}
c, br := clientconn.Hijack()
if br.Buffered() > 0 {
// If there is buffered content, wrap the connection
c = &hijackedConn{c, br}
} else {
br.Reset(nil)
}
return c, nil
}
type hijackedConn struct {
net.Conn
r *bufio.Reader
}
func (c *hijackedConn) Read(b []byte) (int, error) {
return c.r.Read(b)
}

Просмотреть файл

@ -120,6 +120,9 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur
return query, err
}
query.Set("cachefrom", string(cacheFromJSON))
if options.SessionID != "" {
query.Set("session", options.SessionID)
}
return query, nil
}

Просмотреть файл

@ -2,6 +2,7 @@ package client
import (
"io"
"net"
"time"
"github.com/docker/docker/api/types"
@ -35,6 +36,7 @@ type CommonAPIClient interface {
ServerVersion(ctx context.Context) (types.Version, error)
NegotiateAPIVersion(ctx context.Context)
NegotiateAPIVersionPing(types.Ping)
DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
}
// ContainerAPIClient defines API client methods for the containers
@ -80,6 +82,7 @@ type DistributionAPIClient interface {
// ImageAPIClient defines API client methods for the images
type ImageAPIClient interface {
ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error)
BuildCachePrune(ctx context.Context) (*types.BuildCachePruneReport, error)
ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error)
ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error)
ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error)

19
client/session.go Normal file
Просмотреть файл

@ -0,0 +1,19 @@
package client
import (
"net"
"net/http"
"golang.org/x/net/context"
)
// DialSession returns a connection that can be used for communication with the daemon
func (cli *Client) DialSession(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
req, err := http.NewRequest("POST", "/session", nil)
if err != nil {
return nil, err
}
req = cli.addHeaders(req, meta)
return cli.setupHijackConn(req, proto)
}

Просмотреть файл

@ -0,0 +1,30 @@
package filesync
import (
"time"
"google.golang.org/grpc"
"github.com/Sirupsen/logrus"
"github.com/tonistiigi/fsutil"
)
func sendDiffCopy(stream grpc.Stream, dir string, excludes []string, progress progressCb) error {
return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
ExcludePatterns: excludes,
}, progress)
}
func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error {
st := time.Now()
defer func() {
logrus.Debugf("diffcopy took: %v", time.Since(st))
}()
var cf fsutil.ChangeFunc
if cu != nil {
cu.MarkSupported(true)
cf = cu.HandleChange
}
return fsutil.Receive(ds.Context(), ds, dest, cf)
}

Просмотреть файл

@ -0,0 +1,173 @@
package filesync
import (
"os"
"strings"
"github.com/docker/docker/client/session"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
type fsSyncProvider struct {
root string
excludes []string
p progressCb
doneCh chan error
}
// NewFSSyncProvider creates a new provider for sending files from the client
func NewFSSyncProvider(root string, excludes []string) session.Attachable {
p := &fsSyncProvider{
root: root,
excludes: excludes,
}
return p
}
func (sp *fsSyncProvider) Register(server *grpc.Server) {
RegisterFileSyncServer(server, sp)
}
func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error {
return sp.handle("diffcopy", stream)
}
func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error {
return sp.handle("tarstream", stream)
}
func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error {
var pr *protocol
for _, p := range supportedProtocols {
if method == p.name && isProtoSupported(p.name) {
pr = &p
break
}
}
if pr == nil {
return errors.New("failed to negotiate protocol")
}
opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
var excludes []string
if len(opts["Override-Excludes"]) == 0 || opts["Override-Excludes"][0] != "true" {
excludes = sp.excludes
}
var progress progressCb
if sp.p != nil {
progress = sp.p
sp.p = nil
}
var doneCh chan error
if sp.doneCh != nil {
doneCh = sp.doneCh
sp.doneCh = nil
}
err := pr.sendFn(stream, sp.root, excludes, progress)
if doneCh != nil {
if err != nil {
doneCh <- err
}
close(doneCh)
}
return err
}
func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) {
sp.p = f
sp.doneCh = doneCh
}
type progressCb func(int, bool)
type protocol struct {
name string
sendFn func(stream grpc.Stream, srcDir string, excludes []string, progress progressCb) error
recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error
}
func isProtoSupported(p string) bool {
// TODO: this should be removed after testing if stability is confirmed
if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" {
return strings.EqualFold(p, override)
}
return true
}
var supportedProtocols = []protocol{
{
name: "diffcopy",
sendFn: sendDiffCopy,
recvFn: recvDiffCopy,
},
{
name: "tarstream",
sendFn: sendTarStream,
recvFn: recvTarStream,
},
}
// FSSendRequestOpt defines options for FSSend request
type FSSendRequestOpt struct {
SrcPaths []string
OverrideExcludes bool
DestDir string
CacheUpdater CacheUpdater
}
// CacheUpdater is an object capable of sending notifications about cache hash changes
type CacheUpdater interface {
MarkSupported(bool)
HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error
}
// FSSync initializes a transfer of files
func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error {
var pr *protocol
for _, p := range supportedProtocols {
if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) {
pr = &p
break
}
}
if pr == nil {
return errors.New("no fssync handlers")
}
opts := make(map[string][]string)
if opt.OverrideExcludes {
opts["Override-Excludes"] = []string{"true"}
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client := NewFileSyncClient(c.Conn())
var stream grpc.ClientStream
ctx = metadata.NewContext(ctx, opts)
switch pr.name {
case "tarstream":
cc, err := client.TarStream(ctx)
if err != nil {
return err
}
stream = cc
case "diffcopy":
cc, err := client.DiffCopy(ctx)
if err != nil {
return err
}
stream = cc
}
return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater)
}
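To show how the two halves are meant to fit together, here is a rough sketch (not taken from this diff; the directory names and the ".git" exclude are made up): the client registers an FS sync provider on its session, and daemon-side code holding a session.Caller pulls the files with FSSync.
package example

import (
	"github.com/docker/docker/client"
	"github.com/docker/docker/client/session"
	"github.com/docker/docker/client/session/filesync"
	"golang.org/x/net/context"
)

// shareContext exposes dir over a new session; the transfer stays available until Close.
func shareContext(ctx context.Context, cli *client.Client, dir string) (*session.Session, error) {
	sess, err := session.NewSession("example-build", "example-key")
	if err != nil {
		return nil, err
	}
	sess.Allow(filesync.NewFSSyncProvider(dir, []string{".git"}))
	go func() {
		_ = sess.Run(ctx, cli.DialSession) // returns once the session is closed
	}()
	return sess, nil
}

// receiveContext is what daemon-side code holding a session.Caller would run.
func receiveContext(ctx context.Context, c session.Caller, dest string) error {
	return filesync.FSSync(ctx, c, filesync.FSSendRequestOpt{
		DestDir:          dest,
		OverrideExcludes: false,
	})
}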

Просмотреть файл

@ -0,0 +1,575 @@
// Code generated by protoc-gen-gogo.
// source: filesync.proto
// DO NOT EDIT!
/*
Package filesync is a generated protocol buffer package.
It is generated from these files:
filesync.proto
It has these top-level messages:
BytesMessage
*/
package filesync
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import bytes "bytes"
import strings "strings"
import reflect "reflect"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// BytesMessage contains a chunk of byte data
type BytesMessage struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorFilesync, []int{0} }
func (m *BytesMessage) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
func init() {
proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage")
}
func (this *BytesMessage) Equal(that interface{}) bool {
if that == nil {
if this == nil {
return true
}
return false
}
that1, ok := that.(*BytesMessage)
if !ok {
that2, ok := that.(BytesMessage)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
if this == nil {
return true
}
return false
} else if this == nil {
return false
}
if !bytes.Equal(this.Data, that1.Data) {
return false
}
return true
}
func (this *BytesMessage) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&filesync.BytesMessage{")
s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringFilesync(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for FileSync service
type FileSyncClient interface {
DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error)
TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error)
}
type fileSyncClient struct {
cc *grpc.ClientConn
}
func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient {
return &fileSyncClient{cc}
}
func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) {
stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSync/DiffCopy", opts...)
if err != nil {
return nil, err
}
x := &fileSyncDiffCopyClient{stream}
return x, nil
}
type FileSync_DiffCopyClient interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ClientStream
}
type fileSyncDiffCopyClient struct {
grpc.ClientStream
}
func (x *fileSyncDiffCopyClient) Send(m *BytesMessage) error {
return x.ClientStream.SendMsg(m)
}
func (x *fileSyncDiffCopyClient) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) {
stream, err := grpc.NewClientStream(ctx, &_FileSync_serviceDesc.Streams[1], c.cc, "/moby.filesync.v1.FileSync/TarStream", opts...)
if err != nil {
return nil, err
}
x := &fileSyncTarStreamClient{stream}
return x, nil
}
type FileSync_TarStreamClient interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ClientStream
}
type fileSyncTarStreamClient struct {
grpc.ClientStream
}
func (x *fileSyncTarStreamClient) Send(m *BytesMessage) error {
return x.ClientStream.SendMsg(m)
}
func (x *fileSyncTarStreamClient) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for FileSync service
type FileSyncServer interface {
DiffCopy(FileSync_DiffCopyServer) error
TarStream(FileSync_TarStreamServer) error
}
func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) {
s.RegisterService(&_FileSync_serviceDesc, srv)
}
func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream})
}
type FileSync_DiffCopyServer interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ServerStream
}
type fileSyncDiffCopyServer struct {
grpc.ServerStream
}
func (x *fileSyncDiffCopyServer) Send(m *BytesMessage) error {
return x.ServerStream.SendMsg(m)
}
func (x *fileSyncDiffCopyServer) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream})
}
type FileSync_TarStreamServer interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ServerStream
}
type fileSyncTarStreamServer struct {
grpc.ServerStream
}
func (x *fileSyncTarStreamServer) Send(m *BytesMessage) error {
return x.ServerStream.SendMsg(m)
}
func (x *fileSyncTarStreamServer) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _FileSync_serviceDesc = grpc.ServiceDesc{
ServiceName: "moby.filesync.v1.FileSync",
HandlerType: (*FileSyncServer)(nil),
Methods: []grpc.MethodDesc{},
Streams: []grpc.StreamDesc{
{
StreamName: "DiffCopy",
Handler: _FileSync_DiffCopy_Handler,
ServerStreams: true,
ClientStreams: true,
},
{
StreamName: "TarStream",
Handler: _FileSync_TarStream_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "filesync.proto",
}
func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Data) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
return i, nil
}
func encodeFixed64Filesync(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Filesync(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *BytesMessage) Size() (n int) {
var l int
_ = l
l = len(m.Data)
if l > 0 {
n += 1 + l + sovFilesync(uint64(l))
}
return n
}
func sovFilesync(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozFilesync(x uint64) (n int) {
return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *BytesMessage) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&BytesMessage{`,
`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
`}`,
}, "")
return s
}
func valueToStringFilesync(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *BytesMessage) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFilesync
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowFilesync
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthFilesync
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipFilesync(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthFilesync
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipFilesync(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFilesync
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFilesync
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFilesync
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthFilesync
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowFilesync
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipFilesync(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
var fileDescriptorFilesync = []byte{
// 198 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
0x16, 0x17, 0x27, 0xa6, 0xa7, 0x0a, 0x09, 0x71, 0xb1, 0xa4, 0x24, 0x96, 0x24, 0x4a, 0x30, 0x2a,
0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83,
0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32,
0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9,
0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e,
0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51,
0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c,
0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00,
}

Просмотреть файл

@ -0,0 +1,15 @@
syntax = "proto3";
package moby.filesync.v1;
option go_package = "filesync";
service FileSync{
rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage);
rpc TarStream(stream BytesMessage) returns (stream BytesMessage);
}
// BytesMessage contains a chunk of byte data
message BytesMessage{
bytes data = 1;
}

Просмотреть файл

@ -0,0 +1,3 @@
package filesync
//go:generate protoc --gogoslick_out=plugins=grpc:. filesync.proto

Просмотреть файл

@ -0,0 +1,83 @@
package filesync
import (
"io"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/chrootarchive"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
func sendTarStream(stream grpc.Stream, dir string, excludes []string, progress progressCb) error {
a, err := archive.TarWithOptions(dir, &archive.TarOptions{
ExcludePatterns: excludes,
})
if err != nil {
return err
}
size := 0
buf := make([]byte, 1<<15)
t := new(BytesMessage)
for {
n, err := a.Read(buf)
if err != nil {
if err == io.EOF {
break
}
return err
}
t.Data = buf[:n]
if err := stream.SendMsg(t); err != nil {
return err
}
size += n
if progress != nil {
progress(size, false)
}
}
if progress != nil {
progress(size, true)
}
return nil
}
func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error {
pr, pw := io.Pipe()
go func() {
var (
err error
t = new(BytesMessage)
)
for {
if err = ds.RecvMsg(t); err != nil {
if err == io.EOF {
err = nil
}
break
}
_, err = pw.Write(t.Data)
if err != nil {
break
}
}
if err = pw.CloseWithError(err); err != nil {
logrus.Errorf("failed to close tar transfer pipe")
}
}()
decompressedStream, err := archive.DecompressStream(pr)
if err != nil {
return errors.Wrap(err, "failed to decompress stream")
}
if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil {
return errors.Wrap(err, "failed to untar context")
}
return nil
}

62
client/session/grpc.go Normal file
Просмотреть файл

@ -0,0 +1,62 @@
package session
import (
"net"
"time"
"github.com/Sirupsen/logrus"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"google.golang.org/grpc"
"google.golang.org/grpc/health/grpc_health_v1"
)
func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) {
go func() {
<-ctx.Done()
conn.Close()
}()
logrus.Debugf("serving grpc connection")
(&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer})
}
func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) {
dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
return conn, nil
})
cc, err := grpc.DialContext(ctx, "", dialOpt, grpc.WithInsecure())
if err != nil {
return nil, nil, errors.Wrap(err, "failed to create grpc client")
}
ctx, cancel := context.WithCancel(ctx)
go monitorHealth(ctx, cc, cancel)
return ctx, cc, nil
}
func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) {
defer cancelConn()
defer cc.Close()
ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop()
healthClient := grpc_health_v1.NewHealthClient(cc)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
<-ticker.C
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
if err != nil {
return
}
}
}
}

187
client/session/manager.go Normal file
Просмотреть файл

@ -0,0 +1,187 @@
package session
import (
"net/http"
"strings"
"sync"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
)
// Caller can invoke requests on the session
type Caller interface {
Context() context.Context
Supports(method string) bool
Conn() *grpc.ClientConn
Name() string
SharedKey() string
}
type client struct {
Session
cc *grpc.ClientConn
supported map[string]struct{}
}
// Manager is a controller for accessing currently active sessions
type Manager struct {
sessions map[string]*client
mu sync.Mutex
updateCondition *sync.Cond
}
// NewManager returns a new Manager
func NewManager() (*Manager, error) {
sm := &Manager{
sessions: make(map[string]*client),
}
sm.updateCondition = sync.NewCond(&sm.mu)
return sm, nil
}
// HandleHTTPRequest handles an incoming HTTP request
func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
hijacker, ok := w.(http.Hijacker)
if !ok {
return errors.New("handler does not support hijack")
}
uuid := r.Header.Get(headerSessionUUID)
name := r.Header.Get(headerSessionName)
sharedKey := r.Header.Get(headerSessionSharedKey)
proto := r.Header.Get("Upgrade")
sm.mu.Lock()
if _, ok := sm.sessions[uuid]; ok {
sm.mu.Unlock()
return errors.Errorf("session %s already exists", uuid)
}
if proto == "" {
sm.mu.Unlock()
return errors.New("no upgrade proto in request")
}
if proto != "h2c" {
sm.mu.Unlock()
return errors.Errorf("protocol %s not supported", proto)
}
conn, _, err := hijacker.Hijack()
if err != nil {
sm.mu.Unlock()
return errors.Wrap(err, "failed to hijack connection")
}
resp := &http.Response{
StatusCode: http.StatusSwitchingProtocols,
ProtoMajor: 1,
ProtoMinor: 1,
Header: http.Header{},
}
resp.Header.Set("Connection", "Upgrade")
resp.Header.Set("Upgrade", proto)
// set raw mode
conn.Write([]byte{})
resp.Write(conn)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx, cc, err := grpcClientConn(ctx, conn)
if err != nil {
sm.mu.Unlock()
return err
}
c := &client{
Session: Session{
uuid: uuid,
name: name,
sharedKey: sharedKey,
ctx: ctx,
cancelCtx: cancel,
done: make(chan struct{}),
},
cc: cc,
supported: make(map[string]struct{}),
}
for _, m := range r.Header[headerSessionMethod] {
c.supported[strings.ToLower(m)] = struct{}{}
}
sm.sessions[uuid] = c
sm.updateCondition.Broadcast()
sm.mu.Unlock()
defer func() {
sm.mu.Lock()
delete(sm.sessions, uuid)
sm.mu.Unlock()
}()
<-c.ctx.Done()
conn.Close()
close(c.done)
return nil
}
// Get returns a session by UUID
func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
go func() {
select {
case <-ctx.Done():
sm.updateCondition.Broadcast()
}
}()
var c *client
sm.mu.Lock()
for {
select {
case <-ctx.Done():
sm.mu.Unlock()
return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
default:
}
var ok bool
c, ok = sm.sessions[uuid]
if !ok || c.closed() {
sm.updateCondition.Wait()
continue
}
sm.mu.Unlock()
break
}
return c, nil
}
func (c *client) Context() context.Context {
return c.context()
}
func (c *client) Name() string {
return c.name
}
func (c *client) SharedKey() string {
return c.sharedKey
}
func (c *client) Supports(url string) bool {
_, ok := c.supported[strings.ToLower(url)]
return ok
}
func (c *client) Conn() *grpc.ClientConn {
return c.cc
}
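As a hedged sketch of the daemon-side wiring (handler names are hypothetical; the real route registration lives in the session router added elsewhere in this PR): the manager answers POST /session upgrades and is later queried with the session ID carried by the build request.
package example

import (
	"net/http"

	"github.com/docker/docker/client/session"
	"golang.org/x/net/context"
)

// sessionHandler upgrades POST /session requests into long running gRPC sessions.
func sessionHandler(sm *session.Manager) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if err := sm.HandleHTTPRequest(context.Background(), w, r); err != nil {
			// Upgrade failures (missing or unsupported proto) surface as 400s.
			http.Error(w, err.Error(), http.StatusBadRequest)
		}
	}
}

// callerFor blocks until the session named in the build request is attached.
func callerFor(ctx context.Context, sm *session.Manager, sessionID string) (session.Caller, error) {
	return sm.Get(ctx, sessionID)
}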

117
client/session/session.go Normal file
Просмотреть файл

@ -0,0 +1,117 @@
package session
import (
"net"
"github.com/docker/docker/pkg/stringid"
"github.com/pkg/errors"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/health"
"google.golang.org/grpc/health/grpc_health_v1"
)
const (
headerSessionUUID = "X-Docker-Expose-Session-Uuid"
headerSessionName = "X-Docker-Expose-Session-Name"
headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method"
)
// Dialer returns a connection that can be used by the session
type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error)
// Attachable defines a feature that can be exposed on a session
type Attachable interface {
Register(*grpc.Server)
}
// Session is a long-running connection between a client and a daemon
type Session struct {
uuid string
name string
sharedKey string
ctx context.Context
cancelCtx func()
done chan struct{}
grpcServer *grpc.Server
}
// NewSession returns a new long running session
func NewSession(name, sharedKey string) (*Session, error) {
uuid := stringid.GenerateRandomID()
s := &Session{
uuid: uuid,
name: name,
sharedKey: sharedKey,
grpcServer: grpc.NewServer(),
}
grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer())
return s, nil
}
// Allow enables a given service to be reachable through the grpc session
func (s *Session) Allow(a Attachable) {
a.Register(s.grpcServer)
}
// UUID returns the unique identifier for the session
func (s *Session) UUID() string {
return s.uuid
}
// Run activates the session
func (s *Session) Run(ctx context.Context, dialer Dialer) error {
ctx, cancel := context.WithCancel(ctx)
s.cancelCtx = cancel
s.done = make(chan struct{})
defer cancel()
defer close(s.done)
meta := make(map[string][]string)
meta[headerSessionUUID] = []string{s.uuid}
meta[headerSessionName] = []string{s.name}
meta[headerSessionSharedKey] = []string{s.sharedKey}
for name, svc := range s.grpcServer.GetServiceInfo() {
for _, method := range svc.Methods {
meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name))
}
}
conn, err := dialer(ctx, "h2c", meta)
if err != nil {
return errors.Wrap(err, "failed to dial gRPC")
}
serve(ctx, s.grpcServer, conn)
return nil
}
// Close closes the session
func (s *Session) Close() error {
if s.cancelCtx != nil && s.done != nil {
s.cancelCtx()
<-s.done
}
return nil
}
func (s *Session) context() context.Context {
return s.ctx
}
func (s *Session) closed() bool {
select {
case <-s.context().Done():
return true
default:
return false
}
}
// MethodURL returns a gRPC method URL for service and method name
func MethodURL(s, m string) string {
return "/" + s + "/" + m
}
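Putting the pieces together on the client side, a sketch (the tag name is invented) of running a session alongside a build and pointing the build at it through ImageBuildOptions.SessionID, which the API client forwards as the "session" query parameter:
package example

import (
	"io"
	"io/ioutil"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/client/session"
	"golang.org/x/net/context"
)

// buildWithSession keeps the session's gRPC tunnel open for the duration of the build.
func buildWithSession(ctx context.Context, cli *client.Client, sess *session.Session, buildCtx io.Reader) error {
	go func() {
		_ = sess.Run(ctx, cli.DialSession) // returns when the session is closed
	}()
	defer sess.Close()

	resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
		Tags:      []string{"example:latest"},
		SessionID: sess.UUID(),
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	_, err = io.Copy(ioutil.Discard, resp.Body) // drain the streamed build output
	return err
}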

Просмотреть файл

@ -23,10 +23,14 @@ import (
"github.com/docker/docker/api/server/router/image"
"github.com/docker/docker/api/server/router/network"
pluginrouter "github.com/docker/docker/api/server/router/plugin"
sessionrouter "github.com/docker/docker/api/server/router/session"
swarmrouter "github.com/docker/docker/api/server/router/swarm"
systemrouter "github.com/docker/docker/api/server/router/system"
"github.com/docker/docker/api/server/router/volume"
"github.com/docker/docker/builder/dockerfile"
"github.com/docker/docker/builder/fscache"
"github.com/docker/docker/cli/debug"
"github.com/docker/docker/client/session"
"github.com/docker/docker/daemon"
"github.com/docker/docker/daemon/cluster"
"github.com/docker/docker/daemon/config"
@ -46,6 +50,7 @@ import (
"github.com/docker/docker/runconfig"
"github.com/docker/go-connections/tlsconfig"
swarmapi "github.com/docker/swarmkit/api"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
@ -121,6 +126,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
}()
}
// TODO: extract to newApiServerConfig()
serverConfig := &apiserver.Config{
Logging: true,
SocketGroup: cli.Config.SocketGroup,
@ -152,8 +158,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
cli.Config.Hosts = make([]string, 1)
}
api := apiserver.New(serverConfig)
cli.api = api
cli.api = apiserver.New(serverConfig)
var hosts []string
@ -189,7 +194,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
}
logrus.Debugf("Listener created for HTTP on %s (%s)", proto, addr)
hosts = append(hosts, protoAddrParts[1])
api.Accept(addr, ls...)
cli.api.Accept(addr, ls...)
}
registryService := registry.NewService(cli.Config.ServiceOptions)
@ -207,7 +212,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
pluginStore := plugin.NewStore()
if err := cli.initMiddlewares(api, serverConfig, pluginStore); err != nil {
if err := cli.initMiddlewares(cli.api, serverConfig, pluginStore); err != nil {
logrus.Fatalf("Error creating middlewares: %v", err)
}
@ -227,6 +232,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
return fmt.Errorf("Error validating authorization plugin: %v", err)
}
// TODO: move into startMetricsServer()
if cli.Config.MetricsAddress != "" {
if !d.HasExperimental() {
return fmt.Errorf("metrics-addr is only supported when experimental is enabled")
@ -236,6 +242,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
}
}
// TODO: createAndStartCluster()
name, _ := os.Hostname()
// Use a buffered channel to pass changes from store watch API to daemon
@ -269,7 +276,14 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
cli.d = d
initRouter(api, d, c)
routerOptions, err := newRouterOptions(cli.Config, d)
if err != nil {
return err
}
routerOptions.api = cli.api
routerOptions.cluster = c
initRouter(routerOptions)
// process cluster change notifications
watchCtx, cancel := context.WithCancel(context.Background())
@ -282,7 +296,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
// We need to start it as a goroutine and wait on it so
// daemon doesn't exit
serveAPIWait := make(chan error)
go api.Wait(serveAPIWait)
go cli.api.Wait(serveAPIWait)
// after the daemon is done setting up we can notify systemd api
notifySystem()
@ -300,6 +314,54 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) {
return nil
}
type routerOptions struct {
sessionManager *session.Manager
buildBackend *buildbackend.Backend
buildCache *fscache.FSCache
daemon *daemon.Daemon
api *apiserver.Server
cluster *cluster.Cluster
}
func newRouterOptions(config *config.Config, daemon *daemon.Daemon) (routerOptions, error) {
opts := routerOptions{}
sm, err := session.NewManager()
if err != nil {
return opts, errors.Wrap(err, "failed to create sessionmanager")
}
builderStateDir := filepath.Join(config.Root, "builder")
buildCache, err := fscache.NewFSCache(fscache.Opt{
Backend: fscache.NewNaiveCacheBackend(builderStateDir),
Root: builderStateDir,
GCPolicy: fscache.GCPolicy{ // TODO: expose this in config
MaxSize: 1024 * 1024 * 512, // 512MB
MaxKeepDuration: 7 * 24 * time.Hour, // 1 week
},
})
if err != nil {
return opts, errors.Wrap(err, "failed to create fscache")
}
manager, err := dockerfile.NewBuildManager(daemon, sm, buildCache, daemon.IDMappings())
if err != nil {
return opts, err
}
bb, err := buildbackend.NewBackend(daemon, manager, buildCache)
if err != nil {
return opts, errors.Wrap(err, "failed to create buildmanager")
}
return routerOptions{
sessionManager: sm,
buildBackend: bb,
buildCache: buildCache,
daemon: daemon,
}, nil
}
func (cli *DaemonCli) reloadConfig() {
reload := func(config *config.Config) {
@ -442,27 +504,28 @@ func loadDaemonCliConfig(opts *daemonOptions) (*config.Config, error) {
return conf, nil
}
func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
func initRouter(opts routerOptions) {
decoder := runconfig.ContainerDecoder{}
routers := []router.Router{
// we need to add the checkpoint router before the container router or the DELETE gets masked
checkpointrouter.NewRouter(d, decoder),
container.NewRouter(d, decoder),
image.NewRouter(d, decoder),
systemrouter.NewRouter(d, c),
volume.NewRouter(d),
build.NewRouter(buildbackend.NewBackend(d, d, d.IDMappings()), d),
swarmrouter.NewRouter(c),
pluginrouter.NewRouter(d.PluginManager()),
distributionrouter.NewRouter(d),
checkpointrouter.NewRouter(opts.daemon, decoder),
container.NewRouter(opts.daemon, decoder),
image.NewRouter(opts.daemon, decoder),
systemrouter.NewRouter(opts.daemon, opts.cluster, opts.buildCache),
volume.NewRouter(opts.daemon),
build.NewRouter(opts.buildBackend, opts.daemon),
sessionrouter.NewRouter(opts.sessionManager),
swarmrouter.NewRouter(opts.cluster),
pluginrouter.NewRouter(opts.daemon.PluginManager()),
distributionrouter.NewRouter(opts.daemon),
}
if d.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(d, c))
if opts.daemon.NetworkControllerEnabled() {
routers = append(routers, network.NewRouter(opts.daemon, opts.cluster))
}
if d.HasExperimental() {
if opts.daemon.HasExperimental() {
for _, r := range routers {
for _, route := range r.Routes() {
if experimental, ok := route.(router.ExperimentalRoute); ok {
@ -472,9 +535,10 @@ func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) {
}
}
s.InitRouter(debug.IsEnabled(), routers...)
opts.api.InitRouter(debug.IsEnabled(), routers...)
}
// TODO: remove this from cli and return the authzMiddleware
func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config, pluginStore *plugin.Store) error {
v := cfg.Version

Просмотреть файл

@ -5,7 +5,8 @@ source "${SCRIPTDIR}/.validate"
IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' |
grep -v '^vendor/' || true) )
grep -v '^vendor/' |
grep -v '\.pb\.go$' || true) )
unset IFS
badFiles=()

Просмотреть файл

@ -4,7 +4,7 @@ export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${SCRIPTDIR}/.validate"
IFS=$'\n'
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '^api/types/plugins/logdriver/entry.pb.go' || true) )
files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' | grep -v '^api/types/container/' | grep -v '\.pb\.go$' || true) )
unset IFS
errors=()

Просмотреть файл

@ -12,6 +12,8 @@ import (
"strings"
"github.com/docker/docker/api/types"
"github.com/docker/docker/client/session"
"github.com/docker/docker/client/session/filesync"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/cli/build/fakecontext"
"github.com/docker/docker/integration-cli/cli/build/fakegit"
@ -22,6 +24,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) {
@ -363,6 +366,108 @@ func (s *DockerRegistrySuite) TestBuildCopyFromForcePull(c *check.C) {
assert.Contains(c, string(out), "Successfully built")
}
func (s *DockerSuite) TestBuildWithSession(c *check.C) {
testRequires(c, ExperimentalDaemon)
dockerfile := `
FROM busybox
COPY file /
RUN cat /file
`
fctx := fakecontext.New(c, "",
fakecontext.WithFile("file", "some content"),
)
defer fctx.Close()
out := testBuildWithSession(c, fctx.Dir, dockerfile)
assert.Contains(c, out, "some content")
fctx.Add("second", "contentcontent")
dockerfile += `
COPY second /
RUN cat /second
`
out = testBuildWithSession(c, fctx.Dir, dockerfile)
assert.Equal(c, strings.Count(out, "Using cache"), 2)
assert.Contains(c, out, "contentcontent")
client, err := request.NewClient()
require.NoError(c, err)
du, err := client.DiskUsage(context.TODO())
assert.Nil(c, err)
assert.True(c, du.BuilderSize > 10)
out = testBuildWithSession(c, fctx.Dir, dockerfile)
assert.Equal(c, strings.Count(out, "Using cache"), 4)
du2, err := client.DiskUsage(context.TODO())
assert.Nil(c, err)
assert.Equal(c, du.BuilderSize, du2.BuilderSize)
// rebuild with regular tar, confirm cache still applies
fctx.Add("Dockerfile", dockerfile)
res, body, err := request.Post(
"/build",
request.RawContent(fctx.AsTarReader(c)),
request.ContentType("application/x-tar"))
require.NoError(c, err)
assert.Equal(c, http.StatusOK, res.StatusCode)
outBytes, err := testutil.ReadBody(body)
require.NoError(c, err)
assert.Contains(c, string(outBytes), "Successfully built")
assert.Equal(c, strings.Count(string(outBytes), "Using cache"), 4)
_, err = client.BuildCachePrune(context.TODO())
assert.Nil(c, err)
du, err = client.DiskUsage(context.TODO())
assert.Nil(c, err)
assert.Equal(c, du.BuilderSize, int64(0))
}
func testBuildWithSession(c *check.C, dir, dockerfile string) (outStr string) {
client, err := request.NewClient()
require.NoError(c, err)
sess, err := session.NewSession("foo1", "foo")
assert.Nil(c, err)
fsProvider := filesync.NewFSSyncProvider(dir, nil)
sess.Allow(fsProvider)
g, ctx := errgroup.WithContext(context.Background())
g.Go(func() error {
return sess.Run(ctx, client.DialSession)
})
g.Go(func() error {
res, body, err := request.Post("/build?remote=client-session&session="+sess.UUID(), func(req *http.Request) error {
req.Body = ioutil.NopCloser(strings.NewReader(dockerfile))
return nil
})
if err != nil {
return err
}
assert.Equal(c, res.StatusCode, http.StatusOK)
out, err := testutil.ReadBody(body)
require.NoError(c, err)
assert.Contains(c, string(out), "Successfully built")
sess.Close()
outStr = string(out)
return nil
})
err = g.Wait()
assert.Nil(c, err)
return
}
type buildLine struct {
Stream string
Aux struct {

Просмотреть файл

@ -0,0 +1,49 @@
package main
import (
"net/http"
"github.com/docker/docker/integration-cli/checker"
"github.com/docker/docker/integration-cli/request"
"github.com/docker/docker/pkg/testutil"
"github.com/go-check/check"
)
func (s *DockerSuite) TestSessionCreate(c *check.C) {
testRequires(c, ExperimentalDaemon)
res, body, err := request.Post("/session", func(r *http.Request) error {
r.Header.Set("X-Docker-Expose-Session-Uuid", "testsessioncreate") // so we don't block default name if something else is using it
r.Header.Set("Upgrade", "h2c")
return nil
})
c.Assert(err, checker.IsNil)
c.Assert(res.StatusCode, checker.Equals, http.StatusSwitchingProtocols)
c.Assert(res.Header.Get("Upgrade"), checker.Equals, "h2c")
c.Assert(body.Close(), checker.IsNil)
}
func (s *DockerSuite) TestSessionCreateWithBadUpgrade(c *check.C) {
testRequires(c, ExperimentalDaemon)
res, body, err := request.Post("/session")
c.Assert(err, checker.IsNil)
c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
buf, err := testutil.ReadBody(body)
c.Assert(err, checker.IsNil)
out := string(buf)
c.Assert(out, checker.Contains, "no upgrade")
res, body, err = request.Post("/session", func(r *http.Request) error {
r.Header.Set("Upgrade", "foo")
return nil
})
c.Assert(err, checker.IsNil)
c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
buf, err = testutil.ReadBody(body)
c.Assert(err, checker.IsNil)
out = string(buf)
c.Assert(out, checker.Contains, "not supported")
}

Просмотреть файл

@ -1789,7 +1789,7 @@ func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) {
// create a 2MiB image and mount it as graph root
// Why in a container? Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile)
dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0")
dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=3 count=0")
icmd.RunCommand("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img")).Assert(c, icmd.Success)
result := icmd.RunCommand("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img"))

Просмотреть файл

@ -17,6 +17,7 @@ import (
"strings"
"time"
"github.com/docker/docker/api"
dclient "github.com/docker/docker/client"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/ioutils"
@ -170,7 +171,7 @@ func NewClient() (dclient.APIClient, error) {
if err != nil {
return nil, err
}
return dclient.NewClient(host, "", httpClient, nil)
return dclient.NewClient(host, api.DefaultVersion, httpClient, nil)
}
// FIXME(vdemeester) httputil.ClientConn is deprecated, use http.Client instead (closer to actual client)

Просмотреть файл

@ -305,15 +305,7 @@ func (compression *Compression) Extension() string {
// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) {
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return nil, err
}
}
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(fi, link)
if err != nil {
return nil, err
@ -327,12 +319,18 @@ func FileInfoHeader(path, name string, fi os.FileInfo) (*tar.Header, error) {
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
return nil, err
}
return hdr, nil
}
// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem
// to a tar header
func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error {
capability, _ := system.Lgetxattr(path, "security.capability")
if capability != nil {
hdr.Xattrs = make(map[string]string)
hdr.Xattrs["security.capability"] = string(capability)
}
return hdr, nil
return nil
}
type tarWhiteoutConverter interface {
@ -386,10 +384,22 @@ func (ta *tarAppender) addTarFile(path, name string) error {
return err
}
hdr, err := FileInfoHeader(path, name, fi)
var link string
if fi.Mode()&os.ModeSymlink != 0 {
var err error
link, err = os.Readlink(path)
if err != nil {
return err
}
}
hdr, err := FileInfoHeader(name, fi, link)
if err != nil {
return err
}
if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil {
return err
}
// if it's not a directory and has more than 1 link,
// it's hard linked, so set the type flag accordingly
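With the symlink handling and the security.capability xattr lookup split out of FileInfoHeader, an external caller of pkg/archive would now follow roughly this pattern (sketch only; path and name handling is simplified):
package example

import (
	"archive/tar"
	"os"

	"github.com/docker/docker/pkg/archive"
)

// tarHeaderFor builds a tar header the way addTarFile above does: resolve the
// symlink target first, then read the security.capability xattr separately.
func tarHeaderFor(path, name string) (*tar.Header, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}
	var link string
	if fi.Mode()&os.ModeSymlink != 0 {
		if link, err = os.Readlink(path); err != nil {
			return nil, err
		}
	}
	hdr, err := archive.FileInfoHeader(name, fi, link)
	if err != nil {
		return nil, err
	}
	if err := archive.ReadSecurityXattrToTarHeader(path, hdr); err != nil {
		return nil, err
	}
	return hdr, nil
}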

Просмотреть файл

@ -45,16 +45,13 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
}
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
if ok {
// Currently go does not fill in the major/minors
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
}
}
return
@ -63,13 +60,10 @@ func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
if ok {
inode = uint64(s.Ino)
}
inode = uint64(s.Ino)
return
}

Просмотреть файл

@ -102,6 +102,8 @@ google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
# containerd
github.com/containerd/containerd 3addd840653146c90a254301d6c3a663c7fd6429
github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
# cluster
github.com/docker/swarmkit a4bf0135f63fb60f0e76ae81579cde87f580db6e

202
vendor/github.com/stevvooe/continuity/LICENSE сгенерированный поставляемый Normal file
Просмотреть файл

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/stevvooe/continuity/README.md generated vendored normal file

@ -0,0 +1,19 @@
# continuity
[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
A transport-agnostic, filesystem metadata manifest system
This project is a staging area for experiments in providing transport agnostic
metadata storage.
Please see https://github.com/opencontainers/specs/issues/11 for more details.
## Building Proto Package
If you change the proto file you will need to rebuild the generated Go with `go generate`.
```
go generate ./proto
```

vendor/github.com/stevvooe/continuity/sysx/asm.s generated vendored normal file

@ -0,0 +1,10 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !gccgo
#include "textflag.h"
TEXT ·use(SB),NOSPLIT,$0
RET

vendor/github.com/stevvooe/continuity/sysx/chmod_darwin.go generated vendored normal file

@ -0,0 +1,18 @@
package sysx
const (
// AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
AtSymlinkNofollow = 0x20
)
const (
// SYS_FCHMODAT defined from golang.org/sys/unix
SYS_FCHMODAT = 467
)
// These functions will be generated by generate.sh
// $ GOOS=darwin GOARCH=386 ./generate.sh chmod
// $ GOOS=darwin GOARCH=amd64 ./generate.sh chmod
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)

vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_386.go generated vendored normal file

@ -0,0 +1,25 @@
// mksyscall.pl -l32 chmod_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_amd64.go generated vendored normal file

@ -0,0 +1,25 @@
// mksyscall.pl chmod_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd.go generated vendored normal file

@ -0,0 +1,17 @@
package sysx
const (
// AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in <sys/fcntl.h>
AtSymlinkNofollow = 0x200
)
const (
// SYS_FCHMODAT defined from golang.org/sys/unix
SYS_FCHMODAT = 490
)
// These functions will be generated by generate.sh
// $ GOOS=freebsd GOARCH=amd64 ./generate.sh chmod
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)

vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd_amd64.go generated vendored normal file

@ -0,0 +1,25 @@
// mksyscall.pl chmod_freebsd.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
_, _, e1 := syscall.Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0)
use(unsafe.Pointer(_p0))
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/chmod_linux.go generated vendored normal file

@ -0,0 +1,12 @@
package sysx
import "syscall"
const (
// AtSymlinkNoFollow defined from AT_SYMLINK_NOFOLLOW in /usr/include/linux/fcntl.h
AtSymlinkNofollow = 0x100
)
func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
return syscall.Fchmodat(dirfd, path, mode, flags)
}

vendor/github.com/stevvooe/continuity/sysx/copy_linux.go generated vendored normal file

@ -0,0 +1,9 @@
package sysx
// These functions will be generated by generate.sh
// $ GOOS=linux GOARCH=386 ./generate.sh copy
// $ GOOS=linux GOARCH=amd64 ./generate.sh copy
// $ GOOS=linux GOARCH=arm ./generate.sh copy
// $ GOOS=linux GOARCH=arm64 ./generate.sh copy
//sys CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error)
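As a rough usage sketch (an illustration, not part of this change), the generated CopyFileRange wrapper can drive an in-kernel file copy on Linux 4.5+; passing nil offsets lets the kernel use and advance the open file offsets. File names below are placeholders.

```
// Minimal sketch of copying a file via sysx.CopyFileRange (Linux 4.5+ only).
package main

import (
	"log"
	"os"

	"github.com/stevvooe/continuity/sysx"
)

func main() {
	src, err := os.Open("src.bin") // placeholder source path
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := os.Create("dst.bin") // placeholder destination path
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	fi, err := src.Stat()
	if err != nil {
		log.Fatal(err)
	}

	for remaining := int(fi.Size()); remaining > 0; {
		// nil offsets: the kernel reads from and advances both file offsets.
		n, err := sysx.CopyFileRange(src.Fd(), nil, dst.Fd(), nil, remaining, 0)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break // defensive: stop if the kernel reports no progress
		}
		remaining -= n
	}
}
```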

vendor/github.com/stevvooe/continuity/sysx/copy_linux_386.go generated vendored normal file

@ -0,0 +1,20 @@
// mksyscall.pl -l32 copy_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go generated vendored normal file

@ -0,0 +1,20 @@
// mksyscall.pl copy_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm.go generated vendored normal file

@ -0,0 +1,20 @@
// mksyscall.pl -l32 copy_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm64.go generated vendored normal file

@ -0,0 +1,20 @@
// mksyscall.pl copy_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error) {
r0, _, e1 := syscall.Syscall6(SYS_COPY_FILE_RANGE, uintptr(fdin), uintptr(unsafe.Pointer(offin)), uintptr(fdout), uintptr(unsafe.Pointer(offout)), uintptr(len), uintptr(flags))
n = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/nodata_linux.go generated vendored normal file

@ -0,0 +1,7 @@
package sysx
import (
"syscall"
)
const ENODATA = syscall.ENODATA

vendor/github.com/stevvooe/continuity/sysx/nodata_unix.go generated vendored normal file

@ -0,0 +1,9 @@
// +build darwin freebsd
package sysx
import (
"syscall"
)
const ENODATA = syscall.ENOATTR

vendor/github.com/stevvooe/continuity/sysx/sys.go generated vendored normal file

@ -0,0 +1,37 @@
package sysx
import (
"syscall"
"unsafe"
)
var _zero uintptr
// use is a no-op, but the compiler cannot see that it is.
// Calling use(p) ensures that p is kept live until that point.
//go:noescape
func use(p unsafe.Pointer)
// Do the interface allocations only once for common
// Errno values.
var (
errEAGAIN error = syscall.EAGAIN
errEINVAL error = syscall.EINVAL
errENOENT error = syscall.ENOENT
)
// errnoErr returns common boxed Errno values, to prevent
// allocations at runtime.
func errnoErr(e syscall.Errno) error {
switch e {
case 0:
return nil
case syscall.EAGAIN:
return errEAGAIN
case syscall.EINVAL:
return errEINVAL
case syscall.ENOENT:
return errENOENT
}
return e
}

vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_386.go generated vendored normal file

@ -0,0 +1,7 @@
package sysx
const (
// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
// Number defined in /usr/include/asm/unistd_32.h
SYS_COPY_FILE_RANGE = 377
)

vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_amd64.go generated vendored normal file

@ -0,0 +1,7 @@
package sysx
const (
// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
// Number defined in /usr/include/asm/unistd_64.h
SYS_COPY_FILE_RANGE = 326
)

vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm.go generated vendored normal file

@ -0,0 +1,7 @@
package sysx
const (
// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
// Number defined in /usr/include/arm-linux-gnueabihf/asm/unistd.h
SYS_COPY_FILE_RANGE = 391
)

vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm64.go generated vendored normal file

@ -0,0 +1,7 @@
package sysx
const (
// SYS_COPY_FILE_RANGE defined in Kernel 4.5+
// Number defined in /usr/include/asm-generic/unistd.h
SYS_COPY_FILE_RANGE = 285
)

vendor/github.com/stevvooe/continuity/sysx/xattr.go generated vendored normal file

@ -0,0 +1,67 @@
package sysx
import (
"bytes"
"fmt"
"syscall"
)
const defaultXattrBufferSize = 5
var ErrNotSupported = fmt.Errorf("not supported")
type listxattrFunc func(path string, dest []byte) (int, error)
func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
var p []byte // nil on first execution
for {
n, err := listFunc(path, p) // first call gets buffer size.
if err != nil {
return nil, err
}
if n > len(p) {
p = make([]byte, n)
continue
}
p = p[:n]
ps := bytes.Split(bytes.TrimSuffix(p, []byte{0}), []byte{0})
var entries []string
for _, p := range ps {
s := string(p)
if s != "" {
entries = append(entries, s)
}
}
return entries, nil
}
}
type getxattrFunc func(string, string, []byte) (int, error)
func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
p := make([]byte, defaultXattrBufferSize)
for {
n, err := getFunc(path, attr, p)
if err != nil {
if errno, ok := err.(syscall.Errno); ok && errno == syscall.ERANGE {
p = make([]byte, len(p)*2) // this can't be ideal.
continue // try again!
}
return nil, err
}
// realloc to correct size and repeat
if n > len(p) {
p = make([]byte, n)
continue
}
return p[:n], nil
}
}
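A small usage sketch (not part of the vendored code) showing how the buffer-resizing helpers above are exercised through the public wrappers defined in the platform files that follow; the path used here is only a placeholder.

```
// Hypothetical example: dump all extended attributes of a path using the
// sysx wrappers built on the helpers above (Linux/Darwin builds).
package main

import (
	"fmt"
	"log"

	"github.com/stevvooe/continuity/sysx"
)

func main() {
	path := "/tmp/example" // placeholder; any file carrying xattrs will do

	attrs, err := sysx.LListxattr(path) // does not follow symlinks
	if err != nil {
		log.Fatal(err)
	}
	for _, attr := range attrs {
		val, err := sysx.LGetxattr(path, attr)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s=%q\n", attr, val)
	}
}
```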

vendor/github.com/stevvooe/continuity/sysx/xattr_darwin.go generated vendored normal file

@ -0,0 +1,71 @@
package sysx
// These functions will be generated by generate.sh
// $ GOOS=darwin GOARCH=386 ./generate.sh xattr
// $ GOOS=darwin GOARCH=amd64 ./generate.sh xattr
//sys getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error)
//sys setxattr(path string, attr string, data []byte, flags int) (err error)
//sys removexattr(path string, attr string, options int) (err error)
//sys listxattr(path string, dest []byte, options int) (sz int, err error)
//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
const (
xattrNoFollow = 0x01
)
func listxattrFollow(path string, dest []byte) (sz int, err error) {
return listxattr(path, dest, 0)
}
// Listxattr calls syscall listxattr
func Listxattr(path string) ([]string, error) {
return listxattrAll(path, listxattrFollow)
}
// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
return removexattr(path, attr, 0)
}
// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
return setxattr(path, attr, data, flags)
}
func getxattrFollow(path, attr string, dest []byte) (sz int, err error) {
return getxattr(path, attr, dest, 0, 0)
}
// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
return getxattrAll(path, attr, getxattrFollow)
}
func listxattrNoFollow(path string, dest []byte) (sz int, err error) {
return listxattr(path, dest, xattrNoFollow)
}
// LListxattr calls syscall listxattr with XATTR_NOFOLLOW
func LListxattr(path string) ([]string, error) {
return listxattrAll(path, listxattrNoFollow)
}
// LRemovexattr calls syscall removexattr with XATTR_NOFOLLOW
func LRemovexattr(path string, attr string) (err error) {
return removexattr(path, attr, xattrNoFollow)
}
// LSetxattr calls syscall setxattr with XATTR_NOFOLLOW
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
return setxattr(path, attr, data, flags|xattrNoFollow)
}
func getxattrNoFollow(path, attr string, dest []byte) (sz int, err error) {
return getxattr(path, attr, dest, 0, xattrNoFollow)
}
// LGetxattr calls syscall getxattr with XATTR_NOFOLLOW
func LGetxattr(path, attr string) ([]byte, error) {
return getxattrAll(path, attr, getxattrNoFollow)
}

vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_386.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl -l32 xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func removexattr(path string, attr string, options int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func listxattr(path string, dest []byte, options int) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_amd64.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_darwin.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getxattr(path string, attr string, dest []byte, pos int, options int) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), uintptr(pos), uintptr(options))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func removexattr(path string, attr string, options int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options))
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func listxattr(path string, dest []byte, options int) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)), uintptr(options), 0, 0)
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_freebsd.go generated vendored normal file

@ -0,0 +1,53 @@
package sysx
import (
"errors"
)
// Initial stub version for FreeBSD. FreeBSD has a different
// syscall API from Darwin and Linux for extended attributes;
// it is also not widely used. It is not exposed at all by the
// Go syscall package, so we need to implement directly eventually.
var unsupported error = errors.New("extended attributes unsupported on FreeBSD")
// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
return []string{}, nil
}
// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
return unsupported
}
// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
return unsupported
}
// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
return []byte{}, nil
}
// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
return []string{}, nil
}
// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
return unsupported
}
// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
return unsupported
}
// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
return []byte{}, nil
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux.go generated vendored normal file

@ -0,0 +1,61 @@
package sysx
import "syscall"
// These functions will be generated by generate.sh
// $ GOOS=linux GOARCH=386 ./generate.sh xattr
// $ GOOS=linux GOARCH=amd64 ./generate.sh xattr
// $ GOOS=linux GOARCH=arm ./generate.sh xattr
// $ GOOS=linux GOARCH=arm64 ./generate.sh xattr
// $ GOOS=linux GOARCH=ppc64 ./generate.sh xattr
// $ GOOS=linux GOARCH=ppc64le ./generate.sh xattr
// $ GOOS=linux GOARCH=s390x ./generate.sh xattr
// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
return listxattrAll(path, syscall.Listxattr)
}
// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
return syscall.Removexattr(path, attr)
}
// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
return syscall.Setxattr(path, attr, data, flags)
}
// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
return getxattrAll(path, attr, syscall.Getxattr)
}
//sys llistxattr(path string, dest []byte) (sz int, err error)
// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
return listxattrAll(path, llistxattr)
}
//sys lremovexattr(path string, attr string) (err error)
// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
return lremovexattr(path, attr)
}
//sys lsetxattr(path string, attr string, data []byte, flags int) (err error)
// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
return lsetxattr(path, attr, data, flags)
}
//sys lgetxattr(path string, attr string, dest []byte) (sz int, err error)
// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
return getxattrAll(path, attr, lgetxattr)
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_386.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl -l32 xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_amd64.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl -l32 xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm64.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64le.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/stevvooe/continuity/sysx/xattr_linux_s390x.go generated vendored normal file

@ -0,0 +1,111 @@
// mksyscall.pl xattr_linux.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package sysx
import (
"syscall"
"unsafe"
)
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func llistxattr(path string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(dest) > 0 {
_p1 = unsafe.Pointer(&dest[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall(syscall.SYS_LLISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(dest)))
use(unsafe.Pointer(_p0))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lremovexattr(path string, attr string) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
_, _, e1 := syscall.Syscall(syscall.SYS_LREMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lsetxattr(path string, attr string, data []byte, flags int) (err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(data) > 0 {
_p2 = unsafe.Pointer(&data[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
_, _, e1 := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(data)), uintptr(flags), 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func lgetxattr(path string, attr string, dest []byte) (sz int, err error) {
var _p0 *byte
_p0, err = syscall.BytePtrFromString(path)
if err != nil {
return
}
var _p1 *byte
_p1, err = syscall.BytePtrFromString(attr)
if err != nil {
return
}
var _p2 unsafe.Pointer
if len(dest) > 0 {
_p2 = unsafe.Pointer(&dest[0])
} else {
_p2 = unsafe.Pointer(&_zero)
}
r0, _, e1 := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(_p2), uintptr(len(dest)), 0, 0)
use(unsafe.Pointer(_p0))
use(unsafe.Pointer(_p1))
sz = int(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}

vendor/github.com/tonistiigi/fsutil/LICENSE generated vendored normal file

@ -0,0 +1,22 @@
MIT
Copyright 2017 Tõnis Tiigi <tonistiigi@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/tonistiigi/fsutil/diff.go generated vendored normal file

@ -0,0 +1,37 @@
package fsutil
import (
"os"
"golang.org/x/net/context"
)
type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error
func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error {
return nil
}
type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error
func GetWalkerFn(root string) walkerFn {
return func(ctx context.Context, pathC chan<- *currentPath) error {
return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
p := &currentPath{
path: path,
f: f,
}
select {
case <-ctx.Done():
return ctx.Err()
case pathC <- p:
return nil
}
})
}
}

vendor/github.com/tonistiigi/fsutil/diff_containerd.go generated vendored normal file

@ -0,0 +1,199 @@
package fsutil
import (
"os"
"strings"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
// Everything below is copied from containerd/fs. TODO: remove duplication @dmcgowan
// Const redefined because containerd/fs doesn't build on !linux
// ChangeKind is the type of modification that
// a change is making.
type ChangeKind int
const (
// ChangeKindAdd represents an addition of
// a file
ChangeKindAdd ChangeKind = iota
// ChangeKindModify represents a change to
// an existing file
ChangeKindModify
// ChangeKindDelete represents a delete of
// a file
ChangeKindDelete
)
// ChangeFunc is the type of function called for each change
// computed during a directory changes calculation.
type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
type currentPath struct {
path string
f os.FileInfo
// fullPath string
}
// doubleWalkDiff walks both directories to create a diff
func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b walkerFn) (err error) {
g, ctx := errgroup.WithContext(ctx)
var (
c1 = make(chan *currentPath, 128)
c2 = make(chan *currentPath, 128)
f1, f2 *currentPath
rmdir string
)
g.Go(func() error {
defer close(c1)
return a(ctx, c1)
})
g.Go(func() error {
defer close(c2)
return b(ctx, c2)
})
g.Go(func() error {
loop0:
for c1 != nil || c2 != nil {
if f1 == nil && c1 != nil {
f1, err = nextPath(ctx, c1)
if err != nil {
return err
}
if f1 == nil {
c1 = nil
}
}
if f2 == nil && c2 != nil {
f2, err = nextPath(ctx, c2)
if err != nil {
return err
}
if f2 == nil {
c2 = nil
}
}
if f1 == nil && f2 == nil {
continue
}
var f os.FileInfo
k, p := pathChange(f1, f2)
switch k {
case ChangeKindAdd:
if rmdir != "" {
rmdir = ""
}
f = f2.f
f2 = nil
case ChangeKindDelete:
// Check if this file is already removed by being
// under of a removed directory
if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
f1 = nil
continue
} else if rmdir == "" && f1.f.IsDir() {
rmdir = f1.path + string(os.PathSeparator)
} else if rmdir != "" {
rmdir = ""
}
f1 = nil
case ChangeKindModify:
same, err := sameFile(f1, f2)
if err != nil {
return err
}
if f1.f.IsDir() && !f2.f.IsDir() {
rmdir = f1.path + string(os.PathSeparator)
} else if rmdir != "" {
rmdir = ""
}
f = f2.f
f1 = nil
f2 = nil
if same {
continue loop0
}
}
if err := changeFn(k, p, f, nil); err != nil {
return err
}
}
return nil
})
return g.Wait()
}
func pathChange(lower, upper *currentPath) (ChangeKind, string) {
if lower == nil {
if upper == nil {
panic("cannot compare nil paths")
}
return ChangeKindAdd, upper.path
}
if upper == nil {
return ChangeKindDelete, lower.path
}
switch i := ComparePath(lower.path, upper.path); {
case i < 0:
// File in lower that is not in upper
return ChangeKindDelete, lower.path
case i > 0:
// File in upper that is not in lower
return ChangeKindAdd, upper.path
default:
return ChangeKindModify, upper.path
}
}
func sameFile(f1, f2 *currentPath) (same bool, retErr error) {
// If not a directory also check size, modtime, and content
if !f1.f.IsDir() {
if f1.f.Size() != f2.f.Size() {
return false, nil
}
t1 := f1.f.ModTime()
t2 := f2.f.ModTime()
if t1.UnixNano() != t2.UnixNano() {
return false, nil
}
}
ls1, ok := f1.f.Sys().(*Stat)
if !ok {
return false, nil
}
ls2, ok := f2.f.Sys().(*Stat)
if !ok {
return false, nil
}
return compareStat(ls1, ls2)
}
// compareStat returns whether the stats are equivalent,
// whether the files are considered the same file, and
// an error
func compareStat(ls1, ls2 *Stat) (bool, error) {
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil
}
func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
case p := <-pathC:
return p, nil
}
}
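As a rough in-package sketch (these symbols are unexported, and the exported Changes above is still a stub in this copy), wiring the double walk up with a logging ChangeFunc could look like this:

```
// Hypothetical sketch, assumed to live inside package fsutil: report the
// differences between two directory trees using doubleWalkDiff.
package fsutil

import (
	"log"
	"os"

	"golang.org/x/net/context"
)

func logChanges(ctx context.Context, dirA, dirB string) error {
	changeFn := func(kind ChangeKind, path string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		switch kind {
		case ChangeKindAdd:
			log.Printf("added    %s", path)
		case ChangeKindDelete:
			log.Printf("deleted  %s", path)
		case ChangeKindModify:
			log.Printf("modified %s", path)
		}
		return nil
	}
	// Walk both roots concurrently and merge the sorted path streams.
	return doubleWalkDiff(ctx, changeFn, GetWalkerFn(dirA), GetWalkerFn(dirB))
}
```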

vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go generated vendored normal file

@ -0,0 +1,37 @@
package fsutil
import (
"bytes"
"syscall"
"github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
)
// compareSysStat returns whether the stats are equivalent,
// whether the files are considered the same file, and
// an error
func compareSysStat(s1, s2 interface{}) (bool, error) {
ls1, ok := s1.(*syscall.Stat_t)
if !ok {
return false, nil
}
ls2, ok := s2.(*syscall.Stat_t)
if !ok {
return false, nil
}
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
}
func compareCapabilities(p1, p2 string) (bool, error) {
c1, err := sysx.LGetxattr(p1, "security.capability")
if err != nil && err != syscall.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
}
c2, err := sysx.LGetxattr(p2, "security.capability")
if err != nil && err != syscall.ENODATA {
return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
}
return bytes.Equal(c1, c2), nil
}
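A short in-package sketch (an illustration, not part of the vendored file) of how these two checks might be combined to decide whether two paths look identical at the syscall level:

```
// Hypothetical helper, assumed to live inside package fsutil (Linux build).
package fsutil

import "os"

func sameOnDisk(p1, p2 string) (bool, error) {
	fi1, err := os.Lstat(p1)
	if err != nil {
		return false, err
	}
	fi2, err := os.Lstat(p2)
	if err != nil {
		return false, err
	}
	// Compare uid/gid/mode/rdev first, then the security.capability xattr.
	same, err := compareSysStat(fi1.Sys(), fi2.Sys())
	if err != nil || !same {
		return same, err
	}
	return compareCapabilities(p1, p2)
}
```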

vendor/github.com/tonistiigi/fsutil/diskwriter.go generated vendored normal file

@ -0,0 +1,353 @@
// +build linux windows
package fsutil
import (
"archive/tar"
"crypto/sha256"
"encoding/hex"
"hash"
"io"
"os"
"path/filepath"
"strconv"
"sync"
"time"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/tarsum"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
type WriteToFunc func(context.Context, string, io.WriteCloser) error
type DiskWriterOpt struct {
AsyncDataCb WriteToFunc
SyncDataCb WriteToFunc
NotifyCb func(ChangeKind, string, os.FileInfo, error) error
}
type DiskWriter struct {
opt DiskWriterOpt
dest string
wg sync.WaitGroup
ctx context.Context
cancel func()
eg *errgroup.Group
}
func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) {
if opt.SyncDataCb == nil && opt.AsyncDataCb == nil {
return nil, errors.New("no data callback specified")
}
if opt.SyncDataCb != nil && opt.AsyncDataCb != nil {
return nil, errors.New("can't specify both sync and async data callbacks")
}
ctx, cancel := context.WithCancel(ctx)
eg, ctx := errgroup.WithContext(ctx)
return &DiskWriter{
opt: opt,
dest: dest,
eg: eg,
ctx: ctx,
cancel: cancel,
}, nil
}
func (dw *DiskWriter) Wait(ctx context.Context) error {
return dw.eg.Wait()
}
func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) {
if err != nil {
return err
}
select {
case <-dw.ctx.Done():
return dw.ctx.Err()
default:
}
defer func() {
if retErr != nil {
dw.cancel()
}
}()
p = filepath.FromSlash(p)
destPath := filepath.Join(dw.dest, p)
if kind == ChangeKindDelete {
// todo: no need to validate if diff is trusted but is it always?
if err := os.RemoveAll(destPath); err != nil {
return errors.Wrapf(err, "failed to remove: %s", destPath)
}
if dw.opt.NotifyCb != nil {
if err := dw.opt.NotifyCb(kind, p, nil, nil); err != nil {
return err
}
}
return nil
}
stat, ok := fi.Sys().(*Stat)
if !ok {
return errors.Errorf("%s invalid change without stat information", p)
}
rename := true
oldFi, err := os.Lstat(destPath)
if err != nil {
if os.IsNotExist(err) {
if kind != ChangeKindAdd {
return errors.Wrapf(err, "invalid addition: %s", destPath)
}
rename = false
} else {
return errors.Wrapf(err, "failed to stat %s", destPath)
}
}
if oldFi != nil && fi.IsDir() && oldFi.IsDir() {
if err := rewriteMetadata(destPath, stat); err != nil {
return errors.Wrapf(err, "error setting dir metadata for %s", destPath)
}
return nil
}
newPath := destPath
if rename {
newPath = filepath.Join(filepath.Dir(destPath), ".tmp."+nextSuffix())
}
isRegularFile := false
switch {
case fi.IsDir():
if err := os.Mkdir(newPath, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to create dir %s", newPath)
}
case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0:
if err := handleTarTypeBlockCharFifo(newPath, stat); err != nil {
return errors.Wrapf(err, "failed to create device %s", newPath)
}
case fi.Mode()&os.ModeSymlink != 0:
if err := os.Symlink(stat.Linkname, newPath); err != nil {
return errors.Wrapf(err, "failed to symlink %s", newPath)
}
case stat.Linkname != "":
if err := os.Link(filepath.Join(dw.dest, stat.Linkname), newPath); err != nil {
return errors.Wrapf(err, "failed to link %s to %s", newPath, stat.Linkname)
}
default:
isRegularFile = true
file, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY, fi.Mode()) //todo: windows
if err != nil {
return errors.Wrapf(err, "failed to create %s", newPath)
}
if dw.opt.SyncDataCb != nil {
if err := dw.processChange(ChangeKindAdd, p, fi, file); err != nil {
file.Close()
return err
}
break
}
if err := file.Close(); err != nil {
return errors.Wrapf(err, "failed to close %s", newPath)
}
}
if err := rewriteMetadata(newPath, stat); err != nil {
return errors.Wrapf(err, "error setting metadata for %s", newPath)
}
if rename {
if err := os.Rename(newPath, destPath); err != nil {
return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath)
}
}
if isRegularFile {
if dw.opt.AsyncDataCb != nil {
dw.requestAsyncFileData(p, destPath, fi)
}
} else {
return dw.processChange(kind, p, fi, nil)
}
return nil
}
func (dw *DiskWriter) requestAsyncFileData(p, dest string, fi os.FileInfo) {
// todo: limit worker threads
dw.eg.Go(func() error {
if err := dw.processChange(ChangeKindAdd, p, fi, &lazyFileWriter{
dest: dest,
}); err != nil {
return err
}
return chtimes(dest, fi.ModTime().UnixNano()) // TODO: parent dirs
})
}
func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w io.WriteCloser) error {
origw := w
var hw *hashedWriter
if dw.opt.NotifyCb != nil {
var err error
if hw, err = newHashWriter(p, fi, w); err != nil {
return err
}
w = hw
}
if origw != nil {
fn := dw.opt.SyncDataCb
if fn == nil && dw.opt.AsyncDataCb != nil {
fn = dw.opt.AsyncDataCb
}
if err := fn(dw.ctx, p, w); err != nil {
return err
}
} else {
if hw != nil {
hw.Close()
}
}
if hw != nil {
return dw.opt.NotifyCb(kind, p, hw, nil)
}
return nil
}
type hashedWriter struct {
os.FileInfo
io.Writer
h hash.Hash
w io.WriteCloser
sum string
}
func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
h, err := NewTarsumHash(p, fi)
if err != nil {
return nil, err
}
hw := &hashedWriter{
FileInfo: fi,
Writer: io.MultiWriter(w, h),
h: h,
w: w,
}
return hw, nil
}
func (hw *hashedWriter) Close() error {
hw.sum = hex.EncodeToString(hw.h.Sum(nil))
if hw.w != nil {
return hw.w.Close()
}
return nil
}
func (hw *hashedWriter) Hash() string {
return hw.sum
}
type lazyFileWriter struct {
dest string
ctx context.Context
f *os.File
}
func (lfw *lazyFileWriter) Write(dt []byte) (int, error) {
if lfw.f == nil {
file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows
if err != nil {
return 0, errors.Wrapf(err, "failed to open %s", lfw.dest)
}
lfw.f = file
}
return lfw.f.Write(dt)
}
func (lfw *lazyFileWriter) Close() error {
if lfw.f != nil {
return lfw.f.Close()
}
return nil
}
func mkdev(major int64, minor int64) uint32 {
return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
func NewTarsumHash(p string, fi os.FileInfo) (hash.Hash, error) {
stat, ok := fi.Sys().(*Stat)
link := ""
if ok {
link = stat.Linkname
}
if fi.IsDir() {
p += string(os.PathSeparator)
}
h, err := archive.FileInfoHeader(p, fi, link)
if err != nil {
return nil, err
}
h.Name = p
if ok {
h.Uid = int(stat.Uid)
h.Gid = int(stat.Gid)
h.Linkname = stat.Linkname
if stat.Xattrs != nil {
h.Xattrs = make(map[string]string)
for k, v := range stat.Xattrs {
h.Xattrs[k] = string(v)
}
}
}
tsh := &tarsumHash{h: h, Hash: sha256.New()}
tsh.Reset()
return tsh, nil
}
// Reset resets the Hash to its initial state.
func (tsh *tarsumHash) Reset() {
tsh.Hash.Reset()
tarsum.WriteV1Header(tsh.h, tsh.Hash)
}
type tarsumHash struct {
hash.Hash
h *tar.Header
}
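
A rough usage sketch of the DiskWriter API shown above, assuming nothing beyond it; the destination path and the no-op data callback are illustrative only.
```
package main

import (
	"io"
	"log"
	"os"

	"github.com/tonistiigi/fsutil"
	"golang.org/x/net/context"
)

func main() {
	// Illustrative destination directory for applying changes.
	dest := "/tmp/fsutil-dest"
	if err := os.MkdirAll(dest, 0755); err != nil {
		log.Fatal(err)
	}

	dw, err := fsutil.NewDiskWriter(context.Background(), dest, fsutil.DiskWriterOpt{
		// SyncDataCb is called with a writer for each regular file's content;
		// this stub writes nothing and just closes the file.
		SyncDataCb: func(ctx context.Context, p string, wc io.WriteCloser) error {
			return wc.Close()
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// HandleChange is normally fed by a diff walker or the receiver further
	// down; deleting a path that does not exist is a no-op (os.RemoveAll).
	if err := dw.HandleChange(fsutil.ChangeKindDelete, "stale-file", nil, nil); err != nil {
		log.Fatal(err)
	}
	if err := dw.Wait(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```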

64
vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go generated vendored Normal file

@ -0,0 +1,64 @@
// +build linux
package fsutil
import (
"os"
"syscall"
"github.com/pkg/errors"
"github.com/stevvooe/continuity/sysx"
"golang.org/x/sys/unix"
)
func rewriteMetadata(p string, stat *Stat) error {
for key, value := range stat.Xattrs {
sysx.Setxattr(p, key, value, 0)
}
if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil {
return errors.Wrapf(err, "failed to lchown %s", p)
}
if os.FileMode(stat.Mode)&os.ModeSymlink == 0 {
if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil {
return errors.Wrapf(err, "failed to chown %s", p)
}
}
if err := chtimes(p, stat.ModTime); err != nil {
return errors.Wrapf(err, "failed to chtimes %s", p)
}
return nil
}
func chtimes(path string, un int64) error {
var utimes [2]unix.Timespec
utimes[0] = unix.NsecToTimespec(un)
utimes[1] = utimes[0]
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrap(err, "failed call to UtimesNanoAt")
}
return nil
}
// handleTarTypeBlockCharFifo is an OS-specific helper used by the disk
// writer to create block, char, and fifo device nodes
func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
mode := uint32(stat.Mode & 07777)
if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
mode |= syscall.S_IFCHR
} else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 {
mode |= syscall.S_IFIFO
} else {
mode |= syscall.S_IFBLK
}
if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
return err
}
return nil
}

25
vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go generated vendored Normal file

@ -0,0 +1,25 @@
// +build windows
package fsutil
import (
"os"
"time"
"github.com/pkg/errors"
)
func rewriteMetadata(p string, stat *Stat) error {
return chtimes(p, stat.ModTime)
}
func chtimes(path string, un int64) error {
mtime := time.Unix(0, un)
return os.Chtimes(path, mtime, mtime)
}
// handleTarTypeBlockCharFifo is an OS-specific helper used by the disk
// writer to create block, char, and fifo device nodes
func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
return errors.New("Not implemented on windows")
}

3
vendor/github.com/tonistiigi/fsutil/generate.go generated vendored Normal file

@ -0,0 +1,3 @@
package fsutil
//go:generate protoc --gogoslick_out=. stat.proto wire.proto

46
vendor/github.com/tonistiigi/fsutil/hardlinks.go generated vendored Normal file

@ -0,0 +1,46 @@
package fsutil
import (
"os"
"github.com/pkg/errors"
)
// Hardlinks validates that all targets for links were part of the changes
type Hardlinks struct {
seenFiles map[string]struct{}
}
func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
if v.seenFiles == nil {
v.seenFiles = make(map[string]struct{})
}
if kind == ChangeKindDelete {
return nil
}
stat, ok := fi.Sys().(*Stat)
if !ok {
return errors.Errorf("invalid change without stat info: %s", p)
}
if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 {
return nil
}
if len(stat.Linkname) > 0 {
if _, ok := v.seenFiles[stat.Linkname]; !ok {
return errors.Errorf("invalid link %s to unknown path: %q", p, stat.Linkname)
}
} else {
v.seenFiles[p] = struct{}{}
}
return nil
}
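
A small sketch of how this validator behaves. The `statFileInfo` wrapper below is ad-hoc for the example (the package has its own os.FileInfo wrapper around `Stat`), and the paths are invented.
```
package main

import (
	"fmt"
	"os"
	"time"

	"github.com/tonistiigi/fsutil"
)

// statFileInfo is an ad-hoc os.FileInfo over *fsutil.Stat, for this example only.
type statFileInfo struct{ s *fsutil.Stat }

func (fi statFileInfo) Name() string       { return fi.s.Path }
func (fi statFileInfo) Size() int64        { return 0 } // not needed here
func (fi statFileInfo) Mode() os.FileMode  { return os.FileMode(fi.s.Mode) }
func (fi statFileInfo) ModTime() time.Time { return time.Unix(0, fi.s.ModTime) }
func (fi statFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
func (fi statFileInfo) Sys() interface{}   { return fi.s }

func main() {
	var v fsutil.Hardlinks

	// A regular file is recorded as a possible hardlink target.
	a := statFileInfo{&fsutil.Stat{Path: "a", Mode: 0644}}
	fmt.Println(v.HandleChange(fsutil.ChangeKindAdd, "a", a, nil)) // <nil>

	// A hardlink whose target was never part of the changes is rejected.
	b := statFileInfo{&fsutil.Stat{Path: "b", Mode: 0644, Linkname: "missing"}}
	fmt.Println(v.HandleChange(fsutil.ChangeKindAdd, "b", b, nil)) // error
}
```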

45
vendor/github.com/tonistiigi/fsutil/readme.md generated vendored Normal file

@ -0,0 +1,45 @@
Incremental file directory sync tools in golang.
```
BENCH_FILE_SIZE=10000 ./bench.test --test.bench .
BenchmarkCopyWithTar10-4 2000 995242 ns/op
BenchmarkCopyWithTar50-4 300 4710021 ns/op
BenchmarkCopyWithTar200-4 100 16627260 ns/op
BenchmarkCopyWithTar1000-4 20 60031459 ns/op
BenchmarkCPA10-4 1000 1678367 ns/op
BenchmarkCPA50-4 500 3690306 ns/op
BenchmarkCPA200-4 200 9495066 ns/op
BenchmarkCPA1000-4 50 29769289 ns/op
BenchmarkDiffCopy10-4 2000 943889 ns/op
BenchmarkDiffCopy50-4 500 3285950 ns/op
BenchmarkDiffCopy200-4 200 8563792 ns/op
BenchmarkDiffCopy1000-4 50 29511340 ns/op
BenchmarkDiffCopyProto10-4 2000 944615 ns/op
BenchmarkDiffCopyProto50-4 500 3334940 ns/op
BenchmarkDiffCopyProto200-4 200 9420038 ns/op
BenchmarkDiffCopyProto1000-4 50 30632429 ns/op
BenchmarkIncrementalDiffCopy10-4 2000 691993 ns/op
BenchmarkIncrementalDiffCopy50-4 1000 1304253 ns/op
BenchmarkIncrementalDiffCopy200-4 500 3306519 ns/op
BenchmarkIncrementalDiffCopy1000-4 200 10211343 ns/op
BenchmarkIncrementalDiffCopy5000-4 20 55194427 ns/op
BenchmarkIncrementalDiffCopy10000-4 20 91759289 ns/op
BenchmarkIncrementalCopyWithTar10-4 2000 1020258 ns/op
BenchmarkIncrementalCopyWithTar50-4 300 5348786 ns/op
BenchmarkIncrementalCopyWithTar200-4 100 19495000 ns/op
BenchmarkIncrementalCopyWithTar1000-4 20 70338507 ns/op
BenchmarkIncrementalRsync10-4 30 45215754 ns/op
BenchmarkIncrementalRsync50-4 30 45837260 ns/op
BenchmarkIncrementalRsync200-4 30 48780614 ns/op
BenchmarkIncrementalRsync1000-4 20 54801892 ns/op
BenchmarkIncrementalRsync5000-4 20 84782542 ns/op
BenchmarkIncrementalRsync10000-4 10 103355108 ns/op
BenchmarkRsync10-4 30 46776470 ns/op
BenchmarkRsync50-4 30 48601555 ns/op
BenchmarkRsync200-4 20 59642691 ns/op
BenchmarkRsync1000-4 20 101343010 ns/op
BenchmarkGnuTar10-4 500 3171448 ns/op
BenchmarkGnuTar50-4 300 5030296 ns/op
BenchmarkGnuTar200-4 100 10464313 ns/op
BenchmarkGnuTar1000-4 50 30375257 ns/op
```
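
As rough orientation for the receive side of these sync tools, a sketch under assumptions: `conn` must satisfy the package's `Stream` interface (defined outside this diff, modeled on a gRPC stream carrying the `Packet` messages shown in receive.go below), and the callback follows the `ChangeFunc` shape used throughout this package. The `receiveInto` name is illustrative.
```
package main

import (
	"log"
	"os"

	"github.com/tonistiigi/fsutil"
	"golang.org/x/net/context"
)

// receiveInto applies an incremental diff stream from conn into dir and logs
// every path that was written or deleted.
func receiveInto(ctx context.Context, conn fsutil.Stream, dir string) error {
	notify := func(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		log.Println("synced", p)
		return nil
	}
	return fsutil.Receive(ctx, conn, dir, notify)
}

func main() {
	// Wiring a real Stream (e.g. a gRPC session stream) is out of scope for
	// this sketch; receiveInto only illustrates the call shape.
	_ = receiveInto
}
```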

210
vendor/github.com/tonistiigi/fsutil/receive.go generated vendored Normal file

@ -0,0 +1,210 @@
// +build linux windows
package fsutil
import (
"io"
"os"
"sync"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
)
func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
r := &receiver{
conn: &syncStream{Stream: conn},
dest: dest,
files: make(map[string]uint32),
pipes: make(map[uint32]io.WriteCloser),
notifyHashed: notifyHashed,
}
return r.run(ctx)
}
type receiver struct {
dest string
conn Stream
files map[string]uint32
pipes map[uint32]io.WriteCloser
mu sync.RWMutex
muPipes sync.RWMutex
notifyHashed ChangeFunc
orderValidator Validator
hlValidator Hardlinks
}
type dynamicWalker struct {
walkChan chan *currentPath
closed bool
}
func newDynamicWalker() *dynamicWalker {
return &dynamicWalker{
walkChan: make(chan *currentPath, 128),
}
}
func (w *dynamicWalker) update(p *currentPath) error {
if w.closed {
return errors.New("walker is closed")
}
if p == nil {
close(w.walkChan)
return nil
}
w.walkChan <- p
return nil
}
func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error {
for {
select {
case p, ok := <-w.walkChan:
if !ok {
return nil
}
pathC <- p
case <-ctx.Done():
return ctx.Err()
}
}
}
func (r *receiver) run(ctx context.Context) error {
g, ctx := errgroup.WithContext(ctx)
dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{
AsyncDataCb: r.asyncDataFunc,
NotifyCb: r.notifyHashed,
})
if err != nil {
return err
}
w := newDynamicWalker()
g.Go(func() error {
err := doubleWalkDiff(ctx, dw.HandleChange, GetWalkerFn(r.dest), w.fill)
if err != nil {
return err
}
if err := dw.Wait(ctx); err != nil {
return err
}
r.conn.SendMsg(&Packet{Type: PACKET_FIN})
return nil
})
g.Go(func() error {
var i uint32 = 0
var p Packet
for {
p = Packet{Data: p.Data[:0]}
if err := r.conn.RecvMsg(&p); err != nil {
return err
}
switch p.Type {
case PACKET_STAT:
if p.Stat == nil {
if err := w.update(nil); err != nil {
return err
}
break
}
if fileCanRequestData(os.FileMode(p.Stat.Mode)) {
r.mu.Lock()
r.files[p.Stat.Path] = i
r.mu.Unlock()
}
i++
cp := &currentPath{path: p.Stat.Path, f: &StatInfo{p.Stat}}
if err := r.orderValidator.HandleChange(ChangeKindAdd, cp.path, cp.f, nil); err != nil {
return err
}
if err := r.hlValidator.HandleChange(ChangeKindAdd, cp.path, cp.f, nil); err != nil {
return err
}
if err := w.update(cp); err != nil {
return err
}
case PACKET_DATA:
r.muPipes.Lock()
pw, ok := r.pipes[p.ID]
r.muPipes.Unlock()
if !ok {
return errors.Errorf("invalid file request %s", p.ID)
}
if len(p.Data) == 0 {
if err := pw.Close(); err != nil {
return err
}
} else {
if _, err := pw.Write(p.Data); err != nil {
return err
}
}
case PACKET_FIN:
return nil
}
}
})
return g.Wait()
}
func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error {
r.mu.Lock()
id, ok := r.files[p]
if !ok {
r.mu.Unlock()
return errors.Errorf("invalid file request %s", p)
}
delete(r.files, p)
r.mu.Unlock()
wwc := newWrappedWriteCloser(wc)
r.muPipes.Lock()
r.pipes[id] = wwc
r.muPipes.Unlock()
if err := r.conn.SendMsg(&Packet{Type: PACKET_REQ, ID: id}); err != nil {
return err
}
err := wwc.Wait(ctx)
r.muPipes.Lock()
delete(r.pipes, id)
r.muPipes.Unlock()
return err
}
type wrappedWriteCloser struct {
io.WriteCloser
err error
once sync.Once
done chan struct{}
}
func newWrappedWriteCloser(wc io.WriteCloser) *wrappedWriteCloser {
return &wrappedWriteCloser{WriteCloser: wc, done: make(chan struct{})}
}
func (w *wrappedWriteCloser) Close() error {
w.err = w.WriteCloser.Close()
w.once.Do(func() { close(w.done) })
return w.err
}
func (w *wrappedWriteCloser) Wait(ctx context.Context) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-w.done:
return w.err
}
}
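
One detail worth noting: when a notify callback is supplied (here via notifyHashed, forwarded to DiskWriterOpt.NotifyCb), the os.FileInfo it receives for non-delete changes is the hashedWriter from diskwriter.go, so the content hash can be recovered with a type assertion, while deletions arrive with a nil FileInfo. A minimal sketch; the notifyHashed name below is illustrative.
```
package main

import (
	"fmt"
	"os"

	"github.com/tonistiigi/fsutil"
)

// notifyHashed can be passed to Receive; for adds and modifications the
// FileInfo exposes a Hash() method (the tarsum of the file).
func notifyHashed(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if h, ok := fi.(interface{ Hash() string }); ok {
		fmt.Printf("%s hash=%s\n", p, h.Hash())
		return nil
	}
	fmt.Println("removed", p)
	return nil
}

func main() {
	// Deletions arrive with a nil FileInfo, so this prints "removed gone.txt".
	_ = notifyHashed(fsutil.ChangeKindDelete, "gone.txt", nil, nil)
}
```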

14
vendor/github.com/tonistiigi/fsutil/receive_unsupported.go generated vendored Normal file

@ -0,0 +1,14 @@
// +build !linux,!windows
package fsutil
import (
"runtime"
"github.com/pkg/errors"
"golang.org/x/net/context"
)
func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
return errors.Errorf("receive is unsupported in %s", runtime.GOOS)
}

Some files were not shown because too many files changed in this diff.