Merge pull request #3314 from crazy-max/buildx-default

Set buildx as default builder

Commit c780f7c4ab
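
In practical terms this change makes the classic build entry points dispatch to the buildx CLI plugin instead of the legacy builder. A rough sketch of the resulting behaviour, assuming the buildx plugin is installed as a CLI plugin (commands are illustrative, not verbatim output):

    # "docker build ..." and "docker image build ..." are rewritten to
    # "docker buildx build ...", and "docker builder ..." to "docker buildx ...",
    # before command dispatch.
    docker build -t demo .

    # Opting out: DOCKER_BUILDKIT=0 keeps the legacy builder and prints the
    # deprecation warning introduced below on stderr.
    DOCKER_BUILDKIT=0 docker build -t demo .

    # Enforcing BuildKit: with DOCKER_BUILDKIT=1 the command fails hard if the
    # buildx component is missing or broken.
    DOCKER_BUILDKIT=1 docker build -t demo .
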
@@ -5,6 +5,7 @@ ARG GO_VERSION=1.16.11
 ARG XX_VERSION=1.0.0-rc.2
 ARG GOVERSIONINFO_VERSION=v1.3.0
 ARG GOTESTSUM_VERSION=v1.7.0
+ARG BUILDX_VERSION=0.7.1
 
 FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-${BASE_VARIANT} AS gostable
 FROM --platform=$BUILDPLATFORM golang:1.17rc1-${BASE_VARIANT} AS golatest
@@ -106,6 +107,8 @@ ARG COMPOSE_VERSION=1.29.2
 RUN curl -fsSL https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose && \
     chmod +x /usr/local/bin/docker-compose
 
+FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx
+
 FROM e2e-base-${BASE_VARIANT} AS e2e
 ARG NOTARY_VERSION=v0.6.1
 ADD --chmod=0755 https://github.com/theupdateframework/notary/releases/download/${NOTARY_VERSION}/notary-Linux-amd64 /usr/local/bin/notary
@@ -114,6 +117,7 @@ RUN echo 'notary.cert' >> /etc/ca-certificates.conf && update-ca-certificates
 COPY --from=gotestsum /out/gotestsum /usr/bin/gotestsum
 COPY --from=build /out ./build/
 COPY --from=build-plugins /out ./build/
+COPY --from=buildx /buildx /usr/libexec/docker/cli-plugins/docker-buildx
 COPY . .
 ENV DOCKER_BUILDKIT=1
 ENV PATH=/go/src/github.com/docker/cli/build:$PATH
@@ -104,6 +104,36 @@ func listPluginCandidates(dirs []string) (map[string][]string, error) {
 	return result, nil
 }
 
+// GetPlugin returns a plugin on the system by its name
+func GetPlugin(name string, dockerCli command.Cli, rootcmd *cobra.Command) (*Plugin, error) {
+	pluginDirs, err := getPluginDirs(dockerCli)
+	if err != nil {
+		return nil, err
+	}
+
+	candidates, err := listPluginCandidates(pluginDirs)
+	if err != nil {
+		return nil, err
+	}
+
+	if paths, ok := candidates[name]; ok {
+		if len(paths) == 0 {
+			return nil, errPluginNotFound(name)
+		}
+		c := &candidate{paths[0]}
+		p, err := newPlugin(c, rootcmd)
+		if err != nil {
+			return nil, err
+		}
+		if !IsNotFound(p.Err) {
+			p.ShadowedPaths = paths[1:]
+		}
+		return &p, nil
+	}
+
+	return nil, errPluginNotFound(name)
+}
+
 // ListPlugins produces a list of the plugins available on the system
 func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {
 	pluginDirs, err := getPluginDirs(dockerCli)
@@ -82,6 +82,29 @@ func TestListPluginCandidates(t *testing.T) {
 	assert.DeepEqual(t, candidates, exp)
 }
 
+func TestGetPlugin(t *testing.T) {
+	dir := fs.NewDir(t, t.Name(),
+		fs.WithFile("docker-bbb", `
+#!/bin/sh
+echo '{"SchemaVersion":"0.1.0"}'`, fs.WithMode(0777)),
+		fs.WithFile("docker-aaa", `
+#!/bin/sh
+echo '{"SchemaVersion":"0.1.0"}'`, fs.WithMode(0777)),
+	)
+	defer dir.Remove()
+
+	cli := test.NewFakeCli(nil)
+	cli.SetConfigFile(&configfile.ConfigFile{CLIPluginsExtraDirs: []string{dir.Path()}})
+
+	plugin, err := GetPlugin("bbb", cli, &cobra.Command{})
+	assert.NilError(t, err)
+	assert.Equal(t, plugin.Name, "bbb")
+
+	_, err = GetPlugin("ccc", cli, &cobra.Command{})
+	assert.Error(t, err, "Error: No such CLI plugin: ccc")
+	assert.Assert(t, IsNotFound(err))
+}
+
 func TestListPluginsIsSorted(t *testing.T) {
 	dir := fs.NewDir(t, t.Name(),
 		fs.WithFile("docker-bbb", `
@@ -7,7 +7,6 @@ import (
 	"os"
 	"path/filepath"
 	"runtime"
-	"strconv"
 	"strings"
 	"time"
 
@@ -169,20 +168,6 @@ func (cli *DockerCli) ContentTrustEnabled() bool {
 	return cli.contentTrust
 }
 
-// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
-// or otherwise the client-side DOCKER_BUILDKIT environment variable
-func BuildKitEnabled(si ServerInfo) (bool, error) {
-	buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
-	if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
-		var err error
-		buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
-		if err != nil {
-			return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
-		}
-	}
-	return buildkitEnabled, nil
-}
-
 // ManifestStore returns a store for local manifests
 func (cli *DockerCli) ManifestStore() manifeststore.Store {
 	// TODO: support override default location from config file
@@ -5,7 +5,6 @@ import (
 	"bufio"
 	"bytes"
 	"context"
-	"encoding/csv"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -57,7 +56,6 @@ type buildOptions struct {
 	isolation      string
 	quiet          bool
 	noCache        bool
-	progress       string
 	rm             bool
 	forceRm        bool
 	pull           bool
@@ -71,9 +69,6 @@ type buildOptions struct {
 	stream         bool
 	platform       string
 	untrusted      bool
-	secrets        []string
-	ssh            []string
-	outputs        []string
 }
 
 // dockerfileFromStdin returns true when the user specified that the Dockerfile
@@ -118,40 +113,26 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
 	flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format")
 	flags.Var(&options.buildArgs, "build-arg", "Set build-time variables")
 	flags.Var(options.ulimits, "ulimit", "Ulimit options")
-	flags.SetAnnotation("ulimit", "no-buildkit", nil)
 	flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')")
 	flags.VarP(&options.memory, "memory", "m", "Memory limit")
-	flags.SetAnnotation("memory", "no-buildkit", nil)
 	flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap")
-	flags.SetAnnotation("memory-swap", "no-buildkit", nil)
 	flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm")
-	flags.SetAnnotation("shm-size", "no-buildkit", nil)
 	flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
-	flags.SetAnnotation("cpu-shares", "no-buildkit", nil)
 	flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period")
-	flags.SetAnnotation("cpu-period", "no-buildkit", nil)
 	flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
-	flags.SetAnnotation("cpu-quota", "no-buildkit", nil)
 	flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
-	flags.SetAnnotation("cpuset-cpus", "no-buildkit", nil)
 	flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)")
-	flags.SetAnnotation("cpuset-mems", "no-buildkit", nil)
 	flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
-	flags.SetAnnotation("cgroup-parent", "no-buildkit", nil)
 	flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology")
 	flags.Var(&options.labels, "label", "Set metadata for an image")
 	flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image")
 	flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build")
-	flags.SetAnnotation("rm", "no-buildkit", nil)
 	flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers")
-	flags.SetAnnotation("force-rm", "no-buildkit", nil)
 	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")
 	flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
 	flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
 	flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
-	flags.SetAnnotation("compress", "no-buildkit", nil)
 	flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
-	flags.SetAnnotation("security-opt", "no-buildkit", nil)
 	flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
 	flags.SetAnnotation("network", "version", []string{"1.25"})
 	flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)")
@@ -162,7 +143,6 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
 
 	flags.StringVar(&options.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
 	flags.SetAnnotation("platform", "version", []string{"1.38"})
-	flags.SetAnnotation("platform", "buildkit", nil)
 
 	flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer")
 	flags.SetAnnotation("squash", "experimental", nil)
@@ -171,21 +151,6 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
 	flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
 	flags.MarkHidden("stream")
 
-	flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
-	flags.SetAnnotation("progress", "buildkit", nil)
-
-	flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret")
-	flags.SetAnnotation("secret", "version", []string{"1.39"})
-	flags.SetAnnotation("secret", "buildkit", nil)
-
-	flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])")
-	flags.SetAnnotation("ssh", "version", []string{"1.39"})
-	flags.SetAnnotation("ssh", "buildkit", nil)
-
-	flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)")
-	flags.SetAnnotation("output", "version", []string{"1.40"})
-	flags.SetAnnotation("output", "buildkit", nil)
-
 	return cmd
 }
 
@@ -207,15 +172,8 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
 
 // nolint: gocyclo
 func runBuild(dockerCli command.Cli, options buildOptions) error {
-	buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo())
-	if err != nil {
-		return err
-	}
-	if buildkitEnabled {
-		return runBuildBuildKit(dockerCli, options)
-	}
-
 	var (
+		err           error
 		buildCtx      io.ReadCloser
 		dockerfileCtx io.ReadCloser
 		contextDir    string
@@ -609,58 +567,3 @@ func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageBuildOptions {
 		Platform:       options.platform,
 	}
 }
-
-func parseOutputs(inp []string) ([]types.ImageBuildOutput, error) {
-	var outs []types.ImageBuildOutput
-	if len(inp) == 0 {
-		return nil, nil
-	}
-	for _, s := range inp {
-		csvReader := csv.NewReader(strings.NewReader(s))
-		fields, err := csvReader.Read()
-		if err != nil {
-			return nil, err
-		}
-		if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") {
-			if s == "-" {
-				outs = append(outs, types.ImageBuildOutput{
-					Type: "tar",
-					Attrs: map[string]string{
-						"dest": s,
-					},
-				})
-			} else {
-				outs = append(outs, types.ImageBuildOutput{
-					Type: "local",
-					Attrs: map[string]string{
-						"dest": s,
-					},
-				})
-			}
-			continue
-		}
-
-		out := types.ImageBuildOutput{
-			Attrs: map[string]string{},
-		}
-		for _, field := range fields {
-			parts := strings.SplitN(field, "=", 2)
-			if len(parts) != 2 {
-				return nil, errors.Errorf("invalid value %s", field)
-			}
-			key := strings.ToLower(parts[0])
-			value := parts[1]
-			switch key {
-			case "type":
-				out.Type = value
-			default:
-				out.Attrs[key] = value
-			}
-		}
-		if out.Type == "" {
-			return nil, errors.Errorf("type is required for output")
-		}
-		outs = append(outs, out)
-	}
-	return outs, nil
-}
@@ -1,525 +0,0 @@
-package image
-
-import (
-	"bytes"
-	"context"
-	"encoding/csv"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/containerd/console"
-	"github.com/containerd/containerd/platforms"
-	"github.com/docker/cli/cli"
-	"github.com/docker/cli/cli/command"
-	"github.com/docker/cli/cli/command/image/build"
-	"github.com/docker/cli/opts"
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/pkg/jsonmessage"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/urlutil"
-	controlapi "github.com/moby/buildkit/api/services/control"
-	"github.com/moby/buildkit/client"
-	"github.com/moby/buildkit/session"
-	"github.com/moby/buildkit/session/auth/authprovider"
-	"github.com/moby/buildkit/session/filesync"
-	"github.com/moby/buildkit/session/secrets/secretsprovider"
-	"github.com/moby/buildkit/session/sshforward/sshprovider"
-	"github.com/moby/buildkit/util/appcontext"
-	"github.com/moby/buildkit/util/gitutil"
-	"github.com/moby/buildkit/util/progress/progressui"
-	"github.com/moby/buildkit/util/progress/progresswriter"
-	"github.com/pkg/errors"
-	fsutiltypes "github.com/tonistiigi/fsutil/types"
-	"github.com/tonistiigi/go-rosetta"
-	"golang.org/x/sync/errgroup"
-)
-
-const uploadRequestRemote = "upload-request"
-
-var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles")
-
-//nolint: gocyclo
-func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
-	ctx := appcontext.Context()
-
-	s, err := trySession(dockerCli, options.context, false)
-	if err != nil {
-		return err
-	}
-	if s == nil {
-		return errors.Errorf("buildkit not supported by daemon")
-	}
-
-	if options.imageIDFile != "" {
-		// Avoid leaving a stale file if we eventually fail
-		if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) {
-			return errors.Wrap(err, "removing image ID file")
-		}
-	}
-
-	var (
-		remote           string
-		body             io.Reader
-		dockerfileName   = options.dockerfileName
-		dockerfileReader io.ReadCloser
-		dockerfileDir    string
-		contextDir       string
-	)
-
-	stdoutUsed := false
-
-	switch {
-	case options.contextFromStdin():
-		if options.dockerfileFromStdin() {
-			return errStdinConflict
-		}
-		rc, isArchive, err := build.DetectArchiveReader(dockerCli.In())
-		if err != nil {
-			return err
-		}
-		if isArchive {
-			body = rc
-			remote = uploadRequestRemote
-		} else {
-			if options.dockerfileName != "" {
-				return errDockerfileConflict
-			}
-			dockerfileReader = rc
-			remote = clientSessionRemote
-			// TODO: make fssync handle empty contextdir
-			contextDir, _ = ioutil.TempDir("", "empty-dir")
-			defer os.RemoveAll(contextDir)
-		}
-	case isLocalDir(options.context):
-		contextDir = options.context
-		if options.dockerfileFromStdin() {
-			dockerfileReader = dockerCli.In()
-		} else if options.dockerfileName != "" {
-			dockerfileName = filepath.Base(options.dockerfileName)
-			dockerfileDir = filepath.Dir(options.dockerfileName)
-		} else {
-			dockerfileDir = options.context
-		}
-		remote = clientSessionRemote
-	case urlutil.IsGitURL(options.context):
-		remote = options.context
-	case urlutil.IsURL(options.context):
-		remote = options.context
-	default:
-		return errors.Errorf("unable to prepare context: path %q not found", options.context)
-	}
-
-	if dockerfileReader != nil {
-		dockerfileName = build.DefaultDockerfileName
-		dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader)
-		if err != nil {
-			return err
-		}
-		defer os.RemoveAll(dockerfileDir)
-	}
-
-	outputs, err := parseOutputs(options.outputs)
-	if err != nil {
-		return errors.Wrapf(err, "failed to parse outputs")
-	}
-
-	for _, out := range outputs {
-		switch out.Type {
-		case "local":
-			// dest is handled on client side for local exporter
-			outDir, ok := out.Attrs["dest"]
-			if !ok {
-				return errors.Errorf("dest is required for local output")
-			}
-			delete(out.Attrs, "dest")
-			s.Allow(filesync.NewFSSyncTargetDir(outDir))
-		case "tar":
-			// dest is handled on client side for tar exporter
-			outFile, ok := out.Attrs["dest"]
-			if !ok {
-				return errors.Errorf("dest is required for tar output")
-			}
-			var w io.WriteCloser
-			if outFile == "-" {
-				if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
-					return errors.Errorf("refusing to write output to console")
-				}
-				w = os.Stdout
-				stdoutUsed = true
-			} else {
-				f, err := os.Create(outFile)
-				if err != nil {
-					return errors.Wrapf(err, "failed to open %s", outFile)
-				}
-				w = f
-			}
-			output := func(map[string]string) (io.WriteCloser, error) { return w, nil }
-			s.Allow(filesync.NewFSSyncTarget(output))
-		}
-	}
-
-	if dockerfileDir != "" {
-		s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{
-			{
-				Name: "context",
-				Dir:  contextDir,
-				Map:  resetUIDAndGID,
-			},
-			{
-				Name: "dockerfile",
-				Dir:  dockerfileDir,
-			},
-		}))
-	}
-
-	dockerAuthProvider := authprovider.NewDockerAuthProvider(os.Stderr)
-	s.Allow(dockerAuthProvider)
-	if len(options.secrets) > 0 {
-		sp, err := parseSecretSpecs(options.secrets)
-		if err != nil {
-			return errors.Wrapf(err, "could not parse secrets: %v", options.secrets)
-		}
-		s.Allow(sp)
-	}
-
-	sshSpecs := options.ssh
-	if len(sshSpecs) == 0 && isGitSSH(remote) {
-		sshSpecs = []string{"default"}
-	}
-	if len(sshSpecs) > 0 {
-		sshp, err := parseSSHSpecs(sshSpecs)
-		if err != nil {
-			return errors.Wrapf(err, "could not parse ssh: %v", sshSpecs)
-		}
-		s.Allow(sshp)
-	}
-
-	eg, ctx := errgroup.WithContext(ctx)
-
-	dialSession := func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) {
-		return dockerCli.Client().DialHijack(ctx, "/session", proto, meta)
-	}
-	eg.Go(func() error {
-		return s.Run(context.TODO(), dialSession)
-	})
-
-	buildID := stringid.GenerateRandomID()
-	if body != nil {
-		eg.Go(func() error {
-			buildOptions := types.ImageBuildOptions{
-				Version: types.BuilderBuildKit,
-				BuildID: uploadRequestRemote + ":" + buildID,
-			}
-
-			response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions)
-			if err != nil {
-				return err
-			}
-			defer response.Body.Close()
-			return nil
-		})
-	}
-
-	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" {
-		options.progress = v
-	}
-
-	if strings.EqualFold(options.platform, "local") {
-		p := platforms.DefaultSpec()
-		p.Architecture = rosetta.NativeArch() // current binary architecture might be emulated
-		options.platform = platforms.Format(p)
-	}
-
-	eg.Go(func() error {
-		defer func() { // make sure the Status ends cleanly on build errors
-			s.Close()
-		}()
-
-		buildOptions := imageBuildOptions(dockerCli, options)
-		buildOptions.Version = types.BuilderBuildKit
-		buildOptions.Dockerfile = dockerfileName
-		// buildOptions.AuthConfigs = authConfigs   // handled by session
-		buildOptions.RemoteContext = remote
-		buildOptions.SessionID = s.ID()
-		buildOptions.BuildID = buildID
-		buildOptions.Outputs = outputs
-		return doBuild(ctx, eg, dockerCli, stdoutUsed, options, buildOptions, dockerAuthProvider)
-	})
-
-	return eg.Wait()
-}
-
-//nolint: gocyclo
-func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, stdoutUsed bool, options buildOptions, buildOptions types.ImageBuildOptions, at session.Attachable) (finalErr error) {
-	response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions)
-	if err != nil {
-		return err
-	}
-	defer response.Body.Close()
-
-	done := make(chan struct{})
-	defer close(done)
-	eg.Go(func() error {
-		select {
-		case <-ctx.Done():
-			return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID)
-		case <-done:
-		}
-		return nil
-	})
-
-	t := newTracer()
-	ssArr := []*client.SolveStatus{}
-
-	if err := opts.ValidateProgressOutput(options.progress); err != nil {
-		return err
-	}
-
-	displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) {
-		var c console.Console
-		// TODO: Handle tty output in non-tty environment.
-		if cons, err := console.ConsoleFromFile(out); err == nil && (options.progress == "auto" || options.progress == "tty") {
-			c = cons
-		}
-		// not using shared context to not disrupt display but let it finish reporting errors
-		eg.Go(func() error {
-			return progressui.DisplaySolveStatus(context.TODO(), "", c, out, displayCh)
-		})
-		if s, ok := at.(interface {
-			SetLogger(progresswriter.Logger)
-		}); ok {
-			s.SetLogger(func(s *client.SolveStatus) {
-				displayCh <- s
-			})
-		}
-	}
-
-	if options.quiet {
-		eg.Go(func() error {
-			// TODO: make sure t.displayCh closes
-			for ss := range t.displayCh {
-				ssArr = append(ssArr, ss)
-			}
-			<-done
-			// TODO: verify that finalErr is indeed set when error occurs
-			if finalErr != nil {
-				displayCh := make(chan *client.SolveStatus)
-				go func() {
-					for _, ss := range ssArr {
-						displayCh <- ss
-					}
-					close(displayCh)
-				}()
-				displayStatus(os.Stderr, displayCh)
-			}
-			return nil
-		})
-	} else {
-		displayStatus(os.Stderr, t.displayCh)
-	}
-	defer close(t.displayCh)
-
-	buf := bytes.NewBuffer(nil)
-
-	imageID := ""
-	writeAux := func(msg jsonmessage.JSONMessage) {
-		if msg.ID == "moby.image.id" {
-			var result types.BuildResult
-			if err := json.Unmarshal(*msg.Aux, &result); err != nil {
-				fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err)
-			}
-			imageID = result.ID
-			return
-		}
-		t.write(msg)
-	}
-
-	err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux)
-	if err != nil {
-		if jerr, ok := err.(*jsonmessage.JSONError); ok {
-			// If no error code is set, default to 1
-			if jerr.Code == 0 {
-				jerr.Code = 1
-			}
-			return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
-		}
-	}
-
-	// Everything worked so if -q was provided the output from the daemon
-	// should be just the image ID and we'll print that to stdout.
-	//
-	// TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1)
-	// instead of assuming that output is image ID if options.quiet.
-	if options.quiet && !stdoutUsed {
-		imageID = buf.String()
-		fmt.Fprint(dockerCli.Out(), imageID)
-	}
-
-	if options.imageIDFile != "" {
-		if imageID == "" {
-			return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile)
-		}
-		imageID = strings.TrimSpace(imageID)
-		if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil {
-			return errors.Wrap(err, "cannot write image ID file")
-		}
-	}
-	return err
-}
-
-func resetUIDAndGID(_ string, s *fsutiltypes.Stat) bool {
-	s.Uid = 0
-	s.Gid = 0
-	return true
-}
-
-type tracer struct {
-	displayCh chan *client.SolveStatus
-}
-
-func newTracer() *tracer {
-	return &tracer{
-		displayCh: make(chan *client.SolveStatus),
-	}
-}
-
-func (t *tracer) write(msg jsonmessage.JSONMessage) {
-	var resp controlapi.StatusResponse
-
-	if msg.ID != "moby.buildkit.trace" {
-		return
-	}
-
-	var dt []byte
-	// ignoring all messages that are not understood
-	if err := json.Unmarshal(*msg.Aux, &dt); err != nil {
-		return
-	}
-	if err := (&resp).Unmarshal(dt); err != nil {
-		return
-	}
-
-	s := client.SolveStatus{}
-	for _, v := range resp.Vertexes {
-		s.Vertexes = append(s.Vertexes, &client.Vertex{
-			Digest:    v.Digest,
-			Inputs:    v.Inputs,
-			Name:      v.Name,
-			Started:   v.Started,
-			Completed: v.Completed,
-			Error:     v.Error,
-			Cached:    v.Cached,
-		})
-	}
-	for _, v := range resp.Statuses {
-		s.Statuses = append(s.Statuses, &client.VertexStatus{
-			ID:        v.ID,
-			Vertex:    v.Vertex,
-			Name:      v.Name,
-			Total:     v.Total,
-			Current:   v.Current,
-			Timestamp: v.Timestamp,
-			Started:   v.Started,
-			Completed: v.Completed,
-		})
-	}
-	for _, v := range resp.Logs {
-		s.Logs = append(s.Logs, &client.VertexLog{
-			Vertex:    v.Vertex,
-			Stream:    int(v.Stream),
-			Data:      v.Msg,
-			Timestamp: v.Timestamp,
-		})
-	}
-
-	t.displayCh <- &s
-}
-
-func parseSecretSpecs(sl []string) (session.Attachable, error) {
-	fs := make([]secretsprovider.Source, 0, len(sl))
-	for _, v := range sl {
-		s, err := parseSecret(v)
-		if err != nil {
-			return nil, err
-		}
-		fs = append(fs, *s)
-	}
-	store, err := secretsprovider.NewStore(fs)
-	if err != nil {
-		return nil, err
-	}
-	return secretsprovider.NewSecretProvider(store), nil
-}
-
-func parseSecret(value string) (*secretsprovider.Source, error) {
-	csvReader := csv.NewReader(strings.NewReader(value))
-	fields, err := csvReader.Read()
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to parse csv secret")
-	}
-
-	fs := secretsprovider.Source{}
-
-	var typ string
-	for _, field := range fields {
-		parts := strings.SplitN(field, "=", 2)
-		key := strings.ToLower(parts[0])
-
-		if len(parts) != 2 {
-			return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field)
-		}
-
-		value := parts[1]
-		switch key {
-		case "type":
-			if value != "file" && value != "env" {
-				return nil, errors.Errorf("unsupported secret type %q", value)
-			}
-			typ = value
-		case "id":
-			fs.ID = value
-		case "source", "src":
-			fs.FilePath = value
-		case "env":
-			fs.Env = value
-		default:
-			return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field)
-		}
-	}
-	if typ == "env" && fs.Env == "" {
-		fs.Env = fs.FilePath
-		fs.FilePath = ""
-	}
-	return &fs, nil
-}
-
-func parseSSHSpecs(sl []string) (session.Attachable, error) {
-	configs := make([]sshprovider.AgentConfig, 0, len(sl))
-	for _, v := range sl {
-		c := parseSSH(v)
-		configs = append(configs, *c)
-	}
-	return sshprovider.NewSSHAgentProvider(configs)
-}
-
-func parseSSH(value string) *sshprovider.AgentConfig {
-	parts := strings.SplitN(value, "=", 2)
-	cfg := sshprovider.AgentConfig{
-		ID: parts[0],
-	}
-	if len(parts) > 1 {
-		cfg.Paths = strings.Split(parts[1], ",")
-	}
-	return &cfg
-}
-
-func isGitSSH(url string) bool {
-	_, gitProtocol := gitutil.ParseProtocol(url)
-	return gitProtocol == gitutil.SSHProtocol
-}
@@ -1,69 +0,0 @@
-package image
-
-import (
-	"context"
-	"crypto/rand"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/cli/cli/command"
-	cliconfig "github.com/docker/cli/cli/config"
-	"github.com/docker/docker/api/types/versions"
-	"github.com/moby/buildkit/session"
-	"github.com/pkg/errors"
-)
-
-const clientSessionRemote = "client-session"
-
-func isSessionSupported(dockerCli command.Cli, forStream bool) bool {
-	if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
-		return true
-	}
-	return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
-}
-
-func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) {
-	if !isSessionSupported(dockerCli, forStream) {
-		return nil, nil
-	}
-	sharedKey := getBuildSharedKey(contextDir)
-	s, err := session.NewSession(context.Background(), filepath.Base(contextDir), sharedKey)
-	if err != nil {
-		return nil, errors.Wrap(err, "failed to create session")
-	}
-	return s, nil
-}
-
-func getBuildSharedKey(dir string) string {
-	// build session is hash of build dir with node based randomness
-	s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir)))
-	return hex.EncodeToString(s[:])
-}
-
-func tryNodeIdentifier() string {
-	out := cliconfig.Dir() // return config dir as default on permission error
-	if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil {
-		sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID")
-		if _, err := os.Lstat(sessionFile); err != nil {
-			if os.IsNotExist(err) { // create a new file with stored randomness
-				b := make([]byte, 32)
-				if _, err := rand.Read(b); err != nil {
-					return out
-				}
-				if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil {
-					return out
-				}
-			}
-		}
-
-		dt, err := ioutil.ReadFile(sessionFile)
-		if err == nil {
-			return string(dt)
-		}
-	}
-	return out
-}
@@ -5,7 +5,6 @@ import (
 	"bytes"
 	"compress/gzip"
 	"context"
-	"fmt"
 	"io"
 	"io/ioutil"
 	"os"
@@ -18,7 +17,6 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/google/go-cmp/cmp"
-	"github.com/moby/buildkit/session/secrets/secretsprovider"
 	"gotest.tools/v3/assert"
 	"gotest.tools/v3/env"
 	"gotest.tools/v3/fs"
@@ -182,75 +180,6 @@ RUN echo hello world
 	assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"})
 }
 
-func TestParseSecret(t *testing.T) {
-	type testcase struct {
-		value       string
-		errExpected bool
-		errMatch    string
-		source      *secretsprovider.Source
-	}
-	var testcases = []testcase{
-		{
-			value:       "",
-			errExpected: true,
-		}, {
-			value:       "foobar",
-			errExpected: true,
-			errMatch:    "must be a key=value pair",
-		}, {
-			value:       "foo,bar",
-			errExpected: true,
-			errMatch:    "must be a key=value pair",
-		}, {
-			value:       "foo=bar",
-			errExpected: true,
-			errMatch:    "unexpected key",
-		}, {
-			value:  "src=somefile",
-			source: &secretsprovider.Source{FilePath: "somefile"},
-		}, {
-			value:  "source=somefile",
-			source: &secretsprovider.Source{FilePath: "somefile"},
-		}, {
-			value:  "id=mysecret",
-			source: &secretsprovider.Source{ID: "mysecret"},
-		}, {
-			value:  "id=mysecret,src=somefile",
-			source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile"},
-		}, {
-			value:  "id=mysecret,source=somefile,type=file",
-			source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile"},
-		}, {
-			value:  "id=mysecret,src=somefile,src=othersecretfile",
-			source: &secretsprovider.Source{ID: "mysecret", FilePath: "othersecretfile"},
-		}, {
-			value:  "id=mysecret,src=somefile,env=SECRET",
-			source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile", Env: "SECRET"},
-		}, {
-			value:  "type=file",
-			source: &secretsprovider.Source{},
-		}, {
-			value:  "type=env",
-			source: &secretsprovider.Source{},
-		}, {
-			value:       "type=invalid",
-			errExpected: true,
-			errMatch:    "unsupported secret type",
-		},
-	}
-
-	for _, tc := range testcases {
-		t.Run(tc.value, func(t *testing.T) {
-			secret, err := parseSecret(tc.value)
-			assert.Equal(t, err != nil, tc.errExpected, fmt.Sprintf("err=%v errExpected=%t", err, tc.errExpected))
-			if tc.errMatch != "" {
-				assert.ErrorContains(t, err, tc.errMatch)
-			}
-			assert.DeepEqual(t, secret, tc.source)
-		})
-	}
-}
-
 type fakeBuild struct {
 	context *tar.Reader
 	options types.ImageBuildOptions
@@ -0,0 +1,50 @@
+package main
+
+import (
+	"os"
+	"strings"
+
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+const (
+	keyBuilderAlias = "builder"
+)
+
+var allowedAliases = map[string]struct{}{
+	keyBuilderAlias: {},
+}
+
+func processAliases(dockerCli command.Cli, cmd *cobra.Command, args, osArgs []string) ([]string, []string, error) {
+	var err error
+	aliasMap := dockerCli.ConfigFile().Aliases
+	aliases := make([][2][]string, 0, len(aliasMap))
+
+	for k, v := range aliasMap {
+		if _, ok := allowedAliases[k]; !ok {
+			return args, osArgs, errors.Errorf("not allowed to alias %q (allowed: %#v)", k, allowedAliases)
+		}
+		if _, _, err := cmd.Find(strings.Split(v, " ")); err == nil {
+			return args, osArgs, errors.Errorf("not allowed to alias with builtin %q as target", v)
+		}
+		aliases = append(aliases, [2][]string{{k}, {v}})
+	}
+
+	args, osArgs, err = processBuilder(dockerCli, cmd, args, os.Args)
+	if err != nil {
+		return args, os.Args, err
+	}
+
+	for _, al := range aliases {
+		var didChange bool
+		args, didChange = command.StringSliceReplaceAt(args, al[0], al[1], 0)
+		if didChange {
+			osArgs, _ = command.StringSliceReplaceAt(osArgs, al[0], al[1], -1)
+			break
+		}
+	}
+
+	return args, osArgs, nil
+}
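
For context, "builder" is the only alias key this file accepts in the CLI config's aliases map; any other key is rejected with the "not allowed to alias" error above. A minimal sketch of such a configuration, assuming a hypothetical alternative builder plugin named docker-mybuilder on the plugin path (the plugin name and config path are illustrative only):

    # ~/.docker/config.json (sketch):
    #   { "aliases": { "builder": "mybuilder" } }
    #
    # With this in place, processBuilder picks "mybuilder" as the builder alias,
    # so a plain "docker build ." is forwarded as "docker mybuilder build .".
    docker build .
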
@@ -0,0 +1,124 @@
+package main
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	pluginmanager "github.com/docker/cli/cli-plugins/manager"
+	"github.com/docker/cli/cli/command"
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+)
+
+const (
+	builderDefaultPlugin = "buildx"
+	buildxMissingWarning = `DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
+            Install the buildx component to build images with BuildKit:
+            https://docs.docker.com/go/buildx/
+`
+
+	buildxMissingError = `ERROR: BuildKit is enabled but the buildx component is missing or broken.
+       Install the buildx component to build images with BuildKit:
+       https://docs.docker.com/go/buildx/
+`
+)
+
+func newBuilderError(warn bool, err error) error {
+	var errorMsg string
+	if warn {
+		errorMsg = buildxMissingWarning
+	} else {
+		errorMsg = buildxMissingError
+	}
+	if pluginmanager.IsNotFound(err) {
+		return errors.New(errorMsg)
+	}
+	if err != nil {
+		return fmt.Errorf("%w\n\n%s", err, errorMsg)
+	}
+	return fmt.Errorf("%s", errorMsg)
+}
+
+func processBuilder(dockerCli command.Cli, cmd *cobra.Command, args, osargs []string) ([]string, []string, error) {
+	var useLegacy bool
+	var useBuilder bool
+
+	// check DOCKER_BUILDKIT env var is present and
+	// if not assume we want to use the builder component
+	if v, ok := os.LookupEnv("DOCKER_BUILDKIT"); ok {
+		enabled, err := strconv.ParseBool(v)
+		if err != nil {
+			return args, osargs, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
+		}
+		if !enabled {
+			useLegacy = true
+		} else {
+			useBuilder = true
+		}
+	}
+
+	// if a builder alias is defined, use it instead
+	// of the default one
+	builderAlias := builderDefaultPlugin
+	aliasMap := dockerCli.ConfigFile().Aliases
+	if v, ok := aliasMap[keyBuilderAlias]; ok {
+		useBuilder = true
+		builderAlias = v
+	}
+
+	// is this a build that should be forwarded to the builder?
+	fwargs, fwosargs, forwarded := forwardBuilder(builderAlias, args, osargs)
+	if !forwarded {
+		return args, osargs, nil
+	}
+
+	if useLegacy {
+		// display warning if not wcow and continue
+		if dockerCli.ServerInfo().OSType != "windows" {
+			_, _ = fmt.Fprintln(dockerCli.Err(), newBuilderError(true, nil))
+		}
+		return args, osargs, nil
+	}
+
+	// check plugin is available if cmd forwarded
+	plugin, perr := pluginmanager.GetPlugin(builderAlias, dockerCli, cmd.Root())
+	if perr == nil && plugin != nil {
+		perr = plugin.Err
+	}
+	if perr != nil {
+		// if builder enforced with DOCKER_BUILDKIT=1, cmd must fail if plugin missing or broken
+		if useBuilder {
+			return args, osargs, newBuilderError(false, perr)
+		}
+		// otherwise, display warning and continue
+		_, _ = fmt.Fprintln(dockerCli.Err(), newBuilderError(true, perr))
+		return args, osargs, nil
+	}
+
+	return fwargs, fwosargs, nil
+}
+
+func forwardBuilder(alias string, args, osargs []string) ([]string, []string, bool) {
+	aliases := [][2][]string{
+		{
+			{"builder"},
+			{alias},
+		},
+		{
+			{"build"},
+			{alias, "build"},
+		},
+		{
+			{"image", "build"},
+			{alias, "build"},
+		},
+	}
+	for _, al := range aliases {
+		if fwargs, changed := command.StringSliceReplaceAt(args, al[0], al[1], 0); changed {
+			fwosargs, _ := command.StringSliceReplaceAt(osargs, al[0], al[1], -1)
+			return fwargs, fwosargs, true
+		}
+	}
+	return args, osargs, false
+}
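
The warning and error strings above are what a user sees when the buildx plugin cannot be used. A rough sketch of the two paths, assuming a host where the buildx plugin is not installed (output abridged to the leading lines the tests below check for):

    # DOCKER_BUILDKIT unset or 0: fall back to the legacy builder and warn.
    docker build .
    #   DEPRECATED: The legacy builder is deprecated and will be removed in a future release.

    # DOCKER_BUILDKIT=1: the builder component is enforced, so the command fails.
    DOCKER_BUILDKIT=1 docker build .
    #   ERROR: BuildKit is enabled but the buildx component is missing or broken.
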
@@ -0,0 +1,131 @@
+package main
+
+import (
+	"bytes"
+	"os"
+	"runtime"
+	"testing"
+
+	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/internal/test/output"
+	"gotest.tools/v3/assert"
+	"gotest.tools/v3/env"
+	"gotest.tools/v3/fs"
+)
+
+var pluginFilename = "docker-buildx"
+
+func init() {
+	if runtime.GOOS == "windows" {
+		pluginFilename = pluginFilename + ".exe"
+	}
+}
+
+func TestBuildWithBuilder(t *testing.T) {
+	dir := fs.NewDir(t, t.Name(),
+		fs.WithFile(pluginFilename, `#!/bin/sh
+echo '{"SchemaVersion":"0.1.0","Vendor":"Docker Inc.","Version":"v0.6.3","ShortDescription":"Build with BuildKit"}'`, fs.WithMode(0777)),
+	)
+	defer dir.Remove()
+
+	var b bytes.Buffer
+	dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(&b))
+	assert.NilError(t, err)
+	dockerCli.ConfigFile().CLIPluginsExtraDirs = []string{dir.Path()}
+
+	tcmd := newDockerCommand(dockerCli)
+	tcmd.SetArgs([]string{"build", "."})
+
+	cmd, args, err := tcmd.HandleGlobalFlags()
+	assert.NilError(t, err)
+
+	args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args)
+	assert.NilError(t, err)
+	assert.DeepEqual(t, []string{builderDefaultPlugin, "build", "."}, args)
+}
+
+func TestBuildkitDisabled(t *testing.T) {
+	defer env.Patch(t, "DOCKER_BUILDKIT", "0")()
+
+	dir := fs.NewDir(t, t.Name(),
+		fs.WithFile(pluginFilename, `#!/bin/sh exit 1`, fs.WithMode(0777)),
+	)
+	defer dir.Remove()
+
+	b := bytes.NewBuffer(nil)
+
+	dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(b))
+	assert.NilError(t, err)
+	dockerCli.ConfigFile().CLIPluginsExtraDirs = []string{dir.Path()}
+
+	tcmd := newDockerCommand(dockerCli)
+	tcmd.SetArgs([]string{"build", "."})
+
+	cmd, args, err := tcmd.HandleGlobalFlags()
+	assert.NilError(t, err)
+
+	args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args)
+	assert.NilError(t, err)
+	assert.DeepEqual(t, []string{"build", "."}, args)
+
+	output.Assert(t, b.String(), map[int]func(string) error{
+		0: output.Suffix("DEPRECATED: The legacy builder is deprecated and will be removed in a future release."),
+	})
+}
+
+func TestBuilderBroken(t *testing.T) {
+	dir := fs.NewDir(t, t.Name(),
+		fs.WithFile(pluginFilename, `#!/bin/sh exit 1`, fs.WithMode(0777)),
+	)
+	defer dir.Remove()
+
+	b := bytes.NewBuffer(nil)
+
+	dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(b))
+	assert.NilError(t, err)
+	dockerCli.ConfigFile().CLIPluginsExtraDirs = []string{dir.Path()}
+
+	tcmd := newDockerCommand(dockerCli)
+	tcmd.SetArgs([]string{"build", "."})
+
+	cmd, args, err := tcmd.HandleGlobalFlags()
+	assert.NilError(t, err)
+
+	args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args)
+	assert.NilError(t, err)
+	assert.DeepEqual(t, []string{"build", "."}, args)
+
+	output.Assert(t, b.String(), map[int]func(string) error{
+		0: output.Prefix("failed to fetch metadata:"),
+		2: output.Suffix("DEPRECATED: The legacy builder is deprecated and will be removed in a future release."),
+	})
+}
+
+func TestBuilderBrokenEnforced(t *testing.T) {
+	defer env.Patch(t, "DOCKER_BUILDKIT", "1")()
+
+	dir := fs.NewDir(t, t.Name(),
+		fs.WithFile(pluginFilename, `#!/bin/sh exit 1`, fs.WithMode(0777)),
+	)
+	defer dir.Remove()
+
+	b := bytes.NewBuffer(nil)
+
+	dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(b))
+	assert.NilError(t, err)
+	dockerCli.ConfigFile().CLIPluginsExtraDirs = []string{dir.Path()}
+
+	tcmd := newDockerCommand(dockerCli)
+	tcmd.SetArgs([]string{"build", "."})
+
+	cmd, args, err := tcmd.HandleGlobalFlags()
+	assert.NilError(t, err)
+
+	args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args)
+	assert.DeepEqual(t, []string{"build", "."}, args)
+
+	output.Assert(t, err.Error(), map[int]func(string) error{
+		0: output.Prefix("failed to fetch metadata:"),
+		2: output.Suffix("ERROR: BuildKit is enabled but the buildx component is missing or broken."),
+	})
+}
@ -22,10 +22,6 @@ import (
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
var allowedAliases = map[string]struct{}{
|
|
||||||
"builder": {},
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDockerCommand(dockerCli *command.DockerCli) *cli.TopLevelCommand {
|
func newDockerCommand(dockerCli *command.DockerCli) *cli.TopLevelCommand {
|
||||||
var (
|
var (
|
||||||
opts *cliflags.ClientOptions
|
opts *cliflags.ClientOptions
|
||||||
|
@ -220,38 +216,6 @@ func tryPluginRun(dockerCli command.Cli, cmd *cobra.Command, subcommand string)
	return nil
}

func processAliases(dockerCli command.Cli, cmd *cobra.Command, args, osArgs []string) ([]string, []string, error) {
	aliasMap := dockerCli.ConfigFile().Aliases
	aliases := make([][2][]string, 0, len(aliasMap))

	for k, v := range aliasMap {
		if _, ok := allowedAliases[k]; !ok {
			return args, osArgs, errors.Errorf("Not allowed to alias %q. Allowed aliases: %#v", k, allowedAliases)
		}
		if _, _, err := cmd.Find(strings.Split(v, " ")); err == nil {
			return args, osArgs, errors.Errorf("Not allowed to alias with builtin %q as target", v)
		}
		aliases = append(aliases, [2][]string{{k}, {v}})
	}

	if v, ok := aliasMap["builder"]; ok {
		aliases = append(aliases,
			[2][]string{{"build"}, {v, "build"}},
			[2][]string{{"image", "build"}, {v, "build"}},
		)
	}
	for _, al := range aliases {
		var didChange bool
		args, didChange = command.StringSliceReplaceAt(args, al[0], al[1], 0)
		if didChange {
			osArgs, _ = command.StringSliceReplaceAt(osArgs, al[0], al[1], -1)
			break
		}
	}

	return args, osArgs, nil
}

func runDocker(dockerCli *command.DockerCli) error {
	tcmd := newDockerCommand(dockerCli)

@ -346,8 +310,6 @@ func hideSubcommandIf(subcmd *cobra.Command, condition func(string) bool, annota

func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error {
	var (
		buildKitDisabled = func(_ string) bool { v, _ := command.BuildKitEnabled(details.ServerInfo()); return !v }
		buildKitEnabled  = func(_ string) bool { v, _ := command.BuildKitEnabled(details.ServerInfo()); return v }
		notExperimental  = func(_ string) bool { return !details.ServerInfo().HasExperimental }
		notOSType        = func(v string) bool { return v != details.ServerInfo().OSType }
		versionOlderThan = func(v string) bool { return versions.LessThan(details.Client().ClientVersion(), v) }

@ -365,16 +327,12 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
			}
		}

		hideFlagIf(f, buildKitDisabled, "buildkit")
		hideFlagIf(f, buildKitEnabled, "no-buildkit")
		hideFlagIf(f, notExperimental, "experimental")
		hideFlagIf(f, notOSType, "ostype")
		hideFlagIf(f, versionOlderThan, "version")
	})

	for _, subcmd := range cmd.Commands() {
		hideSubcommandIf(subcmd, buildKitDisabled, "buildkit")
		hideSubcommandIf(subcmd, buildKitEnabled, "no-buildkit")
		hideSubcommandIf(subcmd, notExperimental, "experimental")
		hideSubcommandIf(subcmd, notOSType, "ostype")
		hideSubcommandIf(subcmd, versionOlderThan, "version")

@ -2,6 +2,9 @@

ARG GO_VERSION=1.16.11

ARG BUILDX_VERSION=0.7.1
FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx

FROM golang:${GO_VERSION}-alpine AS golang
ENV CGO_ENABLED=0

@ -32,6 +35,7 @@ CMD bash
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
ENV PATH=$PATH:/go/src/github.com/docker/cli/build

COPY --from=buildx /buildx /usr/libexec/docker/cli-plugins/docker-buildx
COPY --from=gotestsum /go/bin/* /go/bin/
COPY --from=goversioninfo /go/bin/* /go/bin/

@ -50,6 +50,8 @@ The table below provides an overview of the current status of deprecated feature

Status | Feature | Deprecated | Remove
-----------|------------------------------------------------------------------------------------------------------------------------------------|------------|------------
Deprecated | [Legacy builder for Linux images](#legacy-builder-for-linux-images) | v21.xx | -
Deprecated | [Legacy builder fallback](#legacy-builder-fallback) | v21.xx | -
Removed | [Support for encrypted TLS private keys](#support-for-encrypted-tls-private-keys) | v20.10 | v21.xx
Deprecated | [Kubernetes stack and context support](#kubernetes-stack-and-context-support) | v20.10 | -
Deprecated | [Pulling images from non-compliant image registries](#pulling-images-from-non-compliant-image-registries) | v20.10 | -

@ -99,6 +101,83 @@ Removed | [`--api-enable-cors` flag on `dockerd`](#--api-enable-cors-flag-on-
Removed | [`--run` flag on `docker commit`](#--run-flag-on-docker-commit) | v0.10 | v1.13
Removed | [Three arguments form in `docker import`](#three-arguments-form-in-docker-import) | v0.6.7 | v1.12

### Legacy builder for Linux images

**Deprecated in Release: v21.xx**

Docker v21.xx now uses BuildKit by default to build Linux images, and uses the
[Buildx](https://docs.docker.com/buildx/working-with-buildx/) CLI component for
`docker build`. With this change, `docker build` now exposes all advanced features
that BuildKit provides and which were previously only available through the
`docker buildx` subcommands.

The Buildx component is installed automatically when installing the `docker` CLI
using our `.deb` or `.rpm` packages, and statically linked binaries are provided
both on `download.docker.com` and through the [`docker/buildx-bin` image](https://hub.docker.com/r/docker/buildx-bin)
on Docker Hub. Refer to the [Buildx section](http://docs.docker.com/go/buildx/) for
detailed instructions on installing the Buildx component.

This release marks the beginning of the deprecation cycle of the classic ("legacy")
builder for Linux images. No active development will happen on the classic builder
(except for bugfixes). BuildKit development started five years ago, left the
"experimental" phase in Docker 18.09, and is already the default builder for
[Docker Desktop](https://docs.docker.com/desktop/mac/release-notes/3.x/#docker-desktop-320).
While we're comfortable that BuildKit is stable for general use, there may be
some changes in behavior. If you encounter issues with BuildKit, we encourage
you to report them in the [BuildKit issue tracker on GitHub](https://github.com/moby/buildkit/){:target="_blank" rel="noopener" class="_"}.

> Classic builder for building Windows images
>
> BuildKit does not (yet) provide support for building Windows images, and
> `docker build` continues to use the classic builder to build native Windows
> images on Windows daemons.

### Legacy builder fallback

**Deprecated in Release: v21.xx**

[Docker v21.xx now uses BuildKit by default to build Linux images](#legacy-builder-for-linux-images),
which requires the Buildx component to build images with BuildKit. There may be
situations where the Buildx component is not available, and BuildKit cannot be
used.

To provide a smooth transition to BuildKit as the default builder, Docker v21.xx
has an automatic fallback for some situations, or produces an error to help
users resolve the problem.

In situations where the user did not explicitly opt in to using BuildKit (i.e.,
`DOCKER_BUILDKIT=1` is not set), the CLI automatically falls back to the classic
builder, but prints a deprecation warning:

```
DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
Install the buildx component to build images with BuildKit:
https://docs.docker.com/go/buildx/
```

This situation may occur if the `docker` CLI is installed using the static binaries
and the Buildx component is not installed or not installed correctly. This fallback
will be removed in a future release, so we recommend [installing the Buildx component](https://docs.docker.com/go/buildx/)
and using BuildKit for your builds, or opting out of BuildKit with `DOCKER_BUILDKIT=0`.

If you opted in to using BuildKit (`DOCKER_BUILDKIT=1`), but the Buildx component
is missing, an error is printed instead, and the `docker build` command fails:

```
ERROR: BuildKit is enabled but the buildx component is missing or broken.
Install the buildx component to build images with BuildKit:
https://docs.docker.com/go/buildx/
```

We recommend [installing the Buildx component](https://docs.docker.com/go/buildx/)
to continue using BuildKit for your builds; alternatively, you can either
unset the `DOCKER_BUILDKIT` environment variable to fall back to the legacy builder,
or opt out of using BuildKit with `DOCKER_BUILDKIT=0`.

Be aware that the [classic builder is deprecated](#legacy-builder-for-linux-images),
so both the automatic fallback and opting out of BuildKit will no longer
be possible in a future release.

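The fallback and enforcement behavior described above reduces to a three-way decision on `DOCKER_BUILDKIT` and the presence of a working buildx plugin. The sketch below only illustrates that documented behavior; it is not the CLI's actual implementation, and the `chooseBuilder` helper and its `buildxInstalled` parameter are hypothetical names for the example.

```go
// Illustrative sketch of the documented fallback/enforcement rules.
// Not the docker/cli implementation.
package main

import (
	"errors"
	"fmt"
	"os"
)

// chooseBuilder decides which builder `docker build` should use,
// following the rules in the deprecation notes above.
func chooseBuilder(buildxInstalled bool) (useBuildKit bool, warning string, err error) {
	val, set := os.LookupEnv("DOCKER_BUILDKIT")
	switch {
	case set && val == "0":
		// Explicit opt-out: always use the classic builder.
		return false, "", nil
	case buildxInstalled:
		// Buildx is present: BuildKit is the default builder.
		return true, "", nil
	case set && val == "1":
		// Opted in to BuildKit, but buildx is missing or broken: hard error.
		return false, "", errors.New("BuildKit is enabled but the buildx component is missing or broken")
	default:
		// No explicit opt-in: fall back to the classic builder with a warning.
		return false, "DEPRECATED: The legacy builder is deprecated and will be removed in a future release.", nil
	}
}

func main() {
	use, warn, err := chooseBuilder(false)
	fmt.Println(use, warn, err)
}
```

In the tests earlier in this change, the first test tail asserts the deprecation-warning path, and `TestBuilderBrokenEnforced` asserts the error path taken when `DOCKER_BUILDKIT=1` is set.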
### Support for encrypted TLS private keys

**Deprecated in Release: v20.10**

@ -12,12 +12,15 @@ import (
	"github.com/docker/cli/internal/test/output"
	"gotest.tools/v3/assert"
	is "gotest.tools/v3/assert/cmp"
	"gotest.tools/v3/env"
	"gotest.tools/v3/fs"
	"gotest.tools/v3/icmd"
	"gotest.tools/v3/skip"
)

func TestBuildFromContextDirectoryWithTag(t *testing.T) {
	defer env.Patch(t, "DOCKER_BUILDKIT", "0")()

	dir := fs.NewDir(t, "test-build-context-dir",
		fs.WithFile("run", "echo running", fs.WithMode(0755)),
		fs.WithDir("data", fs.WithFile("one", "1111")),

@ -34,7 +37,12 @@ func TestBuildFromContextDirectoryWithTag(t *testing.T) {
		withWorkingDir(dir))
	defer icmd.RunCommand("docker", "image", "rm", "myimage")

	result.Assert(t, icmd.Expected{Err: icmd.None})
	const buildxMissingWarning = `DEPRECATED: The legacy builder is deprecated and will be removed in a future release.
Install the buildx component to build images with BuildKit:
https://docs.docker.com/go/buildx/
`

	result.Assert(t, icmd.Expected{Err: buildxMissingWarning})
	output.Assert(t, result.Stdout(), map[int]func(string) error{
		0: output.Prefix("Sending build context to Docker daemon"),
		1: output.Suffix("Step 1/4 : FROM registry:5000/alpine:3.6"),

@ -50,6 +58,7 @@ func TestBuildFromContextDirectoryWithTag(t *testing.T) {

func TestTrustedBuild(t *testing.T) {
	skip.If(t, environment.RemoteDaemon())
	defer env.Patch(t, "DOCKER_BUILDKIT", "0")()

	dir := fixtures.SetupConfigFile(t)
	defer dir.Remove()

@ -84,6 +93,7 @@ func TestTrustedBuild(t *testing.T) {

func TestTrustedBuildUntrustedImage(t *testing.T) {
	skip.If(t, environment.RemoteDaemon())
	defer env.Patch(t, "DOCKER_BUILDKIT", "0")()

	dir := fixtures.SetupConfigFile(t)
	defer dir.Remove()

@ -110,6 +120,8 @@ func TestTrustedBuildUntrustedImage(t *testing.T) {

func TestBuildIidFileSquash(t *testing.T) {
	environment.SkipIfNotExperimentalDaemon(t)
	defer env.Patch(t, "DOCKER_BUILDKIT", "0")()

	dir := fs.NewDir(t, "test-iidfile-squash")
	defer dir.Remove()
	iidfile := filepath.Join(dir.Path(), "idsquash")

opts/opts.go

@ -321,17 +321,6 @@ func ValidateSysctl(val string) (string, error) {
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}

// ValidateProgressOutput errors out if an invalid value is passed to --progress
func ValidateProgressOutput(val string) error {
	valid := []string{"auto", "plain", "tty"}
	for _, s := range valid {
		if s == val {
			return nil
		}
	}
	return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", "))
}

// FilterOpt is a flag type for validating filters
type FilterOpt struct {
	filter filters.Args

@ -8,7 +8,6 @@ go 1.16

require (
	github.com/Microsoft/go-winio v0.4.19 // indirect
	github.com/containerd/console v1.0.2
	github.com/containerd/containerd v1.5.5
	github.com/coreos/etcd v3.3.25+incompatible // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.1

@ -39,12 +38,10 @@ require (
	github.com/spf13/cobra v1.1.3
	github.com/spf13/pflag v1.0.5
	github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a
	github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf
	github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d
	github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
	github.com/xeipuuv/gojsonschema v1.2.0
	golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359
	golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1
	golang.org/x/text v0.3.4

vendor.sum
@ -57,7 +57,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
|
||||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||||
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
|
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
|
||||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||||
github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
|
|
||||||
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||||
|
@ -100,13 +99,11 @@ github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1
|
||||||
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||||
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||||
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||||
github.com/containerd/cgroups v1.0.1 h1:iJnMvco9XGvKUvNQkv88bE4uJXxRQH18efbKo9w5vHQ=
|
|
||||||
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
|
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
|
||||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||||
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||||
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||||
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
||||||
github.com/containerd/console v1.0.2 h1:Pi6D+aZXM+oUw1czuKgH5IJ+y0jhYcwBJfx5/Ghn9dE=
|
|
||||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||||
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
|
@ -137,7 +134,6 @@ github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv
|
||||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||||
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
|
||||||
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||||
github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
|
|
||||||
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
|
||||||
github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU=
|
github.com/containerd/fuse-overlayfs-snapshotter v1.0.2/go.mod h1:nRZceC8a7dRm3Ao6cJAwuJWPFiBPaibHiFntRUnzhwU=
|
||||||
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
|
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
|
||||||
|
@ -160,12 +156,10 @@ github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDG
|
||||||
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||||
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
|
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
|
||||||
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||||
github.com/containerd/ttrpc v1.0.2 h1:2/O3oTZN36q2xRolk0a2WWGgh7/Vf/liElg5hFYLX9U=
|
|
||||||
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||||
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
|
||||||
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
|
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
|
||||||
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
||||||
github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY=
|
|
||||||
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
|
||||||
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
|
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
|
||||||
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
|
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
|
||||||
|
@ -292,9 +286,7 @@ github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblf
|
||||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||||
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||||
github.com/gofrs/flock v0.7.3 h1:I0EKY9l8HZCXTMYC4F80vwT6KNypV9uYKP3Alm/hjmQ=
|
|
||||||
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
|
||||||
github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c=
|
|
||||||
github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
|
github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
|
||||||
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||||
|
@ -331,7 +323,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||||
github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
|
|
||||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||||
|
@ -348,7 +339,6 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3
|
||||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=
|
|
||||||
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||||
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
|
||||||
|
@ -364,12 +354,10 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
|
||||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg=
|
|
||||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
|
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
|
||||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
|
||||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
|
||||||
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
|
||||||
|
@ -427,7 +415,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
|
||||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||||
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||||
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
|
||||||
github.com/klauspost/compress v1.12.3 h1:G5AfA94pHPysR56qqrkO2pxEexdDzrpFJ6yt/VqWxVU=
|
|
||||||
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||||
|
@ -478,7 +465,6 @@ github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYG
|
||||||
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||||
github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a h1:1KdH8CRFygJ8oj8l8wD2TUy3hGaGUzXO2h6gyQKg780=
|
github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a h1:1KdH8CRFygJ8oj8l8wD2TUy3hGaGUzXO2h6gyQKg780=
|
||||||
github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a/go.mod h1:OieevFziOisPBM43fLKG+lPcVp9XW+BlUiws8VIpG6k=
|
github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a/go.mod h1:OieevFziOisPBM43fLKG+lPcVp9XW+BlUiws8VIpG6k=
|
||||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
|
||||||
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
|
||||||
github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
|
github.com/moby/sys/mount v0.3.0 h1:bXZYMmq7DBQPwHRxH/MG+u9+XF90ZOwoXpHTOznMGp0=
|
||||||
github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk=
|
github.com/moby/sys/mount v0.3.0/go.mod h1:U2Z3ur2rXPFrFmy4q6WMwWrBOAQGYtYTRVM8BIvzbwk=
|
||||||
|
@ -547,12 +533,10 @@ github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/
|
||||||
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc=
|
|
||||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||||
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
|
||||||
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
|
||||||
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
|
||||||
github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc=
|
|
||||||
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
|
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
|
||||||
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
|
github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
|
||||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||||
|
@ -655,13 +639,10 @@ github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a h1:tlJ
|
||||||
github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a/go.mod h1:Y94A6rPp2OwNfP/7vmf8O2xx2IykP8pPXQ1DLouGnEw=
|
github.com/theupdateframework/notary v0.7.1-0.20210315103452-bf96a202a09a/go.mod h1:Y94A6rPp2OwNfP/7vmf8O2xx2IykP8pPXQ1DLouGnEw=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||||
github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf h1:L0ixhsTk9j+dVnIvF6aiVCxPiaFvwTOyJxqimPq44p8=
|
|
||||||
github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk=
|
github.com/tonistiigi/fsutil v0.0.0-20210609172227-d72af97c0eaf/go.mod h1:lJAxK//iyZ3yGbQswdrPTxugZIDM7sd4bEsD0x3XMHk=
|
||||||
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d h1:wvQZpqy8p0D/FUia6ipKDhXrzPzBVJE4PZyPc5+5Ay0=
|
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d h1:wvQZpqy8p0D/FUia6ipKDhXrzPzBVJE4PZyPc5+5Ay0=
|
||||||
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d/go.mod h1:xKQhd7snlzKFuUi1taTGWjpRE8iFTA06DeacYi3CVFQ=
|
github.com/tonistiigi/go-rosetta v0.0.0-20200727161949-f79598599c5d/go.mod h1:xKQhd7snlzKFuUi1taTGWjpRE8iFTA06DeacYi3CVFQ=
|
||||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
|
||||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||||
github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305 h1:y/1cL5AL2oRcfzz8CAHHhR6kDDfIOT0WEyH5k40sccM=
|
|
||||||
github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305/go.mod h1:gXOLibKqQTRAVuVZ9gX7G9Ykky8ll8yb4slxsEMoY0c=
|
github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305/go.mod h1:gXOLibKqQTRAVuVZ9gX7G9Ykky8ll8yb4slxsEMoY0c=
|
||||||
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||||
|
@ -697,7 +678,6 @@ go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3C
|
||||||
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
|
||||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||||
go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8=
|
|
||||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||||
|
@ -780,7 +760,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
|
||||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
|
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
|
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
|
||||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
|
|
|
@ -1,20 +0,0 @@
linters:
  enable:
    - structcheck
    - varcheck
    - staticcheck
    - unconvert
    - gofmt
    - goimports
    - golint
    - ineffassign
    - vet
    - unused
    - misspell
  disable:
    - errcheck

run:
  timeout: 3m
  skip-dirs:
    - vendor

@ -1,191 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
https://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright The containerd Authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
@@ -1,29 +0,0 @@
# console

[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/console)](https://pkg.go.dev/github.com/containerd/console)
[![Build Status](https://github.com/containerd/console/workflows/CI/badge.svg)](https://github.com/containerd/console/actions?query=workflow%3ACI)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/console)](https://goreportcard.com/report/github.com/containerd/console)

Golang package for dealing with consoles. Light on deps and a simple API.

## Modifying the current process

```go
current := console.Current()
defer current.Reset()

if err := current.SetRaw(); err != nil {
}
ws, err := current.Size()
current.Resize(ws)
```
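
The snippet above keeps error handling to a minimum. Below is a complementary sketch (not part of the upstream README) that allocates a pty pair with `NewPty` and clears the Unix98 `+onlcr` default with `ClearONLCR`; both helpers are defined in the unix sources removed later in this diff, and starting a child process on the slave side is left out.

```go
package main

import (
	"fmt"

	"github.com/containerd/console"
)

func main() {
	// NewPty returns the master Console plus the path of the slave device.
	master, slavePath, err := console.NewPty()
	if err != nil {
		panic(err)
	}
	defer master.Close()

	// Unix98 ptys start with +onlcr; clear it so relayed output keeps plain \n.
	if err := console.ClearONLCR(master.Fd()); err != nil {
		panic(err)
	}

	fmt.Println("slave side available at", slavePath)
	// A child process would normally be started with slavePath as its tty.
}
```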

## Project details

console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
@@ -1,87 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package console

import (
	"errors"
	"io"
	"os"
)

var ErrNotAConsole = errors.New("provided file is not a console")

type File interface {
	io.ReadWriteCloser

	// Fd returns its file descriptor
	Fd() uintptr
	// Name returns its file name
	Name() string
}

type Console interface {
	File

	// Resize resizes the console to the provided window size
	Resize(WinSize) error
	// ResizeFrom resizes the calling console to the size of the
	// provided console
	ResizeFrom(Console) error
	// SetRaw sets the console in raw mode
	SetRaw() error
	// DisableEcho disables echo on the console
	DisableEcho() error
	// Reset restores the console to its original state
	Reset() error
	// Size returns the window size of the console
	Size() (WinSize, error)
}

// WinSize specifies the window size of the console
type WinSize struct {
	// Height of the console
	Height uint16
	// Width of the console
	Width uint16
	x     uint16
	y     uint16
}

// Current returns the current process' console
func Current() (c Console) {
	var err error
	// Usually all three streams (stdin, stdout, and stderr)
	// are open to the same console, but some might be redirected,
	// so try all three.
	for _, s := range []*os.File{os.Stderr, os.Stdout, os.Stdin} {
		if c, err = ConsoleFromFile(s); err == nil {
			return c
		}
	}
	// One of the std streams should always be a console
	// for the design of this function.
	panic(err)
}

// ConsoleFromFile returns a console using the provided file
// nolint:golint
func ConsoleFromFile(f File) (Console, error) {
	if err := checkConsole(f); err != nil {
		return nil, err
	}
	return newMaster(f)
}
|
|
|
@ -1,280 +0,0 @@
|
||||||
// +build linux
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxEvents = 128
|
|
||||||
)
|
|
||||||
|
|
||||||
// Epoller manages multiple epoll consoles using edge-triggered epoll api so we
|
|
||||||
// don't have to deal with repeated wake-ups from EPOLLERR or EPOLLHUP.
|
|
||||||
// For more details, see:
|
|
||||||
// - https://github.com/systemd/systemd/pull/4262
|
|
||||||
// - https://github.com/moby/moby/issues/27202
|
|
||||||
//
|
|
||||||
// Example usage of Epoller and EpollConsole is as follows:
|
|
||||||
//
|
|
||||||
// epoller, _ := NewEpoller()
|
|
||||||
// epollConsole, _ := epoller.Add(console)
|
|
||||||
// go epoller.Wait()
|
|
||||||
// var (
|
|
||||||
// b bytes.Buffer
|
|
||||||
// wg sync.WaitGroup
|
|
||||||
// )
|
|
||||||
// wg.Add(1)
|
|
||||||
// go func() {
|
|
||||||
// io.Copy(&b, epollConsole)
|
|
||||||
// wg.Done()
|
|
||||||
// }()
|
|
||||||
// // perform I/O on the console
|
|
||||||
// epollConsole.Shutdown(epoller.CloseConsole)
|
|
||||||
// wg.Wait()
|
|
||||||
// epollConsole.Close()
|
|
||||||
type Epoller struct {
|
|
||||||
efd int
|
|
||||||
mu sync.Mutex
|
|
||||||
fdMapping map[int]*EpollConsole
|
|
||||||
closeOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEpoller returns an instance of epoller with a valid epoll fd.
|
|
||||||
func NewEpoller() (*Epoller, error) {
|
|
||||||
efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Epoller{
|
|
||||||
efd: efd,
|
|
||||||
fdMapping: make(map[int]*EpollConsole),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add creates an epoll console based on the provided console. The console will
|
|
||||||
// be registered with EPOLLET (i.e. using edge-triggered notification) and its
|
|
||||||
// file descriptor will be set to non-blocking mode. After this, the caller should use
// the returned console to perform I/O.
|
|
||||||
func (e *Epoller) Add(console Console) (*EpollConsole, error) {
|
|
||||||
sysfd := int(console.Fd())
|
|
||||||
// Set sysfd to non-blocking mode
|
|
||||||
if err := unix.SetNonblock(sysfd, true); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ev := unix.EpollEvent{
|
|
||||||
Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
|
|
||||||
Fd: int32(sysfd),
|
|
||||||
}
|
|
||||||
if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
ef := &EpollConsole{
|
|
||||||
Console: console,
|
|
||||||
sysfd: sysfd,
|
|
||||||
readc: sync.NewCond(&sync.Mutex{}),
|
|
||||||
writec: sync.NewCond(&sync.Mutex{}),
|
|
||||||
}
|
|
||||||
e.mu.Lock()
|
|
||||||
e.fdMapping[sysfd] = ef
|
|
||||||
e.mu.Unlock()
|
|
||||||
return ef, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait starts the loop that waits for its consoles' notifications and signals the
// appropriate console that it can perform I/O.
|
|
||||||
func (e *Epoller) Wait() error {
|
|
||||||
events := make([]unix.EpollEvent, maxEvents)
|
|
||||||
for {
|
|
||||||
n, err := unix.EpollWait(e.efd, events, -1)
|
|
||||||
if err != nil {
|
|
||||||
// EINTR: The call was interrupted by a signal handler before either
|
|
||||||
// any of the requested events occurred or the timeout expired
|
|
||||||
if err == unix.EINTR {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
ev := &events[i]
|
|
||||||
// the console is ready to be read from
|
|
||||||
if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
|
|
||||||
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
|
|
||||||
epfile.signalRead()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// the console is ready to be written to
|
|
||||||
if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
|
|
||||||
if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
|
|
||||||
epfile.signalWrite()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseConsole unregisters the console's file descriptor from epoll interface
|
|
||||||
func (e *Epoller) CloseConsole(fd int) error {
|
|
||||||
e.mu.Lock()
|
|
||||||
defer e.mu.Unlock()
|
|
||||||
delete(e.fdMapping, fd)
|
|
||||||
return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Epoller) getConsole(sysfd int) *EpollConsole {
|
|
||||||
e.mu.Lock()
|
|
||||||
f := e.fdMapping[sysfd]
|
|
||||||
e.mu.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the epoll fd
|
|
||||||
func (e *Epoller) Close() error {
|
|
||||||
closeErr := os.ErrClosed // default to "file already closed"
|
|
||||||
e.closeOnce.Do(func() {
|
|
||||||
closeErr = unix.Close(e.efd)
|
|
||||||
})
|
|
||||||
return closeErr
|
|
||||||
}
|
|
||||||
|
|
||||||
// EpollConsole acts like a console but registers its file descriptor with an
|
|
||||||
// epoll fd and uses epoll API to perform I/O.
|
|
||||||
type EpollConsole struct {
|
|
||||||
Console
|
|
||||||
readc *sync.Cond
|
|
||||||
writec *sync.Cond
|
|
||||||
sysfd int
|
|
||||||
closed bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads up to len(p) bytes into p. It returns the number of bytes read
|
|
||||||
// (0 <= n <= len(p)) and any error encountered.
|
|
||||||
//
|
|
||||||
// If the console's read returns EAGAIN or EIO, we assume that it's a
|
|
||||||
// temporary error because the other side went away and wait for the signal
|
|
||||||
// generated by epoll event to continue.
|
|
||||||
func (ec *EpollConsole) Read(p []byte) (n int, err error) {
|
|
||||||
var read int
|
|
||||||
ec.readc.L.Lock()
|
|
||||||
defer ec.readc.L.Unlock()
|
|
||||||
for {
|
|
||||||
read, err = ec.Console.Read(p[n:])
|
|
||||||
n += read
|
|
||||||
if err != nil {
|
|
||||||
var hangup bool
|
|
||||||
if perr, ok := err.(*os.PathError); ok {
|
|
||||||
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
|
|
||||||
} else {
|
|
||||||
hangup = (err == unix.EAGAIN || err == unix.EIO)
|
|
||||||
}
|
|
||||||
// if the other end disappears, assume this is temporary and wait for the
// signal to continue again. Unless we didn't read anything and the
// console is already marked as closed, then we should exit.
|
|
||||||
if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
|
|
||||||
ec.readc.Wait()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// if we didn't read anything then return io.EOF to end gracefully
|
|
||||||
if n == 0 && len(p) > 0 && err == nil {
|
|
||||||
err = io.EOF
|
|
||||||
}
|
|
||||||
// signal for others that we finished the read
|
|
||||||
ec.readc.Signal()
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes len(p) bytes from p to the console. It returns the number of bytes
|
|
||||||
// written from p (0 <= n <= len(p)) and any error encountered that caused
|
|
||||||
// the write to stop early.
|
|
||||||
//
|
|
||||||
// If a write to the console returns EAGAIN or EIO, we assume that it's a
|
|
||||||
// temporary error because the other side went away and wait for the signal
|
|
||||||
// generated by epoll event to continue.
|
|
||||||
func (ec *EpollConsole) Write(p []byte) (n int, err error) {
|
|
||||||
var written int
|
|
||||||
ec.writec.L.Lock()
|
|
||||||
defer ec.writec.L.Unlock()
|
|
||||||
for {
|
|
||||||
written, err = ec.Console.Write(p[n:])
|
|
||||||
n += written
|
|
||||||
if err != nil {
|
|
||||||
var hangup bool
|
|
||||||
if perr, ok := err.(*os.PathError); ok {
|
|
||||||
hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
|
|
||||||
} else {
|
|
||||||
hangup = (err == unix.EAGAIN || err == unix.EIO)
|
|
||||||
}
|
|
||||||
// if the other end disappears, assume this is temporary and wait for the
|
|
||||||
// signal to continue again.
|
|
||||||
if hangup {
|
|
||||||
ec.writec.Wait()
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// unrecoverable error, break the loop and return the error
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if n < len(p) && err == nil {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
}
|
|
||||||
// signal for others that we finished the write
|
|
||||||
ec.writec.Signal()
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Shutdown closes the file descriptor and signals call waiters for this fd.
|
|
||||||
// It accepts a callback which will be called with the console's fd. The
|
|
||||||
// callback typically will be used to do further cleanup such as unregister the
|
|
||||||
// console's fd from the epoll interface.
|
|
||||||
// The caller should call Shutdown and wait for all I/O operations to finish
|
|
||||||
// before closing the console.
|
|
||||||
func (ec *EpollConsole) Shutdown(close func(int) error) error {
|
|
||||||
ec.readc.L.Lock()
|
|
||||||
defer ec.readc.L.Unlock()
|
|
||||||
ec.writec.L.Lock()
|
|
||||||
defer ec.writec.L.Unlock()
|
|
||||||
|
|
||||||
ec.readc.Broadcast()
|
|
||||||
ec.writec.Broadcast()
|
|
||||||
ec.closed = true
|
|
||||||
return close(ec.sysfd)
|
|
||||||
}
|
|
||||||
|
|
||||||
// signalRead signals that the console is readable.
|
|
||||||
func (ec *EpollConsole) signalRead() {
|
|
||||||
ec.readc.L.Lock()
|
|
||||||
ec.readc.Signal()
|
|
||||||
ec.readc.L.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// signalWrite signals that the console is writable.
|
|
||||||
func (ec *EpollConsole) signalWrite() {
|
|
||||||
ec.writec.L.Lock()
|
|
||||||
ec.writec.Signal()
|
|
||||||
ec.writec.L.Unlock()
|
|
||||||
}
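
As a companion to the Epoller doc comment above, the following hedged sketch (not part of the original file) registers the master side of a pty created with NewPty, relays its output, and then shuts the epoll console down. It assumes Linux, and starting the child process on the slave path is omitted.

```go
package main

import (
	"io"
	"os"

	"github.com/containerd/console"
)

func main() {
	// Allocate a pty pair; slavePath would be handed to a child process.
	master, slavePath, err := console.NewPty()
	if err != nil {
		panic(err)
	}
	_ = slavePath

	epoller, err := console.NewEpoller()
	if err != nil {
		panic(err)
	}
	// Run the epoll wait loop in the background.
	go epoller.Wait()

	// Register the master; reads and writes are now driven by epoll wake-ups.
	epollConsole, err := epoller.Add(master)
	if err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		// Relay everything written to the slave side onto our stdout.
		io.Copy(os.Stdout, epollConsole)
		close(done)
	}()

	// ... start a child on slavePath and wait for it to exit ...

	// Unregister from epoll and wake any blocked readers, then close.
	epollConsole.Shutdown(epoller.CloseConsole)
	<-done
	epollConsole.Close()
}
```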
|
|
|
@ -1,156 +0,0 @@
|
||||||
// +build darwin freebsd linux netbsd openbsd solaris
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NewPty creates a new pty pair
|
|
||||||
// The master is returned as the first console and a string
|
|
||||||
// with the path to the pty slave is returned as the second
|
|
||||||
func NewPty() (Console, string, error) {
|
|
||||||
f, err := openpt()
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
slave, err := ptsname(f)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
if err := unlockpt(f); err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
m, err := newMaster(f)
|
|
||||||
if err != nil {
|
|
||||||
return nil, "", err
|
|
||||||
}
|
|
||||||
return m, slave, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type master struct {
|
|
||||||
f File
|
|
||||||
original *unix.Termios
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Read(b []byte) (int, error) {
|
|
||||||
return m.f.Read(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Write(b []byte) (int, error) {
|
|
||||||
return m.f.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Close() error {
|
|
||||||
return m.f.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Resize(ws WinSize) error {
|
|
||||||
return tcswinsz(m.f.Fd(), ws)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) ResizeFrom(c Console) error {
|
|
||||||
ws, err := c.Size()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return m.Resize(ws)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Reset() error {
|
|
||||||
if m.original == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return tcset(m.f.Fd(), m.original)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) getCurrent() (unix.Termios, error) {
|
|
||||||
var termios unix.Termios
|
|
||||||
if err := tcget(m.f.Fd(), &termios); err != nil {
|
|
||||||
return unix.Termios{}, err
|
|
||||||
}
|
|
||||||
return termios, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) SetRaw() error {
|
|
||||||
rawState, err := m.getCurrent()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rawState = cfmakeraw(rawState)
|
|
||||||
rawState.Oflag = rawState.Oflag | unix.OPOST
|
|
||||||
return tcset(m.f.Fd(), &rawState)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) DisableEcho() error {
|
|
||||||
rawState, err := m.getCurrent()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rawState.Lflag = rawState.Lflag &^ unix.ECHO
|
|
||||||
return tcset(m.f.Fd(), &rawState)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Size() (WinSize, error) {
|
|
||||||
return tcgwinsz(m.f.Fd())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Fd() uintptr {
|
|
||||||
return m.f.Fd()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Name() string {
|
|
||||||
return m.f.Name()
|
|
||||||
}
|
|
||||||
|
|
||||||
// checkConsole checks if the provided file is a console
|
|
||||||
func checkConsole(f File) error {
|
|
||||||
var termios unix.Termios
|
|
||||||
if tcget(f.Fd(), &termios) != nil {
|
|
||||||
return ErrNotAConsole
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMaster(f File) (Console, error) {
|
|
||||||
m := &master{
|
|
||||||
f: f,
|
|
||||||
}
|
|
||||||
t, err := m.getCurrent()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m.original = &t
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
|
|
||||||
// created by us acts normally. In particular, a not-very-well-known default of
|
|
||||||
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
|
|
||||||
// problem for terminal emulators, because we relay data from the terminal we
|
|
||||||
// also relay that funky line discipline.
|
|
||||||
func ClearONLCR(fd uintptr) error {
|
|
||||||
return setONLCR(fd, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
|
|
||||||
// created by us acts as intended for a terminal emulator.
|
|
||||||
func SetONLCR(fd uintptr) error {
|
|
||||||
return setONLCR(fd, true)
|
|
||||||
}
|
|
|
@ -1,216 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"golang.org/x/sys/windows"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
vtInputSupported bool
|
|
||||||
ErrNotImplemented = errors.New("not implemented")
|
|
||||||
)
|
|
||||||
|
|
||||||
func (m *master) initStdios() {
|
|
||||||
m.in = windows.Handle(os.Stdin.Fd())
|
|
||||||
if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
|
|
||||||
// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
|
|
||||||
if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
|
|
||||||
vtInputSupported = true
|
|
||||||
}
|
|
||||||
// Unconditionally set the console mode back even on failure because SetConsoleMode
|
|
||||||
// remembers invalid bits on input handles.
|
|
||||||
windows.SetConsoleMode(m.in, m.inMode)
|
|
||||||
} else {
|
|
||||||
fmt.Printf("failed to get console mode for stdin: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.out = windows.Handle(os.Stdout.Fd())
|
|
||||||
if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
|
|
||||||
if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
|
|
||||||
m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
|
||||||
} else {
|
|
||||||
windows.SetConsoleMode(m.out, m.outMode)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Printf("failed to get console mode for stdout: %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
m.err = windows.Handle(os.Stderr.Fd())
|
|
||||||
if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
|
|
||||||
if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
|
|
||||||
m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
|
|
||||||
} else {
|
|
||||||
windows.SetConsoleMode(m.err, m.errMode)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fmt.Printf("failed to get console mode for stderr: %v\n", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type master struct {
|
|
||||||
in windows.Handle
|
|
||||||
inMode uint32
|
|
||||||
|
|
||||||
out windows.Handle
|
|
||||||
outMode uint32
|
|
||||||
|
|
||||||
err windows.Handle
|
|
||||||
errMode uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) SetRaw() error {
|
|
||||||
if err := makeInputRaw(m.in, m.inMode); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set StdOut and StdErr to raw mode, we ignore failures since
|
|
||||||
// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
|
|
||||||
// Windows.
|
|
||||||
|
|
||||||
windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
|
|
||||||
|
|
||||||
windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Reset() error {
|
|
||||||
for _, s := range []struct {
|
|
||||||
fd windows.Handle
|
|
||||||
mode uint32
|
|
||||||
}{
|
|
||||||
{m.in, m.inMode},
|
|
||||||
{m.out, m.outMode},
|
|
||||||
{m.err, m.errMode},
|
|
||||||
} {
|
|
||||||
if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
|
|
||||||
return errors.Wrap(err, "unable to restore console mode")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Size() (WinSize, error) {
|
|
||||||
var info windows.ConsoleScreenBufferInfo
|
|
||||||
err := windows.GetConsoleScreenBufferInfo(m.out, &info)
|
|
||||||
if err != nil {
|
|
||||||
return WinSize{}, errors.Wrap(err, "unable to get console info")
|
|
||||||
}
|
|
||||||
|
|
||||||
winsize := WinSize{
|
|
||||||
Width: uint16(info.Window.Right - info.Window.Left + 1),
|
|
||||||
Height: uint16(info.Window.Bottom - info.Window.Top + 1),
|
|
||||||
}
|
|
||||||
|
|
||||||
return winsize, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Resize(ws WinSize) error {
|
|
||||||
return ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) ResizeFrom(c Console) error {
|
|
||||||
return ErrNotImplemented
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) DisableEcho() error {
|
|
||||||
mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
|
|
||||||
mode |= windows.ENABLE_PROCESSED_INPUT
|
|
||||||
mode |= windows.ENABLE_LINE_INPUT
|
|
||||||
|
|
||||||
if err := windows.SetConsoleMode(m.in, mode); err != nil {
|
|
||||||
return errors.Wrap(err, "unable to set console to disable echo")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Read(b []byte) (int, error) {
|
|
||||||
return os.Stdin.Read(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Write(b []byte) (int, error) {
|
|
||||||
return os.Stdout.Write(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *master) Fd() uintptr {
|
|
||||||
return uintptr(m.in)
|
|
||||||
}
|
|
||||||
|
|
||||||
// On Windows, a console can only be made from os.Std{in,out,err}, hence there
// isn't a single name here we can use. Returning a dummy "console" value in this
// case should be sufficient.
|
|
||||||
func (m *master) Name() string {
|
|
||||||
return "console"
|
|
||||||
}
|
|
||||||
|
|
||||||
// makeInputRaw puts the terminal (Windows Console) connected to the given
|
|
||||||
// file descriptor into raw mode
|
|
||||||
func makeInputRaw(fd windows.Handle, mode uint32) error {
|
|
||||||
// See
|
|
||||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
|
|
||||||
// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
|
|
||||||
|
|
||||||
// Disable these modes
|
|
||||||
mode &^= windows.ENABLE_ECHO_INPUT
|
|
||||||
mode &^= windows.ENABLE_LINE_INPUT
|
|
||||||
mode &^= windows.ENABLE_MOUSE_INPUT
|
|
||||||
mode &^= windows.ENABLE_WINDOW_INPUT
|
|
||||||
mode &^= windows.ENABLE_PROCESSED_INPUT
|
|
||||||
|
|
||||||
// Enable these modes
|
|
||||||
mode |= windows.ENABLE_EXTENDED_FLAGS
|
|
||||||
mode |= windows.ENABLE_INSERT_MODE
|
|
||||||
mode |= windows.ENABLE_QUICK_EDIT_MODE
|
|
||||||
|
|
||||||
if vtInputSupported {
|
|
||||||
mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := windows.SetConsoleMode(fd, mode); err != nil {
|
|
||||||
return errors.Wrap(err, "unable to set console to raw mode")
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkConsole(f File) error {
|
|
||||||
var mode uint32
|
|
||||||
if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func newMaster(f File) (Console, error) {
|
|
||||||
if f != os.Stdin && f != os.Stdout && f != os.Stderr {
|
|
||||||
return nil, errors.New("creating a console from a file is not supported on windows")
|
|
||||||
}
|
|
||||||
m := &master{}
|
|
||||||
m.initStdios()
|
|
||||||
return m, nil
|
|
||||||
}
|
|
|
@@ -1,8 +0,0 @@
module github.com/containerd/console

go 1.13

require (
	github.com/pkg/errors v0.9.1
	golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
)
@@ -1,4 +0,0 @@
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
|
@ -1,45 +0,0 @@
|
||||||
// +build freebsd,cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include <fcntl.h>
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
// openpt allocates a new pseudo-terminal and establishes a connection with its
|
|
||||||
// control device.
|
|
||||||
func openpt() (*os.File, error) {
|
|
||||||
fd, err := C.posix_openpt(C.O_RDWR)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("posix_openpt: %w", err)
|
|
||||||
}
|
|
||||||
if _, err := C.grantpt(fd); err != nil {
|
|
||||||
C.close(fd)
|
|
||||||
return nil, fmt.Errorf("grantpt: %w", err)
|
|
||||||
}
|
|
||||||
return os.NewFile(uintptr(fd), ""), nil
|
|
||||||
}
|
|
|
@ -1,36 +0,0 @@
|
||||||
// +build freebsd,!cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
//
|
|
||||||
// Implementing the functions below requires cgo support. Non-cgo stubs
|
|
||||||
// versions are defined below to enable cross-compilation of source code
|
|
||||||
// that depends on these functions, but the resultant cross-compiled
|
|
||||||
// binaries cannot actually be used. If the stub function(s) below are
|
|
||||||
// actually invoked they will display an error message and cause the
|
|
||||||
// calling process to exit.
|
|
||||||
//
|
|
||||||
|
|
||||||
func openpt() (*os.File, error) {
|
|
||||||
panic("openpt() support requires cgo.")
|
|
||||||
}
|
|
|
@ -1,30 +0,0 @@
|
||||||
// +build darwin linux netbsd openbsd solaris
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// openpt allocates a new pseudo-terminal by opening the /dev/ptmx device
|
|
||||||
func openpt() (*os.File, error) {
|
|
||||||
return os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
|
|
||||||
}
|
|
|
@ -1,44 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("/dev/pts/%d", n), nil
|
|
||||||
}
|
|
|
@ -1,57 +0,0 @@
|
||||||
// +build freebsd,cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
#include <stdlib.h>
|
|
||||||
#include <unistd.h>
|
|
||||||
*/
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
fd := C.int(f.Fd())
|
|
||||||
if _, err := C.unlockpt(fd); err != nil {
|
|
||||||
C.close(fd)
|
|
||||||
return fmt.Errorf("unlockpt: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("/dev/pts/%d", n), nil
|
|
||||||
}
|
|
|
@ -1,55 +0,0 @@
|
||||||
// +build freebsd,!cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
//
|
|
||||||
// Implementing the functions below requires cgo support. Non-cgo stubs
|
|
||||||
// versions are defined below to enable cross-compilation of source code
|
|
||||||
// that depends on these functions, but the resultant cross-compiled
|
|
||||||
// binaries cannot actually be used. If the stub function(s) below are
|
|
||||||
// actually invoked they will display an error message and cause the
|
|
||||||
// calling process to exit.
|
|
||||||
//
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
panic("unlockpt() support requires cgo.")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("/dev/pts/%d", n), nil
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TCGETS
|
|
||||||
cmdTcSet = unix.TCSETS
|
|
||||||
)
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
var u int32
|
|
||||||
// XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81.
|
|
||||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
var u uint32
|
|
||||||
// XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81.
|
|
||||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("/dev/pts/%d", u), nil
|
|
||||||
}
|
|
|
@ -1,45 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
// This does not exist on NetBSD, it does not allocate controlling terminals on open
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return string(ptm.Sn[:bytes.IndexByte(ptm.Sn[:], 0)]), nil
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
// +build openbsd,cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
//#include <stdlib.h>
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
ptspath, err := C.ptsname(C.int(f.Fd()))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return C.GoString(ptspath), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,47 +0,0 @@
|
||||||
// +build openbsd,!cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
//
|
|
||||||
// Implementing the functions below requires cgo support. Non-cgo stubs
|
|
||||||
// versions are defined below to enable cross-compilation of source code
|
|
||||||
// that depends on these functions, but the resultant cross-compiled
|
|
||||||
// binaries cannot actually be used. If the stub function(s) below are
|
|
||||||
// actually invoked they will display an error message and cause the
|
|
||||||
// calling process to exit.
|
|
||||||
//
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TIOCGETA
|
|
||||||
cmdTcSet = unix.TIOCSETA
|
|
||||||
)
|
|
||||||
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
panic("ptsname() support requires cgo.")
|
|
||||||
}
|
|
||||||
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
panic("unlockpt() support requires cgo.")
|
|
||||||
}
|
|
|
@ -1,51 +0,0 @@
|
||||||
// +build solaris,cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
//#include <stdlib.h>
|
|
||||||
import "C"
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TCGETS
|
|
||||||
cmdTcSet = unix.TCSETS
|
|
||||||
)
|
|
||||||
|
|
||||||
// ptsname retrieves the name of the first available pts for the given master.
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
ptspath, err := C.ptsname(C.int(f.Fd()))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return C.GoString(ptspath), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
|
||||||
// unlockpt should be called before opening the slave side of a pty.
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
if _, err := C.grantpt(C.int(f.Fd())); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,47 +0,0 @@
|
||||||
// +build solaris,!cgo
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
//
|
|
||||||
// Implementing the functions below requires cgo support. Non-cgo stubs
|
|
||||||
// versions are defined below to enable cross-compilation of source code
|
|
||||||
// that depends on these functions, but the resultant cross-compiled
|
|
||||||
// binaries cannot actually be used. If the stub function(s) below are
|
|
||||||
// actually invoked they will display an error message and cause the
|
|
||||||
// calling process to exit.
|
|
||||||
//
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
cmdTcGet = unix.TCGETS
|
|
||||||
cmdTcSet = unix.TCSETS
|
|
||||||
)
|
|
||||||
|
|
||||||
func ptsname(f *os.File) (string, error) {
|
|
||||||
panic("ptsname() support requires cgo.")
|
|
||||||
}
|
|
||||||
|
|
||||||
func unlockpt(f *os.File) error {
|
|
||||||
panic("unlockpt() support requires cgo.")
|
|
||||||
}
|
|
|
@ -1,91 +0,0 @@
|
||||||
// +build darwin freebsd linux netbsd openbsd solaris
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package console
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
func tcget(fd uintptr, p *unix.Termios) error {
|
|
||||||
termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*p = *termios
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func tcset(fd uintptr, p *unix.Termios) error {
|
|
||||||
return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func tcgwinsz(fd uintptr) (WinSize, error) {
|
|
||||||
var ws WinSize
|
|
||||||
|
|
||||||
uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
|
|
||||||
if err != nil {
|
|
||||||
return ws, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Translate from unix.Winsize to console.WinSize
|
|
||||||
ws.Height = uws.Row
|
|
||||||
ws.Width = uws.Col
|
|
||||||
ws.x = uws.Xpixel
|
|
||||||
ws.y = uws.Ypixel
|
|
||||||
return ws, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func tcswinsz(fd uintptr, ws WinSize) error {
|
|
||||||
// Translate from console.WinSize to unix.Winsize
|
|
||||||
|
|
||||||
var uws unix.Winsize
|
|
||||||
uws.Row = ws.Height
|
|
||||||
uws.Col = ws.Width
|
|
||||||
uws.Xpixel = ws.x
|
|
||||||
uws.Ypixel = ws.y
|
|
||||||
|
|
||||||
return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
|
|
||||||
}
|
|
||||||
|
|
||||||
func setONLCR(fd uintptr, enable bool) error {
|
|
||||||
var termios unix.Termios
|
|
||||||
if err := tcget(fd, &termios); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if enable {
|
|
||||||
// Set +onlcr so we can act like a real terminal
|
|
||||||
termios.Oflag |= unix.ONLCR
|
|
||||||
} else {
|
|
||||||
// Set -onlcr so we don't have to deal with \r.
|
|
||||||
termios.Oflag &^= unix.ONLCR
|
|
||||||
}
|
|
||||||
return tcset(fd, &termios)
|
|
||||||
}
|
|
||||||
|
|
||||||
func cfmakeraw(t unix.Termios) unix.Termios {
|
|
||||||
t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
|
|
||||||
t.Oflag &^= unix.OPOST
|
|
||||||
t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
|
|
||||||
t.Cflag &^= (unix.CSIZE | unix.PARENB)
|
|
||||||
t.Cflag &^= unix.CS8
|
|
||||||
t.Cc[unix.VMIN] = 1
|
|
||||||
t.Cc[unix.VTIME] = 0
|
|
||||||
|
|
||||||
return t
|
|
||||||
}
|
|
5425  vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go (generated, vendored): file diff not shown because it is too large
 334  vendor/github.com/containerd/containerd/api/services/content/v1/content.proto (generated, vendored)
@@ -1,334 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

syntax = "proto3";

package containerd.services.content.v1;

import weak "gogoproto/gogo.proto";
import "google/protobuf/field_mask.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";

option go_package = "github.com/containerd/containerd/api/services/content/v1;content";

// Content provides access to a content addressable storage system.
service Content {
	// Info returns information about a committed object.
	//
	// This call can be used for getting the size of content and checking for
	// existence.
	rpc Info(InfoRequest) returns (InfoResponse);

	// Update updates content metadata.
	//
	// This call can be used to manage the mutable content labels. The
	// immutable metadata such as digest, size, and committed at cannot
	// be updated.
	rpc Update(UpdateRequest) returns (UpdateResponse);

	// List streams the entire set of content as Info objects and closes the
	// stream.
	//
	// Typically, this will yield a large response, chunked into messages.
	// Clients should make provisions to ensure they can handle the entire data
	// set.
	rpc List(ListContentRequest) returns (stream ListContentResponse);

	// Delete will delete the referenced object.
	rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);

	// Read allows one to read an object based on the offset into the content.
	//
	// The requested data may be returned in one or more messages.
	rpc Read(ReadContentRequest) returns (stream ReadContentResponse);

	// Status returns the status for a single reference.
	rpc Status(StatusRequest) returns (StatusResponse);

	// ListStatuses returns the status of ongoing object ingestions, started via
	// Write.
	//
	// Only those matching the regular expression will be provided in the
	// response. If the provided regular expression is empty, all ingestions
	// will be provided.
	rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);

	// Write begins or resumes writes to a resource identified by a unique ref.
	// Only one active stream may exist at a time for each ref.
	//
	// Once a write stream has started, it may only write to a single ref, thus
	// once a stream is started, the ref may be omitted on subsequent writes.
	//
	// For any write transaction represented by a ref, only a single write may
	// be made to a given offset. If overlapping writes occur, it is an error.
	// Writes should be sequential and implementations may throw an error if
	// this is required.
	//
	// If expected_digest is set and already part of the content store, the
	// write will fail.
	//
	// When completed, the commit flag should be set to true. If expected size
	// or digest is set, the content will be validated against those values.
	rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);

	// Abort cancels the ongoing write named in the request. Any resources
	// associated with the write will be collected.
	rpc Abort(AbortRequest) returns (google.protobuf.Empty);
}
|
|
||||||
|
|
||||||
message Info {
|
|
||||||
// Digest is the hash identity of the blob.
|
|
||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Size is the total number of bytes in the blob.
|
|
||||||
int64 size = 2;
|
|
||||||
|
|
||||||
// CreatedAt provides the time at which the blob was committed.
|
|
||||||
google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdatedAt provides the time the info was last updated.
|
|
||||||
google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 5;
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoRequest {
|
|
||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message InfoResponse {
|
|
||||||
Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateRequest {
|
|
||||||
Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdateMask specifies which fields to perform the update on. If empty,
|
|
||||||
// the operation applies to all fields.
|
|
||||||
//
|
|
||||||
// In info, Digest, Size, and CreatedAt are immutable,
|
|
||||||
// other field may be updated using this mask.
|
|
||||||
// If no mask is provided, all mutable field are updated.
|
|
||||||
google.protobuf.FieldMask update_mask = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UpdateResponse {
|
|
||||||
Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListContentRequest {
|
|
||||||
// Filters contains one or more filters using the syntax defined in the
|
|
||||||
// containerd filter package.
|
|
||||||
//
|
|
||||||
// The returned result will be those that match any of the provided
|
|
||||||
// filters. Expanded, containers that match the following will be
|
|
||||||
// returned:
|
|
||||||
//
|
|
||||||
// filters[0] or filters[1] or ... or filters[n-1] or filters[n]
|
|
||||||
//
|
|
||||||
// If filters is zero-length or nil, all items will be returned.
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListContentResponse {
|
|
||||||
repeated Info info = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message DeleteContentRequest {
|
|
||||||
// Digest specifies which content to delete.
|
|
||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadContentRequest defines the fields that make up a request to read a portion of
|
|
||||||
// data from a stored object.
|
|
||||||
message ReadContentRequest {
|
|
||||||
// Digest is the hash identity to read.
|
|
||||||
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Offset specifies the number of bytes from the start at which to begin
|
|
||||||
// the read. If zero or less, the read will be from the start. This uses
|
|
||||||
// standard zero-indexed semantics.
|
|
||||||
int64 offset = 2;
|
|
||||||
|
|
||||||
// size is the total size of the read. If zero, the entire blob will be
|
|
||||||
// returned by the service.
|
|
||||||
int64 size = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadContentResponse carries byte data for a read request.
|
|
||||||
message ReadContentResponse {
|
|
||||||
int64 offset = 1; // offset of the returned data
|
|
||||||
bytes data = 2; // actual data
|
|
||||||
}
|
|
||||||
|
|
||||||
message Status {
|
|
||||||
google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
string ref = 3;
|
|
||||||
int64 offset = 4;
|
|
||||||
int64 total = 5;
|
|
||||||
string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
message StatusRequest {
|
|
||||||
string ref = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message StatusResponse {
|
|
||||||
Status status = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListStatusesRequest {
|
|
||||||
repeated string filters = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message ListStatusesResponse {
|
|
||||||
repeated Status statuses = 1 [(gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteAction defines the behavior of a WriteRequest.
|
|
||||||
enum WriteAction {
|
|
||||||
option (gogoproto.goproto_enum_prefix) = false;
|
|
||||||
option (gogoproto.enum_customname) = "WriteAction";
|
|
||||||
|
|
||||||
// WriteActionStat instructs the writer to return the current status while
|
|
||||||
// holding the lock on the write.
|
|
||||||
STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
|
|
||||||
|
|
||||||
// WriteActionWrite sets the action for the write request to write data.
|
|
||||||
//
|
|
||||||
// Any data included will be written at the provided offset. The
|
|
||||||
// transaction will be left open for further writes.
|
|
||||||
//
|
|
||||||
// This is the default.
|
|
||||||
WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
|
|
||||||
|
|
||||||
// WriteActionCommit will write any outstanding data in the message and
|
|
||||||
// commit the write, storing it under the digest.
|
|
||||||
//
|
|
||||||
// This can be used in a single message to send the data, verify it and
|
|
||||||
// commit it.
|
|
||||||
//
|
|
||||||
// This action will always terminate the write.
|
|
||||||
COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteContentRequest writes data to the request ref at offset.
|
|
||||||
message WriteContentRequest {
|
|
||||||
// Action sets the behavior of the write.
|
|
||||||
//
|
|
||||||
// When this is a write and the ref is not yet allocated, the ref will be
|
|
||||||
// allocated and the data will be written at offset.
|
|
||||||
//
|
|
||||||
// If the action is write and the ref is allocated, it will accept data to
|
|
||||||
// an offset that has not yet been written.
|
|
||||||
//
|
|
||||||
// If the action is write and there is no data, the current write status
|
|
||||||
// will be returned. This works differently from status because the stream
|
|
||||||
// holds a lock.
|
|
||||||
WriteAction action = 1;
|
|
||||||
|
|
||||||
// Ref identifies the pre-commit object to write to.
|
|
||||||
string ref = 2;
|
|
||||||
|
|
||||||
// Total can be set to have the service validate the total size of the
|
|
||||||
// committed content.
|
|
||||||
//
|
|
||||||
// The latest value before or with the commit action message will be use to
|
|
||||||
// validate the content. If the offset overflows total, the service may
|
|
||||||
// report an error. It is only required on one message for the write.
|
|
||||||
//
|
|
||||||
// If the value is zero or less, no validation of the final content will be
|
|
||||||
// performed.
|
|
||||||
int64 total = 3;
|
|
||||||
|
|
||||||
// Expected can be set to have the service validate the final content against
|
|
||||||
// the provided digest.
|
|
||||||
//
|
|
||||||
// If the digest is already present in the object store, an AlreadyExists
|
|
||||||
// error will be returned.
|
|
||||||
//
|
|
||||||
// Only the latest version will be used to check the content against the
|
|
||||||
// digest. It is only required to include it on a single message, before or
|
|
||||||
// with the commit action message.
|
|
||||||
string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Offset specifies the number of bytes from the start at which to begin
|
|
||||||
// the write. For most implementations, this means from the start of the
|
|
||||||
// file. This uses standard, zero-indexed semantics.
|
|
||||||
//
|
|
||||||
// If the action is write, the remote may remove all previously written
|
|
||||||
// data after the offset. Implementations may support arbitrary offsets but
|
|
||||||
// MUST support reseting this value to zero with a write. If an
|
|
||||||
// implementation does not support a write at a particular offset, an
|
|
||||||
// OutOfRange error must be returned.
|
|
||||||
int64 offset = 5;
|
|
||||||
|
|
||||||
// Data is the actual bytes to be written.
|
|
||||||
//
|
|
||||||
// If this is empty and the message is not a commit, a response will be
|
|
||||||
// returned with the current write state.
|
|
||||||
bytes data = 6;
|
|
||||||
|
|
||||||
// Labels are arbitrary data on snapshots.
|
|
||||||
//
|
|
||||||
// The combined size of a key/value pair cannot exceed 4096 bytes.
|
|
||||||
map<string, string> labels = 7;
|
|
||||||
}
|
|
||||||
|
|
||||||
// WriteContentResponse is returned on the culmination of a write call.
|
|
||||||
message WriteContentResponse {
|
|
||||||
// Action contains the action for the final message of the stream. A writer
|
|
||||||
// should confirm that they match the intended result.
|
|
||||||
WriteAction action = 1;
|
|
||||||
|
|
||||||
// StartedAt provides the time at which the write began.
|
|
||||||
//
|
|
||||||
// This must be set for stat and commit write actions. All other write
|
|
||||||
// actions may omit this.
|
|
||||||
google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// UpdatedAt provides the last time of a successful write.
|
|
||||||
//
|
|
||||||
// This must be set for stat and commit write actions. All other write
|
|
||||||
// actions may omit this.
|
|
||||||
google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
|
||||||
|
|
||||||
// Offset is the current committed size for the write.
|
|
||||||
int64 offset = 4;
|
|
||||||
|
|
||||||
// Total provides the current, expected total size of the write.
|
|
||||||
//
|
|
||||||
// We include this to provide consistency with the Status structure on the
|
|
||||||
// client writer.
|
|
||||||
//
|
|
||||||
// This is only valid on the Stat and Commit response.
|
|
||||||
int64 total = 5;
|
|
||||||
|
|
||||||
// Digest, if present, includes the digest up to the currently committed
|
|
||||||
// bytes. If action is commit, this field will be set. It is implementation
|
|
||||||
// defined if this is set for other actions.
|
|
||||||
string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
|
|
||||||
}
|
|
||||||
|
|
||||||
message AbortRequest {
|
|
||||||
string ref = 1;
|
|
||||||
}
|
|
|
@ -1,52 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

package content

import (
	"strings"

	"github.com/containerd/containerd/filters"
)

// AdaptInfo returns `filters.Adaptor` that handles `content.Info`.
func AdaptInfo(info Info) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}

		switch fieldpath[0] {
		case "digest":
			return info.Digest.String(), true
		case "size":
			// TODO: support size based filtering
		case "labels":
			return checkMap(fieldpath[1:], info.Labels)
		}

		return "", false
	})
}

func checkMap(fieldpath []string, m map[string]string) (string, bool) {
	if len(m) == 0 {
		return "", false
	}

	value, ok := m[strings.Join(fieldpath, ".")]
	return value, ok
}
@ -1,182 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

package content

import (
	"context"
	"io"
	"time"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
type ReaderAt interface {
	io.ReaderAt
	io.Closer
	Size() int64
}

// Provider provides a reader interface for specific content
type Provider interface {
	// ReaderAt only requires desc.Digest to be set.
	// Other fields in the descriptor may be used internally for resolving
	// the location of the actual data.
	ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error)
}

// Ingester writes content
type Ingester interface {
	// Some implementations require WithRef to be included in opts.
	Writer(ctx context.Context, opts ...WriterOpt) (Writer, error)
}

// Info holds content specific information
//
// TODO(stevvooe): Consider a very different name for this struct. Info is way
// to general. It also reads very weird in certain context, like pluralization.
type Info struct {
	Digest    digest.Digest
	Size      int64
	CreatedAt time.Time
	UpdatedAt time.Time
	Labels    map[string]string
}

// Status of a content operation
type Status struct {
	Ref       string
	Offset    int64
	Total     int64
	Expected  digest.Digest
	StartedAt time.Time
	UpdatedAt time.Time
}

// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error

// Manager provides methods for inspecting, listing and removing content.
type Manager interface {
	// Info will return metadata about content available in the content store.
	//
	// If the content is not present, ErrNotFound will be returned.
	Info(ctx context.Context, dgst digest.Digest) (Info, error)

	// Update updates mutable information related to content.
	// If one or more fieldpaths are provided, only those
	// fields will be updated.
	// Mutable fields:
	//  labels.*
	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)

	// Walk will call fn for each item in the content store which
	// match the provided filters. If no filters are given all
	// items will be walked.
	Walk(ctx context.Context, fn WalkFunc, filters ...string) error

	// Delete removes the content from the store.
	Delete(ctx context.Context, dgst digest.Digest) error
}

// IngestManager provides methods for managing ingests.
type IngestManager interface {
	// Status returns the status of the provided ref.
	Status(ctx context.Context, ref string) (Status, error)

	// ListStatuses returns the status of any active ingestions whose ref match the
	// provided regular expression. If empty, all active ingestions will be
	// returned.
	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)

	// Abort completely cancels the ingest operation targeted by ref.
	Abort(ctx context.Context, ref string) error
}

// Writer handles the write of content into a content store
type Writer interface {
	// Close closes the writer, if the writer has not been
	// committed this allows resuming or aborting.
	// Calling Close on a closed writer will not error.
	io.WriteCloser

	// Digest may return empty digest or panics until committed.
	Digest() digest.Digest

	// Commit commits the blob (but no roll-back is guaranteed on an error).
	// size and expected can be zero-value when unknown.
	// Commit always closes the writer, even on error.
	// ErrAlreadyExists aborts the writer.
	Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error

	// Status returns the current state of write
	Status() (Status, error)

	// Truncate updates the size of the target blob
	Truncate(size int64) error
}

// Store combines the methods of content-oriented interfaces into a set that
// are commonly provided by complete implementations.
type Store interface {
	Manager
	Provider
	IngestManager
	Ingester
}

// Opt is used to alter the mutable properties of content
type Opt func(*Info) error

// WithLabels allows labels to be set on content
func WithLabels(labels map[string]string) Opt {
	return func(info *Info) error {
		info.Labels = labels
		return nil
	}
}

// WriterOpts is internally used by WriterOpt.
type WriterOpts struct {
	Ref  string
	Desc ocispec.Descriptor
}

// WriterOpt is used for passing options to Ingester.Writer.
type WriterOpt func(*WriterOpts) error

// WithDescriptor specifies an OCI descriptor.
// Writer may optionally use the descriptor internally for resolving
// the location of the actual data.
// Write does not require any field of desc to be set.
// If the data size is unknown, desc.Size should be set to 0.
// Some implementations may also accept negative values as "unknown".
func WithDescriptor(desc ocispec.Descriptor) WriterOpt {
	return func(opts *WriterOpts) error {
		opts.Desc = desc
		return nil
	}
}

// WithRef specifies a ref string.
func WithRef(ref string) WriterOpt {
	return func(opts *WriterOpts) error {
		opts.Ref = ref
		return nil
	}
}
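The `Ingester` and `Writer` interfaces above define the ingest flow: open a writer for a unique ref, stream bytes into it, then `Commit` against an expected size and digest. For reference, a minimal sketch of that flow in Go, assuming `cs` is some `content.Store` implementation (for example the local store further down in this change set); the function and ref names are illustrative only:

package main

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// ingestExample writes blob into cs using only the interfaces and
// options defined above. Sketch only, not part of this change.
func ingestExample(ctx context.Context, cs content.Store, blob []byte) error {
	expected := digest.FromBytes(blob)

	// WithDescriptor lets the store pre-check whether the digest is
	// already present; WithRef names the ingest transaction.
	cw, err := cs.Writer(ctx,
		content.WithRef("example-ingest-"+expected.String()),
		content.WithDescriptor(ocispec.Descriptor{Digest: expected, Size: int64(len(blob))}))
	if err != nil {
		return err // may be an AlreadyExists error
	}
	defer cw.Close()

	if _, err := cw.Write(blob); err != nil {
		return err
	}

	// Commit validates size and digest and always closes the writer.
	return cw.Commit(ctx, int64(len(blob)), expected)
}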
@ -1,275 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

package content

import (
	"context"
	"io"
	"io/ioutil"
	"math/rand"
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

var bufPool = sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 1<<20)
		return &buffer
	},
}

// NewReader returns a io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
	rd := io.NewSectionReader(ra, 0, ra.Size())
	return rd
}

// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
	ra, err := provider.ReaderAt(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer ra.Close()

	p := make([]byte, ra.Size())

	n, err := ra.ReadAt(p, 0)
	if err == io.EOF {
		if int64(n) != ra.Size() {
			err = io.ErrUnexpectedEOF
		} else {
			err = nil
		}
	}
	return p, err
}

// WriteBlob writes data with the expected digest into the content store. If
// expected already exists, the method returns immediately and the reader will
// not be consumed.
//
// This is useful when the digest and size are known beforehand.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error {
	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrap(err, "failed to open writer")
		}

		return nil // all ready present
	}
	defer cw.Close()

	return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
}

// OpenWriter opens a new writer for the given reference, retrying if the writer
// is locked until the reference is available or returns an error.
func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) {
	var (
		cw    Writer
		err   error
		retry = 16
	)
	for {
		cw, err = cs.Writer(ctx, opts...)
		if err != nil {
			if !errdefs.IsUnavailable(err) {
				return nil, err
			}

			// TODO: Check status to determine if the writer is active,
			// continue waiting while active, otherwise return lock
			// error or abort. Requires asserting for an ingest manager

			select {
			case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))):
				if retry < 2048 {
					retry = retry << 1
				}
				continue
			case <-ctx.Done():
				// Propagate lock error
				return nil, err
			}

		}
		break
	}

	return cw, err
}

// Copy copies data with the expected digest from the reader into the
// provided content store writer. This copy commits the writer.
//
// This is useful when the digest and size are known beforehand. When
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	ws, err := cw.Status()
	if err != nil {
		return errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	if _, err := copyWithBuffer(cw, r); err != nil {
		return errors.Wrap(err, "failed to copy")
	}

	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
		}
	}

	return nil
}

// CopyReaderAt copies to a writer from a given reader at for the given
// number of bytes. This copy does not commit the writer.
func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
	ws, err := cw.Status()
	if err != nil {
		return err
	}

	_, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
	return err
}

// CopyReader copies to a writer from a given reader, returning
// the number of bytes copied.
// Note: if the writer has a non-zero offset, the total number
// of bytes read may be greater than those copied if the reader
// is not an io.Seeker.
// This copy does not commit the writer.
func CopyReader(cw Writer, r io.Reader) (int64, error) {
	ws, err := cw.Status()
	if err != nil {
		return 0, errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, 0)
		if err != nil {
			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	return copyWithBuffer(cw, r)
}

// seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
// up to the given offset.
func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	// attempt to resolve r as a seeker and setup the offset.
	seeker, ok := r.(io.Seeker)
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {
			return nil, err
		}

		return r, nil
	}

	// ok, let's try io.ReaderAt!
	readerAt, ok := r.(io.ReaderAt)
	if ok && size > offset {
		sr := io.NewSectionReader(readerAt, offset, size)
		return sr, nil
	}

	// well then, let's just discard up to the offset
	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
	if err != nil {
		return nil, errors.Wrap(err, "failed to discard to offset")
	}
	if n != offset {
		return nil, errors.Errorf("unable to discard to offset")
	}

	return r, nil
}

// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer
// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have
// a full buffer before we do a write operation to dst to reduce overheads associated
// with the write operations of small buffers.
func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) {
	// If the reader has a WriteTo method, use it to do the copy.
	// Avoids an allocation and a copy.
	if wt, ok := src.(io.WriterTo); ok {
		return wt.WriteTo(dst)
	}
	// Similarly, if the writer has a ReadFrom method, use it to do the copy.
	if rt, ok := dst.(io.ReaderFrom); ok {
		return rt.ReadFrom(src)
	}
	bufRef := bufPool.Get().(*[]byte)
	defer bufPool.Put(bufRef)
	buf := *bufRef
	for {
		nr, er := io.ReadAtLeast(src, buf, len(buf))
		if nr > 0 {
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er != nil {
			// If an EOF happens after reading fewer than the requested bytes,
			// ReadAtLeast returns ErrUnexpectedEOF.
			if er != io.EOF && er != io.ErrUnexpectedEOF {
				err = er
			}
			break
		}
	}
	return
}
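For most callers the helpers above are the easier entry point: `WriteBlob` handles opening the writer, resuming, copying and committing, and `ReadBlob` reads a blob back through a `Provider`. A small round-trip sketch, assuming `cs` implements `content.Store` (the ref and media type are illustrative, not part of this change):

package main

import (
	"bytes"
	"context"

	"github.com/containerd/containerd/content"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// roundTripExample stores data and reads it back. Sketch only.
func roundTripExample(ctx context.Context, cs content.Store, data []byte) ([]byte, error) {
	desc := ocispec.Descriptor{
		MediaType: "application/octet-stream", // illustrative media type
		Digest:    digest.FromBytes(data),
		Size:      int64(len(data)),
	}

	// WriteBlob returns nil without consuming the reader if the digest
	// is already present in the store.
	if err := content.WriteBlob(ctx, cs, "example-ref", bytes.NewReader(data), desc); err != nil {
		return nil, err
	}

	// ReadBlob pulls the whole blob into memory; avoid for large blobs.
	return content.ReadBlob(ctx, cs, desc)
}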
@ -1,56 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

package local

import (
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// Handles locking references

type lock struct {
	since time.Time
}

var (
	// locks lets us lock in process
	locks   = make(map[string]*lock)
	locksMu sync.Mutex
)

func tryLock(ref string) error {
	locksMu.Lock()
	defer locksMu.Unlock()

	if v, ok := locks[ref]; ok {
		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since)
	}

	locks[ref] = &lock{time.Now()}
	return nil
}

func unlock(ref string) {
	locksMu.Lock()
	defer locksMu.Unlock()

	delete(locks, ref)
}
@ -1,68 +0,0 @@
/*
	Copyright The containerd Authors.

	Licensed under the Apache License, Version 2.0 (the "License");
	you may not use this file except in compliance with the License.
	You may obtain a copy of the License at

		http://www.apache.org/licenses/LICENSE-2.0

	Unless required by applicable law or agreed to in writing, software
	distributed under the License is distributed on an "AS IS" BASIS,
	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
	See the License for the specific language governing permissions and
	limitations under the License.
*/

package local

import (
	"os"

	"github.com/pkg/errors"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
)

// readerat implements io.ReaderAt in a completely stateless manner by opening
// the referenced file for each call to ReadAt.
type sizeReaderAt struct {
	size int64
	fp   *os.File
}

// OpenReader creates ReaderAt from a file
func OpenReader(p string) (content.ReaderAt, error) {
	fi, err := os.Stat(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}

		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
	}

	fp, err := os.Open(p)
	if err != nil {
		if !os.IsNotExist(err) {
			return nil, err
		}

		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
	}

	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
}

func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
	return ra.fp.ReadAt(p, offset)
}

func (ra sizeReaderAt) Size() int64 {
	return ra.size
}

func (ra sizeReaderAt) Close() error {
	return ra.fp.Close()
}
@ -1,701 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math/rand"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/containerd/containerd/filters"
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
var bufPool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
buffer := make([]byte, 1<<20)
|
|
||||||
return &buffer
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// LabelStore is used to store mutable labels for digests
|
|
||||||
type LabelStore interface {
|
|
||||||
// Get returns all the labels for the given digest
|
|
||||||
Get(digest.Digest) (map[string]string, error)
|
|
||||||
|
|
||||||
// Set sets all the labels for a given digest
|
|
||||||
Set(digest.Digest, map[string]string) error
|
|
||||||
|
|
||||||
// Update replaces the given labels for a digest,
|
|
||||||
// a key with an empty value removes a label.
|
|
||||||
Update(digest.Digest, map[string]string) (map[string]string, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Store is digest-keyed store for content. All data written into the store is
|
|
||||||
// stored under a verifiable digest.
|
|
||||||
//
|
|
||||||
// Store can generally support multi-reader, single-writer ingest of data,
|
|
||||||
// including resumable ingest.
|
|
||||||
type store struct {
|
|
||||||
root string
|
|
||||||
ls LabelStore
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewStore returns a local content store
|
|
||||||
func NewStore(root string) (content.Store, error) {
|
|
||||||
return NewLabeledStore(root, nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLabeledStore returns a new content store using the provided label store
|
|
||||||
//
|
|
||||||
// Note: content stores which are used underneath a metadata store may not
|
|
||||||
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
|
|
||||||
// useful for tests or standalone implementations.
|
|
||||||
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
|
|
||||||
if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &store{
|
|
||||||
root: root,
|
|
||||||
ls: ls,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
|
|
||||||
p, err := s.blobPath(dgst)
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, errors.Wrapf(err, "calculating blob info path")
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
return content.Info{}, err
|
|
||||||
}
|
|
||||||
var labels map[string]string
|
|
||||||
if s.ls != nil {
|
|
||||||
labels, err = s.ls.Get(dgst)
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return s.info(dgst, fi, labels), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info {
|
|
||||||
return content.Info{
|
|
||||||
Digest: dgst,
|
|
||||||
Size: fi.Size(),
|
|
||||||
CreatedAt: fi.ModTime(),
|
|
||||||
UpdatedAt: getATime(fi),
|
|
||||||
Labels: labels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReaderAt returns an io.ReaderAt for the blob.
|
|
||||||
func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
|
|
||||||
p, err := s.blobPath(desc.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
|
|
||||||
}
|
|
||||||
|
|
||||||
reader, err := OpenReader(p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return reader, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a blob by its digest.
|
|
||||||
//
|
|
||||||
// While this is safe to do concurrently, safe exist-removal logic must hold
|
|
||||||
// some global lock on the store.
|
|
||||||
func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
bp, err := s.blobPath(dgst)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrapf(err, "calculating blob path for delete")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.RemoveAll(bp); err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
|
|
||||||
if s.ls == nil {
|
|
||||||
return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
|
|
||||||
}
|
|
||||||
|
|
||||||
p, err := s.blobPath(info.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := os.Stat(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
return content.Info{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
all bool
|
|
||||||
labels map[string]string
|
|
||||||
)
|
|
||||||
if len(fieldpaths) > 0 {
|
|
||||||
for _, path := range fieldpaths {
|
|
||||||
if strings.HasPrefix(path, "labels.") {
|
|
||||||
if labels == nil {
|
|
||||||
labels = map[string]string{}
|
|
||||||
}
|
|
||||||
|
|
||||||
key := strings.TrimPrefix(path, "labels.")
|
|
||||||
labels[key] = info.Labels[key]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
switch path {
|
|
||||||
case "labels":
|
|
||||||
all = true
|
|
||||||
labels = info.Labels
|
|
||||||
default:
|
|
||||||
return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
all = true
|
|
||||||
labels = info.Labels
|
|
||||||
}
|
|
||||||
|
|
||||||
if all {
|
|
||||||
err = s.ls.Set(info.Digest, labels)
|
|
||||||
} else {
|
|
||||||
labels, err = s.ls.Update(info.Digest, labels)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
info = s.info(info.Digest, fi, labels)
|
|
||||||
info.UpdatedAt = time.Now()
|
|
||||||
|
|
||||||
if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil {
|
|
||||||
log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
|
|
||||||
root := filepath.Join(s.root, "blobs")
|
|
||||||
|
|
||||||
filter, err := filters.ParseAll(fs...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var alg digest.Algorithm
|
|
||||||
return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !fi.IsDir() && !alg.Available() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): There are few more cases with subdirs that should be
|
|
||||||
// handled in case the layout gets corrupted. This isn't strict enough
|
|
||||||
// and may spew bad data.
|
|
||||||
|
|
||||||
if path == root {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if filepath.Dir(path) == root {
|
|
||||||
alg = digest.Algorithm(filepath.Base(path))
|
|
||||||
|
|
||||||
if !alg.Available() {
|
|
||||||
alg = ""
|
|
||||||
return filepath.SkipDir
|
|
||||||
}
|
|
||||||
|
|
||||||
// descending into a hash directory
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
// log error but don't report
|
|
||||||
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
|
|
||||||
// if we see this, it could mean some sort of corruption of the
|
|
||||||
// store or extra paths not expected previously.
|
|
||||||
}
|
|
||||||
|
|
||||||
var labels map[string]string
|
|
||||||
if s.ls != nil {
|
|
||||||
labels, err = s.ls.Get(dgst)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
info := s.info(dgst, fi, labels)
|
|
||||||
if !filter.Match(content.AdaptInfo(info)) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return fn(info)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
|
|
||||||
return s.status(s.ingestRoot(ref))
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
|
|
||||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
fis, err := fp.Readdir(-1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
filter, err := filters.ParseAll(fs...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var active []content.Status
|
|
||||||
for _, fi := range fis {
|
|
||||||
p := filepath.Join(s.root, "ingest", fi.Name())
|
|
||||||
stat, err := s.status(p)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): This is a common error if uploads are being
|
|
||||||
// completed while making this listing. Need to consider taking a
|
|
||||||
// lock on the whole store to coordinate this aspect.
|
|
||||||
//
|
|
||||||
// Another option is to cleanup downloads asynchronously and
|
|
||||||
// coordinate this method with the cleanup process.
|
|
||||||
//
|
|
||||||
// For now, we just skip them, as they really don't exist.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if filter.Match(adaptStatus(stat)) {
|
|
||||||
active = append(active, stat)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return active, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// WalkStatusRefs is used to walk all status references
|
|
||||||
// Failed status reads will be logged and ignored, if
|
|
||||||
// this function is called while references are being altered,
|
|
||||||
// these error messages may be produced.
|
|
||||||
func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error {
|
|
||||||
fp, err := os.Open(filepath.Join(s.root, "ingest"))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
defer fp.Close()
|
|
||||||
|
|
||||||
fis, err := fp.Readdir(-1)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, fi := range fis {
|
|
||||||
rf := filepath.Join(s.root, "ingest", fi.Name(), "ref")
|
|
||||||
|
|
||||||
ref, err := readFileString(rf)
|
|
||||||
if err != nil {
|
|
||||||
log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(ref); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// status works like stat above except uses the path to the ingest.
|
|
||||||
func (s *store) status(ingestPath string) (content.Status, error) {
|
|
||||||
dp := filepath.Join(ingestPath, "data")
|
|
||||||
fi, err := os.Stat(dp)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
|
||||||
}
|
|
||||||
return content.Status{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ref, err := readFileString(filepath.Join(ingestPath, "ref"))
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
|
||||||
}
|
|
||||||
return content.Status{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
|
|
||||||
if err != nil {
|
|
||||||
return content.Status{}, errors.Wrapf(err, "could not read startedat")
|
|
||||||
}
|
|
||||||
|
|
||||||
updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
|
|
||||||
if err != nil {
|
|
||||||
return content.Status{}, errors.Wrapf(err, "could not read updatedat")
|
|
||||||
}
|
|
||||||
|
|
||||||
// because we don't write updatedat on every write, the mod time may
|
|
||||||
// actually be more up to date.
|
|
||||||
if fi.ModTime().After(updatedAt) {
|
|
||||||
updatedAt = fi.ModTime()
|
|
||||||
}
|
|
||||||
|
|
||||||
return content.Status{
|
|
||||||
Ref: ref,
|
|
||||||
Offset: fi.Size(),
|
|
||||||
Total: s.total(ingestPath),
|
|
||||||
UpdatedAt: updatedAt,
|
|
||||||
StartedAt: startedAt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func adaptStatus(status content.Status) filters.Adaptor {
|
|
||||||
return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
|
|
||||||
if len(fieldpath) == 0 {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
switch fieldpath[0] {
|
|
||||||
case "ref":
|
|
||||||
return status.Ref, true
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", false
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// total attempts to resolve the total expected size for the write.
|
|
||||||
func (s *store) total(ingestPath string) int64 {
|
|
||||||
totalS, err := readFileString(filepath.Join(ingestPath, "total"))
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
total, err := strconv.ParseInt(totalS, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
// represents a corrupted file, should probably remove.
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
return total
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer begins or resumes the active writer identified by ref. If the writer
|
|
||||||
// is already in use, an error is returned. Only one writer may be in use per
|
|
||||||
// ref at a time.
|
|
||||||
//
|
|
||||||
// The argument `ref` is used to uniquely identify a long-lived writer transaction.
|
|
||||||
func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
|
|
||||||
var wOpts content.WriterOpts
|
|
||||||
for _, opt := range opts {
|
|
||||||
if err := opt(&wOpts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
|
|
||||||
// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
|
|
||||||
if wOpts.Ref == "" {
|
|
||||||
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
|
|
||||||
}
|
|
||||||
var lockErr error
|
|
||||||
for count := uint64(0); count < 10; count++ {
|
|
||||||
if err := tryLock(wOpts.Ref); err != nil {
|
|
||||||
if !errdefs.IsUnavailable(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
lockErr = err
|
|
||||||
} else {
|
|
||||||
lockErr = nil
|
|
||||||
break
|
|
||||||
}
|
|
||||||
time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
|
|
||||||
}
|
|
||||||
|
|
||||||
if lockErr != nil {
|
|
||||||
return nil, lockErr
|
|
||||||
}
|
|
||||||
|
|
||||||
w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
|
|
||||||
if err != nil {
|
|
||||||
unlock(wOpts.Ref)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return w, nil // lock is now held by w.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) {
|
|
||||||
path, _, data := s.ingestPaths(ref)
|
|
||||||
status, err := s.status(path)
|
|
||||||
if err != nil {
|
|
||||||
return status, errors.Wrap(err, "failed reading status of resume write")
|
|
||||||
}
|
|
||||||
if ref != status.Ref {
|
|
||||||
// NOTE(stevvooe): This is fairly catastrophic. Either we have some
|
|
||||||
// layout corruption or a hash collision for the ref key.
|
|
||||||
return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
if total > 0 && status.Total > 0 && total != status.Total {
|
|
||||||
return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
|
|
||||||
fp, err := os.Open(data)
|
|
||||||
if err != nil {
|
|
||||||
return status, err
|
|
||||||
}
|
|
||||||
|
|
||||||
p := bufPool.Get().(*[]byte)
|
|
||||||
status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p)
|
|
||||||
bufPool.Put(p)
|
|
||||||
fp.Close()
|
|
||||||
return status, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// writer provides the main implementation of the Writer method. The caller
|
|
||||||
// must hold the lock correctly and release on error if there is a problem.
|
|
||||||
func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
|
|
||||||
// TODO(stevvooe): Need to actually store expected here. We have
|
|
||||||
// code in the service that shouldn't be dealing with this.
|
|
||||||
if expected != "" {
|
|
||||||
p, err := s.blobPath(expected)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "calculating expected blob path for writer")
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(p); err == nil {
|
|
||||||
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
path, refp, data := s.ingestPaths(ref)
|
|
||||||
|
|
||||||
var (
|
|
||||||
digester = digest.Canonical.Digester()
|
|
||||||
offset int64
|
|
||||||
startedAt time.Time
|
|
||||||
updatedAt time.Time
|
|
||||||
)
|
|
||||||
|
|
||||||
foundValidIngest := false
|
|
||||||
// ensure that the ingest path has been created.
|
|
||||||
if err := os.Mkdir(path, 0755); err != nil {
|
|
||||||
if !os.IsExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
status, err := s.resumeStatus(ref, total, digester)
|
|
||||||
if err == nil {
|
|
||||||
foundValidIngest = true
|
|
||||||
updatedAt = status.UpdatedAt
|
|
||||||
startedAt = status.StartedAt
|
|
||||||
total = status.Total
|
|
||||||
offset = status.Offset
|
|
||||||
} else {
|
|
||||||
logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if !foundValidIngest {
|
|
||||||
startedAt = time.Now()
|
|
||||||
updatedAt = startedAt
|
|
||||||
|
|
||||||
// the ingest is new, we need to setup the target location.
|
|
||||||
// write the ref to a file for later use
|
|
||||||
if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if total > 0 {
|
|
||||||
if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "failed to open data file")
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := fp.Seek(offset, io.SeekStart); err != nil {
|
|
||||||
return nil, errors.Wrap(err, "could not seek to current write offset")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &writer{
|
|
||||||
s: s,
|
|
||||||
fp: fp,
|
|
||||||
ref: ref,
|
|
||||||
path: path,
|
|
||||||
offset: offset,
|
|
||||||
total: total,
|
|
||||||
digester: digester,
|
|
||||||
startedAt: startedAt,
|
|
||||||
updatedAt: updatedAt,
|
|
||||||
}, nil
|
|
||||||
}
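
For orientation, the sketch below (not part of this diff) shows how the writer() path above is normally reached through the public content API; the store root, ref name, and payload are placeholder values.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()

	// Placeholder root; the store lays out ingest/ and blobs/ beneath it.
	cs, err := local.NewStore("/tmp/content-example")
	if err != nil {
		panic(err)
	}

	blob := []byte("hello world")
	expected := digest.FromBytes(blob)

	// Writer funnels into the writer() method shown above; the ref keys the ingest dir.
	w, err := cs.Writer(ctx, content.WithRef("example-ref"))
	if err != nil {
		panic(err)
	}
	defer w.Close()

	if _, err := w.Write(blob); err != nil {
		panic(err)
	}
	// Commit verifies size and digest, then renames the ingest into blobs/.
	if err := w.Commit(ctx, int64(len(blob)), expected); err != nil {
		panic(err)
	}
	fmt.Println("committed", expected)
}
```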
|
|
||||||
|
|
||||||
// Abort an active transaction keyed by ref. If the ingest is active, it will
|
|
||||||
// be cancelled. Any resources associated with the ingest will be cleaned.
|
|
||||||
func (s *store) Abort(ctx context.Context, ref string) error {
|
|
||||||
root := s.ingestRoot(ref)
|
|
||||||
if err := os.RemoveAll(root); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) blobPath(dgst digest.Digest) (string, error) {
|
|
||||||
if err := dgst.Validate(); err != nil {
|
|
||||||
return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *store) ingestRoot(ref string) string {
|
|
||||||
// we take a digest of the ref to keep the ingest paths constant length.
|
|
||||||
// Note that this is not the current or potential digest of incoming content.
|
|
||||||
dgst := digest.FromString(ref)
|
|
||||||
return filepath.Join(s.root, "ingest", dgst.Hex())
|
|
||||||
}
|
|
||||||
|
|
||||||
// ingestPaths are returned. The paths are the following:
|
|
||||||
//
|
|
||||||
// - root: entire ingest directory
|
|
||||||
// - ref: name of the starting ref, must be unique
|
|
||||||
// - data: file where data is written
|
|
||||||
//
|
|
||||||
func (s *store) ingestPaths(ref string) (string, string, string) {
|
|
||||||
var (
|
|
||||||
fp = s.ingestRoot(ref)
|
|
||||||
rp = filepath.Join(fp, "ref")
|
|
||||||
dp = filepath.Join(fp, "data")
|
|
||||||
)
|
|
||||||
|
|
||||||
return fp, rp, dp
|
|
||||||
}
|
|
||||||
|
|
||||||
func readFileString(path string) (string, error) {
|
|
||||||
p, err := ioutil.ReadFile(path)
|
|
||||||
return string(p), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// readFileTimestamp reads a file with just a timestamp present.
|
|
||||||
func readFileTimestamp(p string) (time.Time, error) {
|
|
||||||
b, err := ioutil.ReadFile(p)
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = errors.Wrap(errdefs.ErrNotFound, err.Error())
|
|
||||||
}
|
|
||||||
return time.Time{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var t time.Time
|
|
||||||
if err := t.UnmarshalText(b); err != nil {
|
|
||||||
return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
|
|
||||||
}
|
|
||||||
|
|
||||||
return t, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func writeTimestampFile(p string, t time.Time) error {
|
|
||||||
b, err := t.MarshalText()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return atomicWrite(p, b, 0666)
|
|
||||||
}
|
|
||||||
|
|
||||||
func atomicWrite(path string, data []byte, mode os.FileMode) error {
|
|
||||||
tmp := fmt.Sprintf("%s.tmp", path)
|
|
||||||
f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "create tmp file")
|
|
||||||
}
|
|
||||||
_, err = f.Write(data)
|
|
||||||
f.Close()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "write atomic data")
|
|
||||||
}
|
|
||||||
return os.Rename(tmp, path)
|
|
||||||
}
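
The timestamp helpers above lean on a write-to-temp-then-rename idiom. As a rough illustration (the helper name and JSON payload are invented for this note), the same pattern generalizes to any small metadata file:

```go
package example

import (
	"encoding/json"
	"io/ioutil"
	"os"
)

// atomicWriteJSON mirrors atomicWrite above: write the full payload to a
// sibling temp file, then rename it over the target so readers only ever see
// a complete old or new file. (The vendored helper additionally opens the
// temp file with O_SYNC; that is omitted here for brevity.)
func atomicWriteJSON(path string, v interface{}) error {
	b, err := json.Marshal(v)
	if err != nil {
		return err
	}
	tmp := path + ".tmp"
	if err := ioutil.WriteFile(tmp, b, 0666); err != nil {
		return err
	}
	return os.Rename(tmp, path)
}
```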
|
|
|
@@ -1,33 +0,0 @@
|
||||||
// +build darwin freebsd netbsd
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getATime(fi os.FileInfo) time.Time {
|
|
||||||
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
|
|
||||||
return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.ModTime()
|
|
||||||
}
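
The per-platform getATime helpers exist so the store can report an access time even where the underlying stat field differs by OS. One plausible consumer (invented for this note, not taken from the diff) is LRU-style pruning of a blob directory:

```go
package example

import (
	"os"
	"sort"
)

// oldestFirst returns the entries of dir ordered from least to most recently
// modified. ModTime stands in here for the platform access time that
// getATime extracts; dir is a placeholder such as <root>/blobs/sha256.
func oldestFirst(dir string) ([]os.FileInfo, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	infos := make([]os.FileInfo, 0, len(entries))
	for _, e := range entries {
		fi, err := e.Info()
		if err != nil {
			continue
		}
		infos = append(infos, fi)
	}
	sort.Slice(infos, func(i, j int) bool {
		return infos[i].ModTime().Before(infos[j].ModTime())
	})
	return infos, nil
}
```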
|
|
33 vendor/github.com/containerd/containerd/content/local/store_openbsd.go (generated, vendored)
|
@@ -1,33 +0,0 @@
|
||||||
// +build openbsd
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getATime(fi os.FileInfo) time.Time {
|
|
||||||
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
|
|
||||||
return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
|
@@ -1,33 +0,0 @@
|
||||||
// +build linux solaris
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getATime(fi os.FileInfo) time.Time {
|
|
||||||
if st, ok := fi.Sys().(*syscall.Stat_t); ok {
|
|
||||||
return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
|
|
||||||
}
|
|
||||||
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
26 vendor/github.com/containerd/containerd/content/local/store_windows.go (generated, vendored)
|
@@ -1,26 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func getATime(fi os.FileInfo) time.Time {
|
|
||||||
return fi.ModTime()
|
|
||||||
}
|
|
|
@@ -1,207 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package local
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
"github.com/opencontainers/go-digest"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// writer represents a write transaction against the blob store.
|
|
||||||
type writer struct {
|
|
||||||
s *store
|
|
||||||
fp *os.File // opened data file
|
|
||||||
path string // path to writer dir
|
|
||||||
ref string // ref key
|
|
||||||
offset int64
|
|
||||||
total int64
|
|
||||||
digester digest.Digester
|
|
||||||
startedAt time.Time
|
|
||||||
updatedAt time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Status() (content.Status, error) {
|
|
||||||
return content.Status{
|
|
||||||
Ref: w.ref,
|
|
||||||
Offset: w.offset,
|
|
||||||
Total: w.total,
|
|
||||||
StartedAt: w.startedAt,
|
|
||||||
UpdatedAt: w.updatedAt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Digest returns the current digest of the content, up to the current write.
|
|
||||||
//
|
|
||||||
// Cannot be called concurrently with `Write`.
|
|
||||||
func (w *writer) Digest() digest.Digest {
|
|
||||||
return w.digester.Digest()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write p to the transaction.
|
|
||||||
//
|
|
||||||
// Note that writes are unbuffered to the backing file. When writing, it is
|
|
||||||
// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
|
|
||||||
func (w *writer) Write(p []byte) (n int, err error) {
|
|
||||||
n, err = w.fp.Write(p)
|
|
||||||
w.digester.Hash().Write(p[:n])
|
|
||||||
w.offset += int64(len(p))
|
|
||||||
w.updatedAt = time.Now()
|
|
||||||
return n, err
|
|
||||||
}
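
As the comment above says, Write is unbuffered, so callers typically interpose a buffer themselves. A small sketch of both options it mentions (w stands in for a content.Writer obtained from the store):

```go
package example

import (
	"bufio"
	"io"
)

// copyWithBuffer hands a reusable buffer to io.CopyBuffer, avoiding the
// default 32 KiB allocation io.Copy would make per call.
func copyWithBuffer(w io.Writer, src io.Reader) (int64, error) {
	buf := make([]byte, 1<<20)
	return io.CopyBuffer(w, src, buf)
}

// copyBuffered wraps the writer in bufio so many small reads from src turn
// into fewer large writes against the backing file.
func copyBuffered(w io.Writer, src io.Reader) (int64, error) {
	bw := bufio.NewWriterSize(w, 1<<20)
	n, err := io.Copy(bw, src)
	if err != nil {
		return n, err
	}
	return n, bw.Flush()
}
```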
|
|
||||||
|
|
||||||
func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
|
||||||
// Ensure even on error the writer is fully closed
|
|
||||||
defer unlock(w.ref)
|
|
||||||
|
|
||||||
var base content.Info
|
|
||||||
for _, opt := range opts {
|
|
||||||
if err := opt(&base); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fp := w.fp
|
|
||||||
w.fp = nil
|
|
||||||
|
|
||||||
if fp == nil {
|
|
||||||
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fp.Sync(); err != nil {
|
|
||||||
fp.Close()
|
|
||||||
return errors.Wrap(err, "sync failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
fi, err := fp.Stat()
|
|
||||||
closeErr := fp.Close()
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "stat on ingest file failed")
|
|
||||||
}
|
|
||||||
if closeErr != nil {
|
|
||||||
return errors.Wrap(err, "failed to close ingest file")
|
|
||||||
}
|
|
||||||
|
|
||||||
if size > 0 && size != fi.Size() {
|
|
||||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
|
|
||||||
}
|
|
||||||
|
|
||||||
dgst := w.digester.Digest()
|
|
||||||
if expected != "" && expected != dgst {
|
|
||||||
return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
ingest = filepath.Join(w.path, "data")
|
|
||||||
target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst
|
|
||||||
)
|
|
||||||
|
|
||||||
// make sure parent directories of blob exist
|
|
||||||
if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := os.Stat(target); err == nil {
|
|
||||||
// collision with the target file!
|
|
||||||
if err := os.RemoveAll(w.path); err != nil {
|
|
||||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
|
|
||||||
}
|
|
||||||
return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := os.Rename(ingest, target); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ingest has now been made available in the content store, attempt to complete
|
|
||||||
// setting metadata but errors should only be logged and not returned since
|
|
||||||
// the content store cannot be cleanly rolled back.
|
|
||||||
|
|
||||||
commitTime := time.Now()
|
|
||||||
if err := os.Chtimes(target, commitTime, commitTime); err != nil {
|
|
||||||
log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
|
|
||||||
}
|
|
||||||
|
|
||||||
// clean up!!
|
|
||||||
if err := os.RemoveAll(w.path); err != nil {
|
|
||||||
log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
|
|
||||||
}
|
|
||||||
|
|
||||||
if w.s.ls != nil && base.Labels != nil {
|
|
||||||
if err := w.s.ls.Set(dgst, base.Labels); err != nil {
|
|
||||||
log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// change to readonly, more important for read, but provides _some_
|
|
||||||
// protection from this point on. We use the existing perms with a mask
|
|
||||||
// only allowing reads honoring the umask on creation.
|
|
||||||
//
|
|
||||||
// This removes write and exec, only allowing read per the creation umask.
|
|
||||||
//
|
|
||||||
// NOTE: Windows does not support this operation
|
|
||||||
if runtime.GOOS != "windows" {
|
|
||||||
if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
|
|
||||||
log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close the writer, flushing any unwritten data and leaving the progress
// intact.
|
|
||||||
//
|
|
||||||
// If one needs to resume the transaction, a new writer can be obtained from
|
|
||||||
// `Ingester.Writer` using the same key. The write can then be continued
// from where it was left off.
|
|
||||||
//
|
|
||||||
// To abandon a transaction completely, first call close then `IngestManager.Abort` to
|
|
||||||
// clean up the associated resources.
|
|
||||||
func (w *writer) Close() (err error) {
|
|
||||||
if w.fp != nil {
|
|
||||||
w.fp.Sync()
|
|
||||||
err = w.fp.Close()
|
|
||||||
writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
|
|
||||||
w.fp = nil
|
|
||||||
unlock(w.ref)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *writer) Truncate(size int64) error {
|
|
||||||
if size != 0 {
|
|
||||||
return errors.New("Truncate: unsupported size")
|
|
||||||
}
|
|
||||||
w.offset = 0
|
|
||||||
w.digester.Hash().Reset()
|
|
||||||
if _, err := w.fp.Seek(0, io.SeekStart); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return w.fp.Truncate(0)
|
|
||||||
}
|
|
71 vendor/github.com/containerd/containerd/content/proxy/content_reader.go (generated, vendored)
|
@@ -1,71 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package proxy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
)
|
|
||||||
|
|
||||||
type remoteReaderAt struct {
|
|
||||||
ctx context.Context
|
|
||||||
digest digest.Digest
|
|
||||||
size int64
|
|
||||||
client contentapi.ContentClient
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *remoteReaderAt) Size() int64 {
|
|
||||||
return ra.size
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
|
|
||||||
rr := &contentapi.ReadContentRequest{
|
|
||||||
Digest: ra.digest,
|
|
||||||
Offset: off,
|
|
||||||
Size_: int64(len(p)),
|
|
||||||
}
|
|
||||||
// we need a child context with cancel, or the eventually called
|
|
||||||
// grpc.NewStream will leak the goroutine until the whole thing is cleared.
|
|
||||||
// See comment at https://godoc.org/google.golang.org/grpc#ClientConn.NewStream
|
|
||||||
childCtx, cancel := context.WithCancel(ra.ctx)
|
|
||||||
// we MUST cancel the child context; see comment above
|
|
||||||
defer cancel()
|
|
||||||
rc, err := ra.client.Read(childCtx, rr)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for len(p) > 0 {
|
|
||||||
var resp *contentapi.ReadContentResponse
|
|
||||||
// fill our buffer up until we can fill p.
|
|
||||||
resp, err = rc.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
copied := copy(p, resp.Data)
|
|
||||||
n += copied
|
|
||||||
p = p[copied:]
|
|
||||||
}
|
|
||||||
return n, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ra *remoteReaderAt) Close() error {
|
|
||||||
return nil
|
|
||||||
}
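
For context, a hedged sketch of how a ReaderAt like this is normally consumed on the client side; provider and desc are supplied by the caller and are not defined in this diff:

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// dumpBlob asks the provider for a content.ReaderAt (such as remoteReaderAt
// above) and adapts it to a plain io.Reader with content.NewReader before
// streaming it to stdout.
func dumpBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) error {
	ra, err := provider.ReaderAt(ctx, desc)
	if err != nil {
		return err
	}
	defer ra.Close()

	_, err = io.Copy(os.Stdout, content.NewReader(ra))
	return err
}
```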
|
|
234 vendor/github.com/containerd/containerd/content/proxy/content_store.go (generated, vendored)
|
@@ -1,234 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package proxy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
protobuftypes "github.com/gogo/protobuf/types"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
)
|
|
||||||
|
|
||||||
type proxyContentStore struct {
|
|
||||||
client contentapi.ContentClient
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewContentStore returns a new content store which communicates over a GRPC
|
|
||||||
// connection using the containerd content GRPC API.
|
|
||||||
func NewContentStore(client contentapi.ContentClient) content.Store {
|
|
||||||
return &proxyContentStore{
|
|
||||||
client: client,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
|
|
||||||
resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{
|
|
||||||
Digest: dgst,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return infoFromGRPC(resp.Info), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
|
|
||||||
session, err := pcs.client.List(ctx, &contentapi.ListContentRequest{
|
|
||||||
Filters: filters,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
msg, err := session.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if err != io.EOF {
|
|
||||||
return errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, info := range msg.Info {
|
|
||||||
if err := fn(infoFromGRPC(info)); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
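
A short, illustrative caller for Walk; the filter string follows the selector syntax from the filters package removed later in this diff, and the label key is only an example:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
)

// listLabelled walks the store and prints every blob matching the selector.
// The variadic filters are selector strings parsed server-side.
func listLabelled(ctx context.Context, cs content.Store) error {
	return cs.Walk(ctx, func(info content.Info) error {
		fmt.Println(info.Digest, info.Size, info.Labels)
		return nil
	}, `labels."containerd.io/gc.root"`)
}
```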
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
|
|
||||||
if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{
|
|
||||||
Digest: dgst,
|
|
||||||
}); err != nil {
|
|
||||||
return errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReaderAt ignores MediaType.
|
|
||||||
func (pcs *proxyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
|
|
||||||
i, err := pcs.Info(ctx, desc.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &remoteReaderAt{
|
|
||||||
ctx: ctx,
|
|
||||||
digest: desc.Digest,
|
|
||||||
size: i.Size,
|
|
||||||
client: pcs.client,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.Status, error) {
|
|
||||||
resp, err := pcs.client.Status(ctx, &contentapi.StatusRequest{
|
|
||||||
Ref: ref,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return content.Status{}, errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
status := resp.Status
|
|
||||||
return content.Status{
|
|
||||||
Ref: status.Ref,
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
|
|
||||||
resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{
|
|
||||||
Info: infoToGRPC(info),
|
|
||||||
UpdateMask: &protobuftypes.FieldMask{
|
|
||||||
Paths: fieldpaths,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return content.Info{}, errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
return infoFromGRPC(resp.Info), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
|
|
||||||
resp, err := pcs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
|
|
||||||
Filters: filters,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var statuses []content.Status
|
|
||||||
for _, status := range resp.Statuses {
|
|
||||||
statuses = append(statuses, content.Status{
|
|
||||||
Ref: status.Ref,
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return statuses, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writer ignores MediaType.
|
|
||||||
func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
|
|
||||||
var wOpts content.WriterOpts
|
|
||||||
for _, opt := range opts {
|
|
||||||
if err := opt(&wOpts); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &remoteWriter{
|
|
||||||
ref: wOpts.Ref,
|
|
||||||
client: wrclient,
|
|
||||||
offset: offset,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Abort implements asynchronous abort. It starts a new write session on the ref l
|
|
||||||
func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error {
|
|
||||||
if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{
|
|
||||||
Ref: ref,
|
|
||||||
}); err != nil {
|
|
||||||
return errdefs.FromGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
|
|
||||||
wrclient, err := pcs.client.Write(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := wrclient.Send(&contentapi.WriteContentRequest{
|
|
||||||
Action: contentapi.WriteActionStat,
|
|
||||||
Ref: ref,
|
|
||||||
Total: size,
|
|
||||||
Expected: expected,
|
|
||||||
}); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := wrclient.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return wrclient, resp.Offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoToGRPC(info content.Info) contentapi.Info {
|
|
||||||
return contentapi.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size_: info.Size,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoFromGRPC(info contentapi.Info) content.Info {
|
|
||||||
return content.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size: info.Size_,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
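
Putting the pieces together, a rough sketch (the socket address and dial options are assumptions, not taken from this change) of dialing containerd and walking its content through this proxy store:

```go
package main

import (
	"context"
	"fmt"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/proxy"
	"github.com/containerd/containerd/namespaces"
	"google.golang.org/grpc"
)

func main() {
	// Minimal dial options; real clients add timeouts, credentials and the
	// message-size limits from the defaults package below.
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	cs := proxy.NewContentStore(contentapi.NewContentClient(conn))

	// The content service is namespaced; "default" is the conventional choice.
	ctx := namespaces.WithNamespace(context.Background(), "default")
	err = cs.Walk(ctx, func(info content.Info) error {
		fmt.Println(info.Digest, info.Size)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```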
|
|
146 vendor/github.com/containerd/containerd/content/proxy/content_writer.go (generated, vendored)
|
@@ -1,146 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package proxy
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
type remoteWriter struct {
|
|
||||||
ref string
|
|
||||||
client contentapi.Content_WriteClient
|
|
||||||
offset int64
|
|
||||||
digest digest.Digest
|
|
||||||
}
|
|
||||||
|
|
||||||
// send performs a synchronous req-resp cycle on the client.
|
|
||||||
func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) {
|
|
||||||
if err := rw.client.Send(req); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
resp, err := rw.client.Recv()
|
|
||||||
|
|
||||||
if err == nil {
|
|
||||||
// try to keep these in sync
|
|
||||||
if resp.Digest != "" {
|
|
||||||
rw.digest = resp.Digest
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resp, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Status() (content.Status, error) {
|
|
||||||
resp, err := rw.send(&contentapi.WriteContentRequest{
|
|
||||||
Action: contentapi.WriteActionStat,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
|
|
||||||
}
|
|
||||||
|
|
||||||
return content.Status{
|
|
||||||
Ref: rw.ref,
|
|
||||||
Offset: resp.Offset,
|
|
||||||
Total: resp.Total,
|
|
||||||
StartedAt: resp.StartedAt,
|
|
||||||
UpdatedAt: resp.UpdatedAt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Digest() digest.Digest {
|
|
||||||
return rw.digest
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Write(p []byte) (n int, err error) {
|
|
||||||
offset := rw.offset
|
|
||||||
|
|
||||||
resp, err := rw.send(&contentapi.WriteContentRequest{
|
|
||||||
Action: contentapi.WriteActionWrite,
|
|
||||||
Offset: offset,
|
|
||||||
Data: p,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
|
|
||||||
}
|
|
||||||
|
|
||||||
n = int(resp.Offset - offset)
|
|
||||||
if n < len(p) {
|
|
||||||
err = io.ErrShortWrite
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.offset += int64(n)
|
|
||||||
if resp.Digest != "" {
|
|
||||||
rw.digest = resp.Digest
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) (err error) {
|
|
||||||
defer func() {
|
|
||||||
err1 := rw.Close()
|
|
||||||
if err == nil {
|
|
||||||
err = err1
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
var base content.Info
|
|
||||||
for _, opt := range opts {
|
|
||||||
if err := opt(&base); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resp, err := rw.send(&contentapi.WriteContentRequest{
|
|
||||||
Action: contentapi.WriteActionCommit,
|
|
||||||
Total: size,
|
|
||||||
Offset: rw.offset,
|
|
||||||
Expected: expected,
|
|
||||||
Labels: base.Labels,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
if size != 0 && resp.Offset != size {
|
|
||||||
return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if expected != "" && resp.Digest != expected {
|
|
||||||
return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.digest = resp.Digest
|
|
||||||
rw.offset = resp.Offset
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Truncate(size int64) error {
|
|
||||||
// This truncation won't actually be validated until a write is issued.
|
|
||||||
rw.offset = size
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *remoteWriter) Close() error {
|
|
||||||
return rw.client.CloseSend()
|
|
||||||
}
|
|
|
@@ -1,32 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package defaults
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultMaxRecvMsgSize defines the default maximum message size for
|
|
||||||
// receiving protobufs passed over the GRPC API.
|
|
||||||
DefaultMaxRecvMsgSize = 16 << 20
|
|
||||||
// DefaultMaxSendMsgSize defines the default maximum message size for
|
|
||||||
// sending protobufs passed over the GRPC API.
|
|
||||||
DefaultMaxSendMsgSize = 16 << 20
|
|
||||||
// DefaultRuntimeNSLabel defines the namespace label to check for the
|
|
||||||
// default runtime
|
|
||||||
DefaultRuntimeNSLabel = "containerd.io/defaults/runtime"
|
|
||||||
// DefaultSnapshotterNSLabel defines the namespace label to check for the
|
|
||||||
// default snapshotter
|
|
||||||
DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter"
|
|
||||||
)
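
These limits are usually threaded into the gRPC call options of whatever connection talks to containerd, e.g. (sketch, not from this diff):

```go
package example

import (
	"github.com/containerd/containerd/defaults"
	"google.golang.org/grpc"
)

// dialOptions applies the limits above as default call options, so large
// content transfers are not rejected by gRPC's stock 4 MiB receive cap.
func dialOptions() []grpc.DialOption {
	return []grpc.DialOption{
		grpc.WithDefaultCallOptions(
			grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize),
			grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize),
		),
	}
}
```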
|
|
|
@@ -1,39 +0,0 @@
|
||||||
// +build !windows
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package defaults
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultRootDir is the default location used by containerd to store
|
|
||||||
// persistent data
|
|
||||||
DefaultRootDir = "/var/lib/containerd"
|
|
||||||
// DefaultStateDir is the default location used by containerd to store
|
|
||||||
// transient data
|
|
||||||
DefaultStateDir = "/run/containerd"
|
|
||||||
// DefaultAddress is the default unix socket address
|
|
||||||
DefaultAddress = "/run/containerd/containerd.sock"
|
|
||||||
// DefaultDebugAddress is the default unix socket address for pprof data
|
|
||||||
DefaultDebugAddress = "/run/containerd/debug.sock"
|
|
||||||
// DefaultFIFODir is the default location used by client-side cio library
|
|
||||||
// to store FIFOs.
|
|
||||||
DefaultFIFODir = "/run/containerd/fifo"
|
|
||||||
// DefaultRuntime is the default linux runtime
|
|
||||||
DefaultRuntime = "io.containerd.runc.v2"
|
|
||||||
// DefaultConfigDir is the default location for config files.
|
|
||||||
DefaultConfigDir = "/etc/containerd"
|
|
||||||
)
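
A minimal consumer of these constants, assuming the standard containerd client package (not shown in this diff); error handling is abbreviated:

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/defaults"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	// Connect to the default unix socket defined above.
	client, err := containerd.New(defaults.DefaultAddress)
	if err != nil {
		panic(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")
	version, err := client.Version(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("containerd", version.Version)
}
```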
|
|
48 vendor/github.com/containerd/containerd/defaults/defaults_windows.go (generated, vendored)
|
@@ -1,48 +0,0 @@
|
||||||
// +build windows
|
|
||||||
|
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package defaults
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultRootDir is the default location used by containerd to store
|
|
||||||
// persistent data
|
|
||||||
DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root")
|
|
||||||
// DefaultStateDir is the default location used by containerd to store
|
|
||||||
// transient data
|
|
||||||
DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state")
|
|
||||||
|
|
||||||
// DefaultConfigDir is the default location for config files.
|
|
||||||
DefaultConfigDir = filepath.Join(os.Getenv("programfiles"), "containerd")
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
// DefaultAddress is the default winpipe address
|
|
||||||
DefaultAddress = `\\.\pipe\containerd-containerd`
|
|
||||||
// DefaultDebugAddress is the default winpipe address for pprof data
|
|
||||||
DefaultDebugAddress = `\\.\pipe\containerd-debug`
|
|
||||||
// DefaultFIFODir is the default location used by client-side cio library
|
|
||||||
// to store FIFOs. Unused on Windows.
|
|
||||||
DefaultFIFODir = ""
|
|
||||||
// DefaultRuntime is the default windows runtime
|
|
||||||
DefaultRuntime = "io.containerd.runhcs.v1"
|
|
||||||
)
|
|
|
@@ -1,19 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package defaults provides several common defaults for interacting with
|
|
||||||
// containerd. These can be used on the client-side or server-side.
|
|
||||||
package defaults
|
|
|
@@ -1,33 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package filters
|
|
||||||
|
|
||||||
// Adaptor specifies the mapping of fieldpaths to a type. For the given field
|
|
||||||
// path, the value and whether it is present should be returned. The mapping of
|
|
||||||
// the fieldpath to a field is deferred to the adaptor implementation, but
|
|
||||||
// should generally follow protobuf field path/mask semantics.
|
|
||||||
type Adaptor interface {
|
|
||||||
Field(fieldpath []string) (value string, present bool)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AdapterFunc allows implementation specific matching of fieldpaths
|
|
||||||
type AdapterFunc func(fieldpath []string) (string, bool)
|
|
||||||
|
|
||||||
// Field returns the field name and true if it exists
|
|
||||||
func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
|
|
||||||
return fn(fieldpath)
|
|
||||||
}
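
To make the interface concrete, here is an invented adaptor over a bare label map; the filter engine hands it fieldpaths such as ["labels", "env"] and expects the value plus a presence flag back:

```go
package example

import "github.com/containerd/containerd/filters"

// labelAdaptor answers fieldpath lookups of the form labels.<key> against a
// plain map. Anything else is reported as absent. Purely illustrative.
func labelAdaptor(labels map[string]string) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 2 && fieldpath[0] == "labels" {
			v, ok := labels[fieldpath[1]]
			return v, ok
		}
		return "", false
	})
}
```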
|
|
|
@@ -1,179 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
// Package filters defines a syntax and parser that can be used for the
|
|
||||||
// filtration of items across the containerd API. The core is built on the
|
|
||||||
// concept of protobuf field paths, with quoting. Several operators allow the
|
|
||||||
// user to flexibly select items based on field presence, equality, inequality
|
|
||||||
// and regular expressions. Flexible adaptors support working with any type.
|
|
||||||
//
|
|
||||||
// The syntax is fairly familiar, if you've used container ecosystem
|
|
||||||
// projects. At the core, we base it on the concept of protobuf field
|
|
||||||
// paths, augmenting with the ability to quote portions of the field path
|
|
||||||
// to match arbitrary labels. These "selectors" come in the following
|
|
||||||
// syntax:
|
|
||||||
//
|
|
||||||
// ```
|
|
||||||
// <fieldpath>[<operator><value>]
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// A basic example is as follows:
|
|
||||||
//
|
|
||||||
// ```
|
|
||||||
// name==foo
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// This would match all objects that have a field `name` with the value
|
|
||||||
// `foo`. If we only want to test if the field is present, we can omit the
|
|
||||||
// operator. This is most useful for matching labels in containerd. The
|
|
||||||
// following will match objects that have the field "labels" and have the
|
|
||||||
// label "foo" defined:
|
|
||||||
//
|
|
||||||
// ```
|
|
||||||
// labels.foo
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// We also allow for quoting of parts of the field path to allow matching
|
|
||||||
// of arbitrary items:
|
|
||||||
//
|
|
||||||
// ```
|
|
||||||
// labels."very complex label"==something
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
// We also define `!=` and `~=` as operators. The `!=` will match all
|
|
||||||
// objects that don't match the value for a field and `~=` will compile the
|
|
||||||
// target value as a regular expression and match the field value against that.
|
|
||||||
//
|
|
||||||
// Selectors can be combined using a comma, such that the resulting
|
|
||||||
// selector will require all selectors are matched for the object to match.
|
|
||||||
// The following example will match objects that are named `foo` and have
|
|
||||||
// the label `bar`:
|
|
||||||
//
|
|
||||||
// ```
|
|
||||||
// name==foo,labels.bar
|
|
||||||
// ```
|
|
||||||
//
|
|
||||||
package filters
|
|
||||||
|
|
||||||
import (
|
|
||||||
"regexp"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Filter matches specific resources based on the provided filter
|
|
||||||
type Filter interface {
|
|
||||||
Match(adaptor Adaptor) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// FilterFunc is a function that handles matching with an adaptor
|
|
||||||
type FilterFunc func(Adaptor) bool
|
|
||||||
|
|
||||||
// Match matches the FilterFunc returning true if the object matches the filter
|
|
||||||
func (fn FilterFunc) Match(adaptor Adaptor) bool {
|
|
||||||
return fn(adaptor)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Always is a filter that always returns true for any type of object
|
|
||||||
var Always FilterFunc = func(adaptor Adaptor) bool {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Any allows multiple filters to be matched against the object
|
|
||||||
type Any []Filter
|
|
||||||
|
|
||||||
// Match returns true if any of the provided filters are true
|
|
||||||
func (m Any) Match(adaptor Adaptor) bool {
|
|
||||||
for _, m := range m {
|
|
||||||
if m.Match(adaptor) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// All allows multiple filters to be matched against the object
|
|
||||||
type All []Filter
|
|
||||||
|
|
||||||
// Match only returns true if all filters match the object
|
|
||||||
func (m All) Match(adaptor Adaptor) bool {
|
|
||||||
for _, m := range m {
|
|
||||||
if !m.Match(adaptor) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
type operator int
|
|
||||||
|
|
||||||
const (
|
|
||||||
operatorPresent = iota
|
|
||||||
operatorEqual
|
|
||||||
operatorNotEqual
|
|
||||||
operatorMatches
|
|
||||||
)
|
|
||||||
|
|
||||||
func (op operator) String() string {
|
|
||||||
switch op {
|
|
||||||
case operatorPresent:
|
|
||||||
return "?"
|
|
||||||
case operatorEqual:
|
|
||||||
return "=="
|
|
||||||
case operatorNotEqual:
|
|
||||||
return "!="
|
|
||||||
case operatorMatches:
|
|
||||||
return "~="
|
|
||||||
}
|
|
||||||
|
|
||||||
return "unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
type selector struct {
|
|
||||||
fieldpath []string
|
|
||||||
operator operator
|
|
||||||
value string
|
|
||||||
re *regexp.Regexp
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m selector) Match(adaptor Adaptor) bool {
|
|
||||||
value, present := adaptor.Field(m.fieldpath)
|
|
||||||
|
|
||||||
switch m.operator {
|
|
||||||
case operatorPresent:
|
|
||||||
return present
|
|
||||||
case operatorEqual:
|
|
||||||
return present && value == m.value
|
|
||||||
case operatorNotEqual:
|
|
||||||
return value != m.value
|
|
||||||
case operatorMatches:
|
|
||||||
if m.re == nil {
|
|
||||||
r, err := regexp.Compile(m.value)
|
|
||||||
if err != nil {
|
|
||||||
log.L.Errorf("error compiling regexp %q", m.value)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
m.re = r
|
|
||||||
}
|
|
||||||
|
|
||||||
return m.re.MatchString(value)
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
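
An end-to-end sketch of the selector syntax documented above, with invented field values; it parses a filter and evaluates it against an adaptor:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Two selectors joined by a comma: an equality check and a regexp match.
	filter, err := filters.Parse(`name==foo,labels.env~=^prod`)
	if err != nil {
		panic(err)
	}

	fields := map[string]string{"name": "foo", "env": "production"}
	adaptor := filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		switch fieldpath[0] {
		case "name":
			return fields["name"], true
		case "labels":
			if len(fieldpath) == 2 {
				v, ok := fields[fieldpath[1]]
				return v, ok
			}
		}
		return "", false
	})

	fmt.Println(filter.Match(adaptor)) // true
}
```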
|
|
|
@@ -1,292 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package filters
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
/*
|
|
||||||
Parse the strings into a filter that may be used with an adaptor.
|
|
||||||
|
|
||||||
The filter is made up of zero or more selectors.
|
|
||||||
|
|
||||||
The format is a comma separated list of expressions, in the form of
|
|
||||||
`<fieldpath><op><value>`, known as selectors. All selectors must match the
|
|
||||||
target object for the filter to be true.
|
|
||||||
|
|
||||||
We define the operators "==" for equality, "!=" for not equal and "~=" for a
|
|
||||||
regular expression. If the operator and value are not present, the matcher will
|
|
||||||
test for the presence of a value, as defined by the target object.
|
|
||||||
|
|
||||||
The formal grammar is as follows:
|
|
||||||
|
|
||||||
selectors := selector ("," selector)*
|
|
||||||
selector := fieldpath (operator value)
|
|
||||||
fieldpath := field ('.' field)*
|
|
||||||
field := quoted | [A-Za-z] [A-Za-z0-9_]+
|
|
||||||
operator := "==" | "!=" | "~="
|
|
||||||
value := quoted | [^\s,]+
|
|
||||||
quoted := <go string syntax>
|
|
||||||
|
|
||||||
*/
|
|
||||||
func Parse(s string) (Filter, error) {
|
|
||||||
// special case empty to match all
|
|
||||||
if s == "" {
|
|
||||||
return Always, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
p := parser{input: s}
|
|
||||||
return p.parse()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseAll parses each filter in ss and returns a filter that will return true
|
|
||||||
// if any filter matches the expression.
|
|
||||||
//
|
|
||||||
// If no filters are provided, the filter will match anything.
|
|
||||||
func ParseAll(ss ...string) (Filter, error) {
|
|
||||||
if len(ss) == 0 {
|
|
||||||
return Always, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var fs []Filter
|
|
||||||
for _, s := range ss {
|
|
||||||
f, err := Parse(s)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
fs = append(fs, f)
|
|
||||||
}
|
|
||||||
|
|
||||||
return Any(fs), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type parser struct {
|
|
||||||
input string
|
|
||||||
scanner scanner
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) parse() (Filter, error) {
|
|
||||||
p.scanner.init(p.input)
|
|
||||||
|
|
||||||
ss, err := p.selectors()
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.Wrap(err, "filters")
|
|
||||||
}
|
|
||||||
|
|
||||||
return ss, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) selectors() (Filter, error) {
|
|
||||||
s, err := p.selector()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ss := All{s}
|
|
||||||
|
|
||||||
loop:
|
|
||||||
for {
|
|
||||||
tok := p.scanner.peek()
|
|
||||||
switch tok {
|
|
||||||
case ',':
|
|
||||||
pos, tok, _ := p.scanner.scan()
|
|
||||||
if tok != tokenSeparator {
|
|
||||||
return nil, p.mkerr(pos, "expected a separator")
|
|
||||||
}
|
|
||||||
|
|
||||||
s, err := p.selector()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ss = append(ss, s)
|
|
||||||
case tokenEOF:
|
|
||||||
break loop
|
|
||||||
default:
|
|
||||||
return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ss, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) selector() (selector, error) {
|
|
||||||
fieldpath, err := p.fieldpath()
|
|
||||||
if err != nil {
|
|
||||||
return selector{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
switch p.scanner.peek() {
|
|
||||||
case ',', tokenSeparator, tokenEOF:
|
|
||||||
return selector{
|
|
||||||
fieldpath: fieldpath,
|
|
||||||
operator: operatorPresent,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
op, err := p.operator()
|
|
||||||
if err != nil {
|
|
||||||
return selector{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var allowAltQuotes bool
|
|
||||||
if op == operatorMatches {
|
|
||||||
allowAltQuotes = true
|
|
||||||
}
|
|
||||||
|
|
||||||
value, err := p.value(allowAltQuotes)
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return selector{}, io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
return selector{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return selector{
|
|
||||||
fieldpath: fieldpath,
|
|
||||||
value: value,
|
|
||||||
operator: op,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) fieldpath() ([]string, error) {
|
|
||||||
f, err := p.field()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs := []string{f}
|
|
||||||
loop:
|
|
||||||
for {
|
|
||||||
tok := p.scanner.peek() // lookahead to consume field separator
|
|
||||||
|
|
||||||
switch tok {
|
|
||||||
case '.':
|
|
||||||
pos, tok, _ := p.scanner.scan() // consume separator
|
|
||||||
if tok != tokenSeparator {
|
|
||||||
return nil, p.mkerr(pos, "expected a field separator (`.`)")
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := p.field()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
fs = append(fs, f)
|
|
||||||
default:
|
|
||||||
// let the layer above handle the other bad cases.
|
|
||||||
break loop
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return fs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) field() (string, error) {
|
|
||||||
pos, tok, s := p.scanner.scan()
|
|
||||||
switch tok {
|
|
||||||
case tokenField:
|
|
||||||
return s, nil
|
|
||||||
case tokenQuoted:
|
|
||||||
return p.unquote(pos, s, false)
|
|
||||||
case tokenIllegal:
|
|
||||||
return "", p.mkerr(pos, p.scanner.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", p.mkerr(pos, "expected field or quoted")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) operator() (operator, error) {
|
|
||||||
pos, tok, s := p.scanner.scan()
|
|
||||||
switch tok {
|
|
||||||
case tokenOperator:
|
|
||||||
switch s {
|
|
||||||
case "==":
|
|
||||||
return operatorEqual, nil
|
|
||||||
case "!=":
|
|
||||||
return operatorNotEqual, nil
|
|
||||||
case "~=":
|
|
||||||
return operatorMatches, nil
|
|
||||||
default:
|
|
||||||
return 0, p.mkerr(pos, "unsupported operator %q", s)
|
|
||||||
}
|
|
||||||
case tokenIllegal:
|
|
||||||
return 0, p.mkerr(pos, p.scanner.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) value(allowAltQuotes bool) (string, error) {
|
|
||||||
pos, tok, s := p.scanner.scan()
|
|
||||||
|
|
||||||
switch tok {
|
|
||||||
case tokenValue, tokenField:
|
|
||||||
return s, nil
|
|
||||||
case tokenQuoted:
|
|
||||||
return p.unquote(pos, s, allowAltQuotes)
|
|
||||||
case tokenIllegal:
|
|
||||||
return "", p.mkerr(pos, p.scanner.err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", p.mkerr(pos, "expected value or quoted")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) {
|
|
||||||
if !allowAlts && s[0] != '\'' && s[0] != '"' {
|
|
||||||
return "", p.mkerr(pos, "invalid quote encountered")
|
|
||||||
}
|
|
||||||
|
|
||||||
uq, err := unquote(s)
|
|
||||||
if err != nil {
|
|
||||||
return "", p.mkerr(pos, "unquoting failed: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return uq, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
type parseError struct {
|
|
||||||
input string
|
|
||||||
pos int
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pe parseError) Error() string {
|
|
||||||
if pe.pos < len(pe.input) {
|
|
||||||
before := pe.input[:pe.pos]
|
|
||||||
location := pe.input[pe.pos : pe.pos+1] // need to handle end
|
|
||||||
after := pe.input[pe.pos+1:]
|
|
||||||
|
|
||||||
return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
|
|
||||||
return errors.Wrap(parseError{
|
|
||||||
input: p.input,
|
|
||||||
pos: pos,
|
|
||||||
msg: fmt.Sprintf(format, args...),
|
|
||||||
}, "parse error")
|
|
||||||
}
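
For completeness, the error path: a malformed selector surfaces a parse error that echoes the offending input back to the caller, which is useful when relaying user-supplied filters over the API (sketch only):

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// "name==" is missing its value, so Parse reports a parse error rather
	// than returning a filter.
	if _, err := filters.Parse("name=="); err != nil {
		fmt.Println(err)
	}
}
```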
|
|
|
@@ -1,253 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package filters
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unicode/utf8"
|
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NOTE(stevvooe): Most of this code in this file is copied from the stdlib
|
|
||||||
// strconv package and modified to be able to handle quoting with `/` and `|`
|
|
||||||
// as delimiters. The copyright is held by the Go authors.
|
|
||||||
|
|
||||||
var errQuoteSyntax = errors.New("quote syntax error")
|
|
||||||
|
|
||||||
// UnquoteChar decodes the first character or byte in the escaped string
|
|
||||||
// or character literal represented by the string s.
|
|
||||||
// It returns four values:
|
|
||||||
//
|
|
||||||
// 1) value, the decoded Unicode code point or byte value;
|
|
||||||
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
|
|
||||||
// 3) tail, the remainder of the string after the character; and
|
|
||||||
// 4) an error that will be nil if the character is syntactically valid.
|
|
||||||
//
|
|
||||||
// The second argument, quote, specifies the type of literal being parsed
|
|
||||||
// and therefore which escaped quote character is permitted.
|
|
||||||
// If set to a single quote, it permits the sequence \' and disallows unescaped '.
|
|
||||||
// If set to a double quote, it permits \" and disallows unescaped ".
|
|
||||||
// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped.
|
|
||||||
//
|
|
||||||
// This is from Go strconv package, modified to support `|` and `/` as double
|
|
||||||
// quotes for use with regular expressions.
|
|
||||||
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
|
|
||||||
// easy cases
|
|
||||||
switch c := s[0]; {
|
|
||||||
case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'):
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
case c >= utf8.RuneSelf:
|
|
||||||
r, size := utf8.DecodeRuneInString(s)
|
|
||||||
return r, true, s[size:], nil
|
|
||||||
case c != '\\':
|
|
||||||
return rune(s[0]), false, s[1:], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// hard case: c is backslash
|
|
||||||
if len(s) <= 1 {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c := s[1]
|
|
||||||
s = s[2:]
|
|
||||||
|
|
||||||
switch c {
|
|
||||||
case 'a':
|
|
||||||
value = '\a'
|
|
||||||
case 'b':
|
|
||||||
value = '\b'
|
|
||||||
case 'f':
|
|
||||||
value = '\f'
|
|
||||||
case 'n':
|
|
||||||
value = '\n'
|
|
||||||
case 'r':
|
|
||||||
value = '\r'
|
|
||||||
case 't':
|
|
||||||
value = '\t'
|
|
||||||
case 'v':
|
|
||||||
value = '\v'
|
|
||||||
case 'x', 'u', 'U':
|
|
||||||
n := 0
|
|
||||||
switch c {
|
|
||||||
case 'x':
|
|
||||||
n = 2
|
|
||||||
case 'u':
|
|
||||||
n = 4
|
|
||||||
case 'U':
|
|
||||||
n = 8
|
|
||||||
}
|
|
||||||
var v rune
|
|
||||||
if len(s) < n {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for j := 0; j < n; j++ {
|
|
||||||
x, ok := unhex(s[j])
|
|
||||||
if !ok {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v = v<<4 | x
|
|
||||||
}
|
|
||||||
s = s[n:]
|
|
||||||
if c == 'x' {
|
|
||||||
// single-byte string, possibly not UTF-8
|
|
||||||
value = v
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if v > utf8.MaxRune {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
multibyte = true
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
v := rune(c) - '0'
|
|
||||||
if len(s) < 2 {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for j := 0; j < 2; j++ { // one digit already; two more
|
|
||||||
x := rune(s[j]) - '0'
|
|
||||||
if x < 0 || x > 7 {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
v = (v << 3) | x
|
|
||||||
}
|
|
||||||
s = s[2:]
|
|
||||||
if v > 255 {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
case '\\':
|
|
||||||
value = '\\'
|
|
||||||
case '\'', '"', '|', '/':
|
|
||||||
if c != quote {
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
value = rune(c)
|
|
||||||
default:
|
|
||||||
err = errQuoteSyntax
|
|
||||||
return
|
|
||||||
}
|
|
||||||
tail = s
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// unquote interprets s as a single-quoted, double-quoted,
|
|
||||||
// or backquoted Go string literal, returning the string value
|
|
||||||
// that s quotes. (If s is single-quoted, it would be a Go
|
|
||||||
// character literal; Unquote returns the corresponding
|
|
||||||
// one-character string.)
|
|
||||||
//
|
|
||||||
// This is modified from the standard library to support `|` and `/` as quote
|
|
||||||
// characters for use with regular expressions.
|
|
||||||
func unquote(s string) (string, error) {
|
|
||||||
n := len(s)
|
|
||||||
if n < 2 {
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
quote := s[0]
|
|
||||||
if quote != s[n-1] {
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
s = s[1 : n-1]
|
|
||||||
|
|
||||||
if quote == '`' {
|
|
||||||
if contains(s, '`') {
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
if contains(s, '\r') {
|
|
||||||
// -1 because we know there is at least one \r to remove.
|
|
||||||
buf := make([]byte, 0, len(s)-1)
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] != '\r' {
|
|
||||||
buf = append(buf, s[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
if quote != '"' && quote != '\'' && quote != '|' && quote != '/' {
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
if contains(s, '\n') {
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
|
|
||||||
// Is it trivial? Avoid allocation.
|
|
||||||
if !contains(s, '\\') && !contains(s, quote) {
|
|
||||||
switch quote {
|
|
||||||
case '"', '/', '|': // pipe and slash are treated like double quote
|
|
||||||
return s, nil
|
|
||||||
case '\'':
|
|
||||||
r, size := utf8.DecodeRuneInString(s)
|
|
||||||
if size == len(s) && (r != utf8.RuneError || size != 1) {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var runeTmp [utf8.UTFMax]byte
|
|
||||||
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
|
|
||||||
for len(s) > 0 {
|
|
||||||
c, multibyte, ss, err := unquoteChar(s, quote)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
s = ss
|
|
||||||
if c < utf8.RuneSelf || !multibyte {
|
|
||||||
buf = append(buf, byte(c))
|
|
||||||
} else {
|
|
||||||
n := utf8.EncodeRune(runeTmp[:], c)
|
|
||||||
buf = append(buf, runeTmp[:n]...)
|
|
||||||
}
|
|
||||||
if quote == '\'' && len(s) != 0 {
|
|
||||||
// single-quoted must be single character
|
|
||||||
return "", errQuoteSyntax
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// contains reports whether the string contains the byte c.
|
|
||||||
func contains(s string, c byte) bool {
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
if s[i] == c {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func unhex(b byte) (v rune, ok bool) {
|
|
||||||
c := rune(b)
|
|
||||||
switch {
|
|
||||||
case '0' <= c && c <= '9':
|
|
||||||
return c - '0', true
|
|
||||||
case 'a' <= c && c <= 'f':
|
|
||||||
return c - 'a' + 10, true
|
|
||||||
case 'A' <= c && c <= 'F':
|
|
||||||
return c - 'A' + 10, true
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
|
@ -1,297 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package filters
|
|
||||||
|
|
||||||
import (
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
tokenEOF = -(iota + 1)
|
|
||||||
tokenQuoted
|
|
||||||
tokenValue
|
|
||||||
tokenField
|
|
||||||
tokenSeparator
|
|
||||||
tokenOperator
|
|
||||||
tokenIllegal
|
|
||||||
)
|
|
||||||
|
|
||||||
type token rune
|
|
||||||
|
|
||||||
func (t token) String() string {
|
|
||||||
switch t {
|
|
||||||
case tokenEOF:
|
|
||||||
return "EOF"
|
|
||||||
case tokenQuoted:
|
|
||||||
return "Quoted"
|
|
||||||
case tokenValue:
|
|
||||||
return "Value"
|
|
||||||
case tokenField:
|
|
||||||
return "Field"
|
|
||||||
case tokenSeparator:
|
|
||||||
return "Separator"
|
|
||||||
case tokenOperator:
|
|
||||||
return "Operator"
|
|
||||||
case tokenIllegal:
|
|
||||||
return "Illegal"
|
|
||||||
}
|
|
||||||
|
|
||||||
return string(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t token) GoString() string {
|
|
||||||
return "token" + t.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
type scanner struct {
|
|
||||||
input string
|
|
||||||
pos int
|
|
||||||
ppos int // bounds the current rune in the string
|
|
||||||
value bool
|
|
||||||
err string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) init(input string) {
|
|
||||||
s.input = input
|
|
||||||
s.pos = 0
|
|
||||||
s.ppos = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) next() rune {
|
|
||||||
if s.pos >= len(s.input) {
|
|
||||||
return tokenEOF
|
|
||||||
}
|
|
||||||
s.pos = s.ppos
|
|
||||||
|
|
||||||
r, w := utf8.DecodeRuneInString(s.input[s.ppos:])
|
|
||||||
s.ppos += w
|
|
||||||
if r == utf8.RuneError {
|
|
||||||
if w > 0 {
|
|
||||||
s.error("rune error")
|
|
||||||
return tokenIllegal
|
|
||||||
}
|
|
||||||
return tokenEOF
|
|
||||||
}
|
|
||||||
|
|
||||||
if r == 0 {
|
|
||||||
s.error("unexpected null")
|
|
||||||
return tokenIllegal
|
|
||||||
}
|
|
||||||
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) peek() rune {
|
|
||||||
pos := s.pos
|
|
||||||
ppos := s.ppos
|
|
||||||
ch := s.next()
|
|
||||||
s.pos = pos
|
|
||||||
s.ppos = ppos
|
|
||||||
return ch
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scan() (nextp int, tk token, text string) {
|
|
||||||
var (
|
|
||||||
ch = s.next()
|
|
||||||
pos = s.pos
|
|
||||||
)
|
|
||||||
|
|
||||||
chomp:
|
|
||||||
switch {
|
|
||||||
case ch == tokenEOF:
|
|
||||||
case ch == tokenIllegal:
|
|
||||||
case isQuoteRune(ch):
|
|
||||||
if !s.scanQuoted(ch) {
|
|
||||||
return pos, tokenIllegal, s.input[pos:s.ppos]
|
|
||||||
}
|
|
||||||
return pos, tokenQuoted, s.input[pos:s.ppos]
|
|
||||||
case isSeparatorRune(ch):
|
|
||||||
s.value = false
|
|
||||||
return pos, tokenSeparator, s.input[pos:s.ppos]
|
|
||||||
case isOperatorRune(ch):
|
|
||||||
s.scanOperator()
|
|
||||||
s.value = true
|
|
||||||
return pos, tokenOperator, s.input[pos:s.ppos]
|
|
||||||
case unicode.IsSpace(ch):
|
|
||||||
// chomp
|
|
||||||
ch = s.next()
|
|
||||||
pos = s.pos
|
|
||||||
goto chomp
|
|
||||||
case s.value:
|
|
||||||
s.scanValue()
|
|
||||||
s.value = false
|
|
||||||
return pos, tokenValue, s.input[pos:s.ppos]
|
|
||||||
case isFieldRune(ch):
|
|
||||||
s.scanField()
|
|
||||||
return pos, tokenField, s.input[pos:s.ppos]
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.pos, token(ch), ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanField() {
|
|
||||||
for {
|
|
||||||
ch := s.peek()
|
|
||||||
if !isFieldRune(ch) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s.next()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanOperator() {
|
|
||||||
for {
|
|
||||||
ch := s.peek()
|
|
||||||
switch ch {
|
|
||||||
case '=', '!', '~':
|
|
||||||
s.next()
|
|
||||||
default:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanValue() {
|
|
||||||
for {
|
|
||||||
ch := s.peek()
|
|
||||||
if !isValueRune(ch) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
s.next()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanQuoted(quote rune) bool {
|
|
||||||
var illegal bool
|
|
||||||
ch := s.next() // read character after quote
|
|
||||||
for ch != quote {
|
|
||||||
if ch == '\n' || ch < 0 {
|
|
||||||
s.error("quoted literal not terminated")
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ch == '\\' {
|
|
||||||
var legal bool
|
|
||||||
ch, legal = s.scanEscape(quote)
|
|
||||||
if !legal {
|
|
||||||
illegal = true
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
ch = s.next()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return !illegal
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) {
|
|
||||||
ch = s.next() // read character after '/'
|
|
||||||
switch ch {
|
|
||||||
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
|
|
||||||
// nothing to do
|
|
||||||
ch = s.next()
|
|
||||||
legal = true
|
|
||||||
case '0', '1', '2', '3', '4', '5', '6', '7':
|
|
||||||
ch, legal = s.scanDigits(ch, 8, 3)
|
|
||||||
case 'x':
|
|
||||||
ch, legal = s.scanDigits(s.next(), 16, 2)
|
|
||||||
case 'u':
|
|
||||||
ch, legal = s.scanDigits(s.next(), 16, 4)
|
|
||||||
case 'U':
|
|
||||||
ch, legal = s.scanDigits(s.next(), 16, 8)
|
|
||||||
default:
|
|
||||||
s.error("illegal escape sequence")
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) {
|
|
||||||
for n > 0 && digitVal(ch) < base {
|
|
||||||
ch = s.next()
|
|
||||||
n--
|
|
||||||
}
|
|
||||||
if n > 0 {
|
|
||||||
s.error("illegal numeric escape sequence")
|
|
||||||
return ch, false
|
|
||||||
}
|
|
||||||
return ch, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *scanner) error(msg string) {
|
|
||||||
if s.err == "" {
|
|
||||||
s.err = msg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func digitVal(ch rune) int {
|
|
||||||
switch {
|
|
||||||
case '0' <= ch && ch <= '9':
|
|
||||||
return int(ch - '0')
|
|
||||||
case 'a' <= ch && ch <= 'f':
|
|
||||||
return int(ch - 'a' + 10)
|
|
||||||
case 'A' <= ch && ch <= 'F':
|
|
||||||
return int(ch - 'A' + 10)
|
|
||||||
}
|
|
||||||
return 16 // larger than any legal digit val
|
|
||||||
}
|
|
||||||
|
|
||||||
func isFieldRune(r rune) bool {
|
|
||||||
return (r == '_' || isAlphaRune(r) || isDigitRune(r))
|
|
||||||
}
|
|
||||||
|
|
||||||
func isAlphaRune(r rune) bool {
|
|
||||||
return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z'
|
|
||||||
}
|
|
||||||
|
|
||||||
func isDigitRune(r rune) bool {
|
|
||||||
return r >= '0' && r <= '9'
|
|
||||||
}
|
|
||||||
|
|
||||||
func isOperatorRune(r rune) bool {
|
|
||||||
switch r {
|
|
||||||
case '=', '!', '~':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isQuoteRune(r rune) bool {
|
|
||||||
switch r {
|
|
||||||
case '/', '|', '"': // maybe add single quoting?
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSeparatorRune(r rune) bool {
|
|
||||||
switch r {
|
|
||||||
case ',', '.':
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isValueRune(r rune) bool {
|
|
||||||
return r != ',' && !unicode.IsSpace(r) &&
|
|
||||||
(unicode.IsLetter(r) ||
|
|
||||||
unicode.IsDigit(r) ||
|
|
||||||
unicode.IsNumber(r) ||
|
|
||||||
unicode.IsGraphic(r) ||
|
|
||||||
unicode.IsPunct(r))
|
|
||||||
}
|
|
209 vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go generated vendored
@ -1,209 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package auth

import (
	"context"
	"encoding/json"
	"net/http"
	"net/url"
	"strings"
	"time"

	"github.com/containerd/containerd/log"
	remoteserrors "github.com/containerd/containerd/remotes/errors"
	"github.com/containerd/containerd/version"
	"github.com/pkg/errors"
	"golang.org/x/net/context/ctxhttp"
)

var (
	// ErrNoToken is returned if a request is successful but the body does not
	// contain an authorization token.
	ErrNoToken = errors.New("authorization server did not include a token in the response")
)

// GenerateTokenOptions generates options for fetching a token based on a challenge
func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) {
	realm, ok := c.Parameters["realm"]
	if !ok {
		return TokenOptions{}, errors.New("no realm specified for token auth challenge")
	}

	realmURL, err := url.Parse(realm)
	if err != nil {
		return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm")
	}

	to := TokenOptions{
		Realm:    realmURL.String(),
		Service:  c.Parameters["service"],
		Username: username,
		Secret:   secret,
	}

	scope, ok := c.Parameters["scope"]
	if ok {
		to.Scopes = append(to.Scopes, scope)
	} else {
		log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge")
	}

	return to, nil
}

// TokenOptions are options for requesting a token
type TokenOptions struct {
	Realm    string
	Service  string
	Scopes   []string
	Username string
	Secret   string
}

// OAuthTokenResponse is response from fetching token with a OAuth POST request
type OAuthTokenResponse struct {
	AccessToken  string    `json:"access_token"`
	RefreshToken string    `json:"refresh_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	Scope        string    `json:"scope"`
}

// FetchTokenWithOAuth fetches a token using a POST request
func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) {
	form := url.Values{}
	if len(to.Scopes) > 0 {
		form.Set("scope", strings.Join(to.Scopes, " "))
	}
	form.Set("service", to.Service)
	form.Set("client_id", clientID)

	if to.Username == "" {
		form.Set("grant_type", "refresh_token")
		form.Set("refresh_token", to.Secret)
	} else {
		form.Set("grant_type", "password")
		form.Set("username", to.Username)
		form.Set("password", to.Secret)
	}

	req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
	for k, v := range headers {
		req.Header[k] = append(req.Header[k], v...)
	}
	if len(req.Header.Get("User-Agent")) == 0 {
		req.Header.Set("User-Agent", "containerd/"+version.Version)
	}

	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
	}

	decoder := json.NewDecoder(resp.Body)

	var tr OAuthTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return nil, errors.Wrap(err, "unable to decode token response")
	}

	if tr.AccessToken == "" {
		return nil, errors.WithStack(ErrNoToken)
	}

	return &tr, nil
}

// FetchTokenResponse is response from fetching token with GET request
type FetchTokenResponse struct {
	Token        string    `json:"token"`
	AccessToken  string    `json:"access_token"`
	ExpiresIn    int       `json:"expires_in"`
	IssuedAt     time.Time `json:"issued_at"`
	RefreshToken string    `json:"refresh_token"`
}

// FetchToken fetches a token using a GET request
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
	req, err := http.NewRequest("GET", to.Realm, nil)
	if err != nil {
		return nil, err
	}

	for k, v := range headers {
		req.Header[k] = append(req.Header[k], v...)
	}
	if len(req.Header.Get("User-Agent")) == 0 {
		req.Header.Set("User-Agent", "containerd/"+version.Version)
	}

	reqParams := req.URL.Query()

	if to.Service != "" {
		reqParams.Add("service", to.Service)
	}

	for _, scope := range to.Scopes {
		reqParams.Add("scope", scope)
	}

	if to.Secret != "" {
		req.SetBasicAuth(to.Username, to.Secret)
	}

	req.URL.RawQuery = reqParams.Encode()

	resp, err := ctxhttp.Do(ctx, client, req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
	}

	decoder := json.NewDecoder(resp.Body)

	var tr FetchTokenResponse
	if err = decoder.Decode(&tr); err != nil {
		return nil, errors.Wrap(err, "unable to decode token response")
	}

	// `access_token` is equivalent to `token` and if both are specified
	// the choice is undefined. Canonicalize `access_token` by sticking
	// things in `token`.
	if tr.AccessToken != "" {
		tr.Token = tr.AccessToken
	}

	if tr.Token == "" {
		return nil, errors.WithStack(ErrNoToken)
	}

	return &tr, nil
}
203 vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go generated vendored
@ -1,203 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package auth

import (
	"net/http"
	"sort"
	"strings"
)

// AuthenticationScheme defines scheme of the authentication method
type AuthenticationScheme byte

const (
	// BasicAuth is scheme for Basic HTTP Authentication RFC 7617
	BasicAuth AuthenticationScheme = 1 << iota
	// DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616
	DigestAuth
	// BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750
	BearerAuth
)

// Challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type Challenge struct {
	// scheme is the auth-scheme according to RFC 2617
	Scheme AuthenticationScheme

	// parameters are the auth-params according to RFC 2617
	Parameters map[string]string
}

type byScheme []Challenge

func (bs byScheme) Len() int      { return len(bs) }
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }

// Sort in priority order: token > digest > basic
func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }

// Octet types from RFC 2616.
type octetType byte

var octetTypes [256]octetType

const (
	isToken octetType = 1 << iota
	isSpace
)

func init() {
	// OCTET      = <any 8-bit sequence of data>
	// CHAR       = <any US-ASCII character (octets 0 - 127)>
	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
	// CR         = <US-ASCII CR, carriage return (13)>
	// LF         = <US-ASCII LF, linefeed (10)>
	// SP         = <US-ASCII SP, space (32)>
	// HT         = <US-ASCII HT, horizontal-tab (9)>
	// <">        = <US-ASCII double-quote mark (34)>
	// CRLF       = CR LF
	// LWS        = [CRLF] 1*( SP | HT )
	// TEXT       = <any OCTET except CTLs, but including LWS>
	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
	// token      = 1*<any CHAR except CTLs or separators>
	// qdtext     = <any TEXT except <">>

	for c := 0; c < 256; c++ {
		var t octetType
		isCtl := c <= 31 || c == 127
		isChar := 0 <= c && c <= 127
		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
		if strings.ContainsRune(" \t\r\n", rune(c)) {
			t |= isSpace
		}
		if isChar && !isCtl && !isSeparator {
			t |= isToken
		}
		octetTypes[c] = t
	}
}

// ParseAuthHeader parses challenges from WWW-Authenticate header
func ParseAuthHeader(header http.Header) []Challenge {
	challenges := []Challenge{}
	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
		v, p := parseValueAndParams(h)
		var s AuthenticationScheme
		switch v {
		case "basic":
			s = BasicAuth
		case "digest":
			s = DigestAuth
		case "bearer":
			s = BearerAuth
		default:
			continue
		}
		challenges = append(challenges, Challenge{Scheme: s, Parameters: p})
	}
	sort.Stable(byScheme(challenges))
	return challenges
}

func parseValueAndParams(header string) (value string, params map[string]string) {
	params = make(map[string]string)
	value, s := expectToken(header)
	if value == "" {
		return
	}
	value = strings.ToLower(value)
	for {
		var pkey string
		pkey, s = expectToken(skipSpace(s))
		if pkey == "" {
			return
		}
		if !strings.HasPrefix(s, "=") {
			return
		}
		var pvalue string
		pvalue, s = expectTokenOrQuoted(s[1:])
		if pvalue == "" {
			return
		}
		pkey = strings.ToLower(pkey)
		params[pkey] = pvalue
		s = skipSpace(s)
		if !strings.HasPrefix(s, ",") {
			return
		}
		s = s[1:]
	}
}

func skipSpace(s string) (rest string) {
	i := 0
	for ; i < len(s); i++ {
		if octetTypes[s[i]]&isSpace == 0 {
			break
		}
	}
	return s[i:]
}

func expectToken(s string) (token, rest string) {
	i := 0
	for ; i < len(s); i++ {
		if octetTypes[s[i]]&isToken == 0 {
			break
		}
	}
	return s[:i], s[i:]
}

func expectTokenOrQuoted(s string) (value string, rest string) {
	if !strings.HasPrefix(s, "\"") {
		return expectToken(s)
	}
	s = s[1:]
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '"':
			return s[:i], s[i+1:]
		case '\\':
			p := make([]byte, len(s)-1)
			j := copy(p, s[:i])
			escape := true
			for i = i + 1; i < len(s); i++ {
				b := s[i]
				switch {
				case escape:
					escape = false
					p[j] = b
					j++
				case b == '\\':
					escape = true
				case b == '"':
					return string(p[:j]), s[i+1:]
				default:
					p[j] = b
					j++
				}
			}
			return "", ""
		}
	}
	return "", ""
}
@ -1,56 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package errors

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

var _ error = ErrUnexpectedStatus{}

// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status
type ErrUnexpectedStatus struct {
	Status                    string
	StatusCode                int
	Body                      []byte
	RequestURL, RequestMethod string
}

func (e ErrUnexpectedStatus) Error() string {
	return fmt.Sprintf("unexpected status: %s", e.Status)
}

// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response
func NewUnexpectedStatusErr(resp *http.Response) error {
	var b []byte
	if resp.Body != nil {
		b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
	}
	err := ErrUnexpectedStatus{
		Body:          b,
		Status:        resp.Status,
		StatusCode:    resp.StatusCode,
		RequestMethod: resp.Request.Method,
	}
	if resp.Request.URL != nil {
		err.RequestURL = resp.Request.URL.String()
	}
	return err
}
463 vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go generated vendored
@ -1,463 +0,0 @@
|
||||||
/*
|
|
||||||
Copyright The containerd Authors.
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
*/
|
|
||||||
|
|
||||||
package contentserver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
api "github.com/containerd/containerd/api/services/content/v1"
|
|
||||||
"github.com/containerd/containerd/content"
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
|
||||||
"github.com/containerd/containerd/log"
|
|
||||||
ptypes "github.com/gogo/protobuf/types"
|
|
||||||
digest "github.com/opencontainers/go-digest"
|
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
|
||||||
"github.com/pkg/errors"
|
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"google.golang.org/grpc"
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
type service struct {
|
|
||||||
store content.Store
|
|
||||||
}
|
|
||||||
|
|
||||||
var bufPool = sync.Pool{
|
|
||||||
New: func() interface{} {
|
|
||||||
buffer := make([]byte, 1<<20)
|
|
||||||
return &buffer
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns the content GRPC server
|
|
||||||
func New(cs content.Store) api.ContentServer {
|
|
||||||
return &service{store: cs}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Register(server *grpc.Server) error {
|
|
||||||
api.RegisterContentServer(server, s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
bi, err := s.store.Info(ctx, req.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &api.InfoResponse{
|
|
||||||
Info: infoToGRPC(bi),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
|
|
||||||
if err := req.Info.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
|
|
||||||
}
|
|
||||||
|
|
||||||
info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &api.UpdateResponse{
|
|
||||||
Info: infoToGRPC(info),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
|
|
||||||
var (
|
|
||||||
buffer []api.Info
|
|
||||||
sendBlock = func(block []api.Info) error {
|
|
||||||
// send last block
|
|
||||||
return session.Send(&api.ListContentResponse{
|
|
||||||
Info: block,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := s.store.Walk(session.Context(), func(info content.Info) error {
|
|
||||||
buffer = append(buffer, api.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size_: info.Size,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
})
|
|
||||||
|
|
||||||
if len(buffer) >= 100 {
|
|
||||||
if err := sendBlock(buffer); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer = buffer[:0]
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}, req.Filters...); err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(buffer) > 0 {
|
|
||||||
// send last block
|
|
||||||
if err := sendBlock(buffer); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) {
|
|
||||||
log.G(ctx).WithField("digest", req.Digest).Debugf("delete content")
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return nil, status.Errorf(codes.InvalidArgument, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := s.store.Delete(ctx, req.Digest); err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ptypes.Empty{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
|
|
||||||
if err := req.Digest.Validate(); err != nil {
|
|
||||||
return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
oi, err := s.store.Info(session.Context(), req.Digest)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest})
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
defer ra.Close()
|
|
||||||
|
|
||||||
var (
|
|
||||||
offset = req.Offset
|
|
||||||
// size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of.
|
|
||||||
// offset+size can be larger than oi.Size.
|
|
||||||
size = req.Size_
|
|
||||||
|
|
||||||
// TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably
|
|
||||||
// little inefficient for work over a fast network. We can tune this later.
|
|
||||||
p = bufPool.Get().(*[]byte)
|
|
||||||
)
|
|
||||||
defer bufPool.Put(p)
|
|
||||||
|
|
||||||
if offset < 0 {
|
|
||||||
offset = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
if offset > oi.Size {
|
|
||||||
return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
if size <= 0 || offset+size > oi.Size {
|
|
||||||
size = oi.Size - offset
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = io.CopyBuffer(
|
|
||||||
&readResponseWriter{session: session},
|
|
||||||
io.NewSectionReader(ra, offset, size), *p)
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// readResponseWriter is a writer that places the output into ReadContentRequest messages.
|
|
||||||
//
|
|
||||||
// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
|
|
||||||
// into the buffer size.
|
|
||||||
type readResponseWriter struct {
|
|
||||||
offset int64
|
|
||||||
session api.Content_ReadServer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
|
|
||||||
if err := rw.session.Send(&api.ReadContentResponse{
|
|
||||||
Offset: rw.offset,
|
|
||||||
Data: p,
|
|
||||||
}); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
rw.offset += int64(len(p))
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
|
|
||||||
status, err := s.store.Status(ctx, req.Ref)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp api.StatusResponse
|
|
||||||
resp.Status = &api.Status{
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Ref: status.Ref,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
|
|
||||||
statuses, err := s.store.ListStatuses(ctx, req.Filters...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var resp api.ListStatusesResponse
|
|
||||||
for _, status := range statuses {
|
|
||||||
resp.Statuses = append(resp.Statuses, api.Status{
|
|
||||||
StartedAt: status.StartedAt,
|
|
||||||
UpdatedAt: status.UpdatedAt,
|
|
||||||
Ref: status.Ref,
|
|
||||||
Offset: status.Offset,
|
|
||||||
Total: status.Total,
|
|
||||||
Expected: status.Expected,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
return &resp, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Write(session api.Content_WriteServer) (err error) {
|
|
||||||
var (
|
|
||||||
ctx = session.Context()
|
|
||||||
msg api.WriteContentResponse
|
|
||||||
req *api.WriteContentRequest
|
|
||||||
ref string
|
|
||||||
total int64
|
|
||||||
expected digest.Digest
|
|
||||||
)
|
|
||||||
|
|
||||||
defer func(msg *api.WriteContentResponse) {
|
|
||||||
// pump through the last message if no error was encountered
|
|
||||||
if err != nil {
|
|
||||||
if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists {
|
|
||||||
// TODO(stevvooe): Really need a log line here to track which
|
|
||||||
// errors are actually causing failure on the server side. May want
|
|
||||||
// to configure the service with an interceptor to make this work
|
|
||||||
// identically across all GRPC methods.
|
|
||||||
//
|
|
||||||
// This is pretty noisy, so we can remove it but leave it for now.
|
|
||||||
log.G(ctx).WithError(err).Error("(*service).Write failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = session.Send(msg)
|
|
||||||
}(&msg)
|
|
||||||
|
|
||||||
// handle the very first request!
|
|
||||||
req, err = session.Recv()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
ref = req.Ref
|
|
||||||
|
|
||||||
if ref == "" {
|
|
||||||
return status.Errorf(codes.InvalidArgument, "first message must have a reference")
|
|
||||||
}
|
|
||||||
|
|
||||||
fields := logrus.Fields{
|
|
||||||
"ref": ref,
|
|
||||||
}
|
|
||||||
total = req.Total
|
|
||||||
expected = req.Expected
|
|
||||||
if total > 0 {
|
|
||||||
fields["total"] = total
|
|
||||||
}
|
|
||||||
|
|
||||||
if expected != "" {
|
|
||||||
fields["expected"] = expected
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
|
|
||||||
|
|
||||||
log.G(ctx).Debug("(*service).Write started")
|
|
||||||
// this action locks the writer for the session.
|
|
||||||
wr, err := s.store.Writer(ctx,
|
|
||||||
content.WithRef(ref),
|
|
||||||
content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected}))
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
defer wr.Close()
|
|
||||||
|
|
||||||
for {
|
|
||||||
msg.Action = req.Action
|
|
||||||
ws, err := wr.Status()
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Offset = ws.Offset // always set the offset.
|
|
||||||
|
|
||||||
// NOTE(stevvooe): In general, there are two cases underwhich a remote
|
|
||||||
// writer is used.
|
|
||||||
//
|
|
||||||
// For pull, we almost always have this before fetching large content,
|
|
||||||
// through descriptors. We allow predeclaration of the expected size
|
|
||||||
// and digest.
|
|
||||||
//
|
|
||||||
// For push, it is more complex. If we want to cut through content into
|
|
||||||
// storage, we may have no expectation until we are done processing the
|
|
||||||
// content. The case here is the following:
|
|
||||||
//
|
|
||||||
// 1. Start writing content.
|
|
||||||
// 2. Compress inline.
|
|
||||||
// 3. Validate digest and size (maybe).
|
|
||||||
//
|
|
||||||
// Supporting these two paths is quite awkward but it lets both API
|
|
||||||
// users use the same writer style for each with a minimum of overhead.
|
|
||||||
if req.Expected != "" {
|
|
||||||
if expected != "" && expected != req.Expected {
|
|
||||||
log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected)
|
|
||||||
}
|
|
||||||
expected = req.Expected
|
|
||||||
|
|
||||||
if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
|
|
||||||
if err := wr.Close(); err != nil {
|
|
||||||
log.G(ctx).WithError(err).Error("failed to close writer")
|
|
||||||
}
|
|
||||||
if err := s.store.Abort(session.Context(), ref); err != nil {
|
|
||||||
log.G(ctx).WithError(err).Error("failed to abort write")
|
|
||||||
}
|
|
||||||
|
|
||||||
return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Total > 0 {
|
|
||||||
// Update the expected total. Typically, this could be seen at
|
|
||||||
// negotiation time or on a commit message.
|
|
||||||
if total > 0 && req.Total != total {
|
|
||||||
log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total)
|
|
||||||
}
|
|
||||||
total = req.Total
|
|
||||||
}
|
|
||||||
|
|
||||||
switch req.Action {
|
|
||||||
case api.WriteActionStat:
|
|
||||||
msg.Digest = wr.Digest()
|
|
||||||
msg.StartedAt = ws.StartedAt
|
|
||||||
msg.UpdatedAt = ws.UpdatedAt
|
|
||||||
msg.Total = total
|
|
||||||
case api.WriteActionWrite, api.WriteActionCommit:
|
|
||||||
if req.Offset > 0 {
|
|
||||||
// validate the offset if provided
|
|
||||||
if req.Offset != ws.Offset {
|
|
||||||
return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Offset == 0 && ws.Offset > 0 {
|
|
||||||
if err := wr.Truncate(req.Offset); err != nil {
|
|
||||||
return errors.Wrapf(err, "truncate failed")
|
|
||||||
}
|
|
||||||
msg.Offset = req.Offset
|
|
||||||
}
|
|
||||||
|
|
||||||
// issue the write if we actually have data.
|
|
||||||
if len(req.Data) > 0 {
|
|
||||||
// While this looks like we could use io.WriterAt here, because we
|
|
||||||
// maintain the offset as append only, we just issue the write.
|
|
||||||
n, err := wr.Write(req.Data)
|
|
||||||
if err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if n != len(req.Data) {
|
|
||||||
// TODO(stevvooe): Perhaps, we can recover this by including it
|
|
||||||
// in the offset on the write return.
|
|
||||||
return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Offset += int64(n)
|
|
||||||
}
|
|
||||||
|
|
||||||
if req.Action == api.WriteActionCommit {
|
|
||||||
var opts []content.Opt
|
|
||||||
if req.Labels != nil {
|
|
||||||
opts = append(opts, content.WithLabels(req.Labels))
|
|
||||||
}
|
|
||||||
if err := wr.Commit(ctx, total, expected, opts...); err != nil {
|
|
||||||
return errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
msg.Digest = wr.Digest()
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := session.Send(&msg); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err = session.Recv()
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) {
|
|
||||||
if err := s.store.Abort(ctx, req.Ref); err != nil {
|
|
||||||
return nil, errdefs.ToGRPC(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ptypes.Empty{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoToGRPC(info content.Info) api.Info {
|
|
||||||
return api.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size_: info.Size,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func infoFromGRPC(info api.Info) content.Info {
|
|
||||||
return content.Info{
|
|
||||||
Digest: info.Digest,
|
|
||||||
Size: info.Size_,
|
|
||||||
CreatedAt: info.CreatedAt,
|
|
||||||
UpdatedAt: info.UpdatedAt,
|
|
||||||
Labels: info.Labels,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package version

import "runtime"

var (
	// Package is filled at linking time
	Package = "github.com/containerd/containerd"

	// Version holds the complete version number. Filled in at linking time.
	Version = "1.5.5+unknown"

	// Revision is filled with the VCS (e.g. git) revision being used to build
	// the program at linking time.
	Revision = ""

	// GoVersion is Go tree's version.
	GoVersion = runtime.Version()
)
@ -1,40 +0,0 @@
Aaron Lehmann <aaron.lehmann@docker.com>
Akash Gupta <akagup@microsoft.com>
Akihiro Suda <akihiro.suda.cz@hco.ntt.co.jp>
Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
Akihiro Suda <suda.kyoto@gmail.com>
Andrew Pennebaker <apennebaker@datapipe.com>
Brandon Philips <brandon.philips@coreos.com>
Brian Goff <cpuguy83@gmail.com>
Christopher Jones <tophj@linux.vnet.ibm.com>
Daniel, Dao Quang Minh <dqminh89@gmail.com>
Darren Stahl <darst@microsoft.com>
Derek McGowan <derek@mcg.dev>
Derek McGowan <derek@mcgstyle.net>
Edward Pilatowicz <edward.pilatowicz@oracle.com>
Ian Campbell <ijc@docker.com>
Ivan Markin <sw@nogoegst.net>
Justin Cormack <justin.cormack@docker.com>
Justin Cummins <sul3n3t@gmail.com>
Kasper Fabæch Brandt <poizan@poizan.dk>
Kir Kolyshkin <kolyshkin@gmail.com>
Michael Crosby <crosbymichael@gmail.com>
Michael Crosby <michael@thepasture.io>
Michael Wan <zirenwan@gmail.com>
Mike Brown <brownwm@us.ibm.com>
Niels de Vos <ndevos@redhat.com>
Phil Estes <estesp@gmail.com>
Phil Estes <estesp@linux.vnet.ibm.com>
Samuel Karp <me@samuelkarp.com>
Sam Whited <sam@samwhited.com>
Sebastiaan van Stijn <github@gone.nl>
Shengjing Zhu <zhsj@debian.org>
Stephen J Day <stephen.day@docker.com>
Tibor Vass <tibor@docker.com>
Tobias Klauser <tklauser@distanz.ch>
Tom Faulhaber <tffaulha@amazon.com>
Tonis Tiigi <tonistiigi@gmail.com>
Trevor Porter <trkporter@ucdavis.edu>
Wei Fu <fuweid89@gmail.com>
Wilbert van de Ridder <wilbert.ridder@gmail.com>
Xiaodong Ye <xiaodongy@vmware.com>
@ -1,191 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
https://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright The containerd Authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
|
@@ -1,3 +0,0 @@
This package is for internal use only. It is intended to only have
temporary changes before they are upstreamed to golang.org/x/sys/
(a.k.a. https://github.com/golang/sys).
@@ -1,52 +0,0 @@
#!/bin/bash

# Copyright The containerd Authors.

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


set -e

mksyscall="$(go env GOROOT)/src/syscall/mksyscall.pl"

fix() {
	sed 's,^package syscall$,package sysx,' \
		| sed 's,^import "unsafe"$,import (\n\t"syscall"\n\t"unsafe"\n),' \
		| gofmt -r='BytePtrFromString -> syscall.BytePtrFromString' \
		| gofmt -r='Syscall6 -> syscall.Syscall6' \
		| gofmt -r='Syscall -> syscall.Syscall' \
		| gofmt -r='SYS_GETXATTR -> syscall.SYS_GETXATTR' \
		| gofmt -r='SYS_LISTXATTR -> syscall.SYS_LISTXATTR' \
		| gofmt -r='SYS_SETXATTR -> syscall.SYS_SETXATTR' \
		| gofmt -r='SYS_REMOVEXATTR -> syscall.SYS_REMOVEXATTR' \
		| gofmt -r='SYS_LGETXATTR -> syscall.SYS_LGETXATTR' \
		| gofmt -r='SYS_LLISTXATTR -> syscall.SYS_LLISTXATTR' \
		| gofmt -r='SYS_LSETXATTR -> syscall.SYS_LSETXATTR' \
		| gofmt -r='SYS_LREMOVEXATTR -> syscall.SYS_LREMOVEXATTR'
}

if [ "$GOARCH" == "" ] || [ "$GOOS" == "" ]; then
	echo "Must specify \$GOARCH and \$GOOS"
	exit 1
fi

mkargs=""

if [ "$GOARCH" == "386" ] || [ "$GOARCH" == "arm" ]; then
	mkargs="-l32"
fi

for f in "$@"; do
	$mksyscall $mkargs "${f}_${GOOS}.go" | fix > "${f}_${GOOS}_${GOARCH}.go"
done
@@ -1,23 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sysx

import (
	"syscall"
)

const ENODATA = syscall.ENODATA
@@ -1,24 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sysx

import (
	"syscall"
)

// This should actually be a set that contains ENOENT and EPERM
const ENODATA = syscall.ENOENT
@@ -1,25 +0,0 @@
// +build darwin freebsd openbsd

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sysx

import (
	"syscall"
)

const ENODATA = syscall.ENOATTR
@@ -1,117 +0,0 @@
// +build linux darwin

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sysx

import (
	"bytes"

	"golang.org/x/sys/unix"
)

// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
	return listxattrAll(path, unix.Listxattr)
}

// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
	return unix.Removexattr(path, attr)
}

// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	return unix.Setxattr(path, attr, data, flags)
}

// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
	return getxattrAll(path, attr, unix.Getxattr)
}

// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
	return listxattrAll(path, unix.Llistxattr)
}

// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
	return unix.Lremovexattr(path, attr)
}

// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
	return unix.Lsetxattr(path, attr, data, flags)
}

// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
	return getxattrAll(path, attr, unix.Lgetxattr)
}

const defaultXattrBufferSize = 128

type listxattrFunc func(path string, dest []byte) (int, error)

func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) {
	buf := make([]byte, defaultXattrBufferSize)
	n, err := listFunc(path, buf)
	for err == unix.ERANGE {
		// Buffer too small, use zero-sized buffer to get the actual size
		n, err = listFunc(path, []byte{})
		if err != nil {
			return nil, err
		}
		buf = make([]byte, n)
		n, err = listFunc(path, buf)
	}
	if err != nil {
		return nil, err
	}

	ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0})
	var entries []string
	for _, p := range ps {
		if len(p) > 0 {
			entries = append(entries, string(p))
		}
	}

	return entries, nil
}

type getxattrFunc func(string, string, []byte) (int, error)

func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) {
	buf := make([]byte, defaultXattrBufferSize)
	n, err := getFunc(path, attr, buf)
	for err == unix.ERANGE {
		// Buffer too small, use zero-sized buffer to get the actual size
		n, err = getFunc(path, attr, []byte{})
		if err != nil {
			return nil, err
		}
		buf = make([]byte, n)
		n, err = getFunc(path, attr, buf)
	}
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
@@ -1,67 +0,0 @@
// +build !linux,!darwin

/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package sysx

import (
	"errors"
	"runtime"
)

var errUnsupported = errors.New("extended attributes unsupported on " + runtime.GOOS)

// Listxattr calls syscall listxattr and reads all content
// and returns a string array
func Listxattr(path string) ([]string, error) {
	return []string{}, nil
}

// Removexattr calls syscall removexattr
func Removexattr(path string, attr string) (err error) {
	return errUnsupported
}

// Setxattr calls syscall setxattr
func Setxattr(path string, attr string, data []byte, flags int) (err error) {
	return errUnsupported
}

// Getxattr calls syscall getxattr
func Getxattr(path, attr string) ([]byte, error) {
	return []byte{}, errUnsupported
}

// LListxattr lists xattrs, not following symlinks
func LListxattr(path string) ([]string, error) {
	return []string{}, nil
}

// LRemovexattr removes an xattr, not following symlinks
func LRemovexattr(path string, attr string) (err error) {
	return errUnsupported
}

// LSetxattr sets an xattr, not following symlinks
func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
	return errUnsupported
}

// LGetxattr gets an xattr, not following symlinks
func LGetxattr(path, attr string) ([]byte, error) {
	return []byte{}, nil
}
@@ -1,2 +0,0 @@
*.test
coverage.txt
@ -1,191 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
https://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright The containerd Authors
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
https://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
|
@@ -1,20 +0,0 @@
# typeurl

[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl)
[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI)
[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl)

A Go package for managing the registration, marshaling, and unmarshaling of encoded types.

This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto).

## Project details

**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md)

information in our [`containerd/project`](https://github.com/containerd/project) repository.
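
A rough sketch of how the typeurl API described in the README above is typically used, assuming a hypothetical `Foo` type, an `example.com` URL prefix, and placeholder field values; only the `Register`, `MarshalAny`, and `UnmarshalAny` calls themselves come from the package:

```go
package main

import (
	"fmt"

	"github.com/containerd/typeurl"
)

// Foo is a hypothetical JSON-serializable type used for illustration.
type Foo struct {
	Field1 string
	Field2 string
}

func init() {
	// Register Foo under a URL path so typeurl can resolve it later.
	typeurl.Register(&Foo{}, "example.com", "Foo")
}

func main() {
	// Marshal the value into an Any carrying the registered TypeUrl.
	anyFoo, err := typeurl.MarshalAny(&Foo{Field1: "a", Field2: "b"})
	if err != nil {
		panic(err)
	}

	// Unmarshal back into the concrete type via the registry.
	v, err := typeurl.UnmarshalAny(anyFoo)
	if err != nil {
		panic(err)
	}
	fmt.Println(v.(*Foo).Field1)
}
```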
@@ -1,83 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

// Package typeurl assists with managing the registration, marshaling, and
// unmarshaling of types encoded as protobuf.Any.
//
// A protobuf.Any is a proto message that can contain any arbitrary data. It
// consists of two components, a TypeUrl and a Value, and its proto definition
// looks like this:
//
//   message Any {
//     string type_url = 1;
//     bytes value = 2;
//   }
//
// The TypeUrl is used to distinguish the contents from other proto.Any
// messages. This typeurl library manages these URLs to enable automagic
// marshaling and unmarshaling of the contents.
//
// For example, consider this go struct:
//
//   type Foo struct {
//     Field1 string
//     Field2 string
//   }
//
// To use typeurl, types must first be registered. This is typically done in
// the init function
//
//   func init() {
//     typeurl.Register(&Foo{}, "Foo")
//   }
//
// This will register the type Foo with the url path "Foo". The arguments to
// Register are variadic, and are used to construct a url path. Consider this
// example, from the github.com/containerd/containerd/client package:
//
//   func init() {
//     const prefix = "types.containerd.io"
//     // register TypeUrls for commonly marshaled external types
//     major := strconv.Itoa(specs.VersionMajor)
//     typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
//     // this function has more Register calls, which are elided.
//   }
//
// This registers several types under a more complex url, which ends up mapping
// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other
// value for major).
//
// Once a type is registered, it can be marshaled to a proto.Any message simply
// by calling `MarshalAny`, like this:
//
//   foo := &Foo{Field1: "value1", Field2: "value2"}
//   anyFoo, err := typeurl.MarshalAny(foo)
//
// MarshalAny will resolve the correct URL for the type. If the type in
// question implements the proto.Message interface, then it will be marshaled
// as a proto message. Otherwise, it will be marshaled as json. This means that
// typeurl will work on any arbitrary data, whether or not it has a proto
// definition, as long as it can be serialized to json.
//
// To unmarshal, the process is simply inverse:
//
//   iface, err := typeurl.UnmarshalAny(anyFoo)
//   foo := iface.(*Foo)
//
// The correct type is automatically chosen from the type registry, and the
// returned interface can be cast straight to that type.
@@ -1,8 +0,0 @@
module github.com/containerd/typeurl

go 1.13

require (
	github.com/gogo/protobuf v1.3.2
	github.com/pkg/errors v0.9.1
)
@@ -1,33 +0,0 @@
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1,214 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package typeurl

import (
	"encoding/json"
	"path"
	"reflect"
	"sync"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
	"github.com/pkg/errors"
)

var (
	mu       sync.RWMutex
	registry = make(map[reflect.Type]string)
)

// Definitions of common error types used throughout typeurl.
//
// These error types are used with errors.Wrap and errors.Wrapf to add context
// to an error.
//
// To detect an error class, use errors.Is() functions to tell whether an
// error is of this type.
var (
	ErrNotFound = errors.New("not found")
)

// Register a type with a base URL for JSON marshaling. When the MarshalAny and
// UnmarshalAny functions are called they will treat the Any type value as JSON.
// To use protocol buffers for handling the Any value the proto.Register
// function should be used instead of this function.
func Register(v interface{}, args ...string) {
	var (
		t = tryDereference(v)
		p = path.Join(args...)
	)
	mu.Lock()
	defer mu.Unlock()
	if et, ok := registry[t]; ok {
		if et != p {
			panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
		}
		return
	}
	registry[t] = p
}

// TypeURL returns the type url for a registered type.
func TypeURL(v interface{}) (string, error) {
	mu.RLock()
	u, ok := registry[tryDereference(v)]
	mu.RUnlock()
	if !ok {
		// fallback to the proto registry if it is a proto message
		pb, ok := v.(proto.Message)
		if !ok {
			return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v))
		}
		return proto.MessageName(pb), nil
	}
	return u, nil
}

// Is returns true if the type of the Any is the same as v.
func Is(any *types.Any, v interface{}) bool {
	// call to check that v is a pointer
	tryDereference(v)
	url, err := TypeURL(v)
	if err != nil {
		return false
	}
	return any.TypeUrl == url
}

// MarshalAny marshals the value v into an any with the correct TypeUrl.
// If the provided object is already a proto.Any message, then it will be
// returned verbatim. If it is of type proto.Message, it will be marshaled as a
// protocol buffer. Otherwise, the object will be marshaled to json.
func MarshalAny(v interface{}) (*types.Any, error) {
	var marshal func(v interface{}) ([]byte, error)
	switch t := v.(type) {
	case *types.Any:
		// avoid reserializing the type if we have an any.
		return t, nil
	case proto.Message:
		marshal = func(v interface{}) ([]byte, error) {
			return proto.Marshal(t)
		}
	default:
		marshal = json.Marshal
	}

	url, err := TypeURL(v)
	if err != nil {
		return nil, err
	}

	data, err := marshal(v)
	if err != nil {
		return nil, err
	}
	return &types.Any{
		TypeUrl: url,
		Value:   data,
	}, nil
}

// UnmarshalAny unmarshals the any type into a concrete type.
func UnmarshalAny(any *types.Any) (interface{}, error) {
	return UnmarshalByTypeURL(any.TypeUrl, any.Value)
}

// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type.
func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) {
	return unmarshal(typeURL, value, nil)
}

// UnmarshalTo unmarshals the any type into a concrete type passed in the out
// argument. It is identical to UnmarshalAny, but lets clients provide a
// destination type through the out argument.
func UnmarshalTo(any *types.Any, out interface{}) error {
	return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out)
}

// UnmarshalTo unmarshals the given type and value into a concrete type passed
// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients
// provide a destination type through the out argument.
func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error {
	_, err := unmarshal(typeURL, value, out)
	return err
}

func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) {
	t, err := getTypeByUrl(typeURL)
	if err != nil {
		return nil, err
	}

	if v == nil {
		v = reflect.New(t.t).Interface()
	} else {
		// Validate interface type provided by client
		vURL, err := TypeURL(v)
		if err != nil {
			return nil, err
		}
		if typeURL != vURL {
			return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL)
		}
	}

	if t.isProto {
		err = proto.Unmarshal(value, v.(proto.Message))
	} else {
		err = json.Unmarshal(value, v)
	}

	return v, err
}

type urlType struct {
	t       reflect.Type
	isProto bool
}

func getTypeByUrl(url string) (urlType, error) {
	mu.RLock()
	for t, u := range registry {
		if u == url {
			mu.RUnlock()
			return urlType{
				t: t,
			}, nil
		}
	}
	mu.RUnlock()
	// fallback to proto registry
	t := proto.MessageType(url)
	if t != nil {
		return urlType{
			// get the underlying Elem because proto returns a pointer to the type
			t:       t.Elem(),
			isProto: true,
		}, nil
	}
	return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
}

func tryDereference(v interface{}) reflect.Type {
	t := reflect.TypeOf(v)
	if t.Kind() == reflect.Ptr {
		// require check of pointer but dereference to register
		return t.Elem()
	}
	panic("v is not a pointer to a type")
}
@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
@@ -1,10 +0,0 @@
language: go
go:
  - 1.10.x
  - 1.11.x
script: go test -v -check.vv -race ./...
sudo: false
notifications:
  email:
    on_success: never
    on_failure: always
@@ -1,27 +0,0 @@
Copyright (c) 2015-2020, Tim Heckman
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of gofrs nor the names of its contributors may be used
  to endorse or promote products derived from this software without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -1,41 +0,0 @@
# flock
[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock)
[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock)
[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE)
[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock)

`flock` implements a thread-safe sync.Locker interface for file locking. It also
includes a non-blocking TryLock() function to allow locking without blocking execution.

## License
`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details.

## Go Compatibility
This package makes use of the `context` package that was introduced in Go 1.7. As such, this
package has an implicit dependency on Go 1.7+.

## Installation
```
go get -u github.com/gofrs/flock
```

## Usage
```Go
import "github.com/gofrs/flock"

fileLock := flock.New("/var/lock/go-lock.lock")

locked, err := fileLock.TryLock()

if err != nil {
	// handle locking error
}

if locked {
	// do work
	fileLock.Unlock()
}
```

For more detailed usage information take a look at the package API docs on
[GoDoc](https://godoc.org/github.com/gofrs/flock).
@@ -1,25 +0,0 @@
version: '{build}'

build: false
deploy: false

clone_folder: 'c:\gopath\src\github.com\gofrs\flock'

environment:
  GOPATH: 'c:\gopath'
  GOVERSION: '1.11'

init:
  - git config --global core.autocrlf input

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi
  - msiexec /i go%GOVERSION%.windows-amd64.msi /q
  - set Path=c:\go\bin;c:\gopath\bin;%Path%
  - go version
  - go env

test_script:
  - go get -t ./...
  - go test -race -v ./...
@ -1,135 +0,0 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.

// Package flock implements a thread-safe interface for file locking.
// It also includes a non-blocking TryLock() function to allow locking
// without blocking execution.
//
// Package flock is released under the BSD 3-Clause License. See the LICENSE file
// for more details.
//
// While using this library, remember that the locking behaviors are not
// guaranteed to be the same on each platform. For example, some UNIX-like
// operating systems will transparently convert a shared lock to an exclusive
// lock. If you Unlock() the flock from a location where you believe that you
// have the shared lock, you may accidentally drop the exclusive lock.
package flock

import (
    "context"
    "os"
    "sync"
    "time"
)

// Flock is the struct type to handle file locking. All fields are unexported,
// with access to some of the fields provided by getter methods (Path() and Locked()).
type Flock struct {
    path string
    m    sync.RWMutex
    fh   *os.File
    l    bool
    r    bool
}

// New returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
func New(path string) *Flock {
    return &Flock{path: path}
}

// NewFlock returns a new instance of *Flock. The only parameter
// it takes is the path to the desired lockfile.
//
// Deprecated: Use New instead.
func NewFlock(path string) *Flock {
    return New(path)
}

// Close is equivalent to calling Unlock.
//
// This will release the lock and close the underlying file descriptor.
// It will not remove the file from disk, that's up to your application.
func (f *Flock) Close() error {
    return f.Unlock()
}

// Path returns the path as provided in NewFlock().
func (f *Flock) Path() string {
    return f.path
}

// Locked returns the lock state (locked: true, unlocked: false).
//
// Warning: by the time you use the returned value, the state may have changed.
func (f *Flock) Locked() bool {
    f.m.RLock()
    defer f.m.RUnlock()
    return f.l
}

// RLocked returns the read lock state (locked: true, unlocked: false).
//
// Warning: by the time you use the returned value, the state may have changed.
func (f *Flock) RLocked() bool {
    f.m.RLock()
    defer f.m.RUnlock()
    return f.r
}

func (f *Flock) String() string {
    return f.path
}

// TryLockContext repeatedly tries to take an exclusive lock until one of the
// conditions is met: TryLock succeeds, TryLock fails with error, or Context
// Done channel is closed.
func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
    return tryCtx(ctx, f.TryLock, retryDelay)
}

// TryRLockContext repeatedly tries to take a shared lock until one of the
// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context
// Done channel is closed.
func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) {
    return tryCtx(ctx, f.TryRLock, retryDelay)
}

func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) {
    if ctx.Err() != nil {
        return false, ctx.Err()
    }
    for {
        if ok, err := fn(); ok || err != nil {
            return ok, err
        }
        select {
        case <-ctx.Done():
            return false, ctx.Err()
        case <-time.After(retryDelay):
            // try again
        }
    }
}

func (f *Flock) setFh() error {
    // open a new os.File instance
    // create it if it doesn't exist, and open the file read-only.
    fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600))
    if err != nil {
        return err
    }

    // set the filehandle on the struct
    f.fh = fh
    return nil
}

// ensure the file handle is closed if no lock is held
func (f *Flock) ensureFhState() {
    if !f.l && !f.r && f.fh != nil {
        f.fh.Close()
        f.fh = nil
    }
}
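Note: the package comment above documents a small, non-blocking locking API (New, TryLock, Unlock, Path). The following is a minimal usage sketch, not part of the diff; the import path matches the package's upstream repository and the lock-file path is an arbitrary example.

// example_flock_usage.go - illustrative sketch of the flock API shown above.
package main

import (
    "fmt"

    "github.com/gofrs/flock"
)

func main() {
    lock := flock.New("/tmp/example.lock") // any writable path will do

    locked, err := lock.TryLock() // non-blocking attempt at an exclusive lock
    if err != nil {
        fmt.Println("trylock failed:", err)
        return
    }
    if !locked {
        fmt.Println("another process holds", lock.Path())
        return
    }
    defer lock.Unlock() // releases the lock and closes the file descriptor

    fmt.Println("holding exclusive lock on", lock.Path())
}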
@ -1,197 +0,0 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.

// +build !windows

package flock

import (
    "os"
    "syscall"
)

// Lock is a blocking call to try and take an exclusive file lock. It will wait
// until it is able to obtain the exclusive file lock. It's recommended that
// TryLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already exclusive-locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
//
// If the *Flock has a shared lock (RLock), this may transparently replace the
// shared lock with an exclusive lock on some UNIX-like operating systems. Be
// careful when using exclusive locks in conjunction with shared locks
// (RLock()), because calling Unlock() may accidentally release the exclusive
// lock that was once a shared lock.
func (f *Flock) Lock() error {
    return f.lock(&f.l, syscall.LOCK_EX)
}

// RLock is a blocking call to try and take a shared file lock. It will wait
// until it is able to obtain the shared file lock. It's recommended that
// TryRLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already shared-locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
    return f.lock(&f.r, syscall.LOCK_SH)
}

func (f *Flock) lock(locked *bool, flag int) error {
    f.m.Lock()
    defer f.m.Unlock()

    if *locked {
        return nil
    }

    if f.fh == nil {
        if err := f.setFh(); err != nil {
            return err
        }
        defer f.ensureFhState()
    }

    if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil {
        shouldRetry, reopenErr := f.reopenFDOnError(err)
        if reopenErr != nil {
            return reopenErr
        }

        if !shouldRetry {
            return err
        }

        if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil {
            return err
        }
    }

    *locked = true
    return nil
}

// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
// while it is running the Locked() and RLocked() functions will be blocked.
//
// This function short-circuits if we are unlocked already. If not, it calls
// syscall.LOCK_UN on the file and closes the file descriptor. It does not
// remove the file from disk. It's up to your application to do.
//
// Please note, if your shared lock became an exclusive lock this may
// unintentionally drop the exclusive lock if called by the consumer that
// believes they have a shared lock. Please see Lock() for more details.
func (f *Flock) Unlock() error {
    f.m.Lock()
    defer f.m.Unlock()

    // if we aren't locked or if the lockfile instance is nil
    // just return a nil error because we are unlocked
    if (!f.l && !f.r) || f.fh == nil {
        return nil
    }

    // mark the file as unlocked
    if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil {
        return err
    }

    f.fh.Close()

    f.l = false
    f.r = false
    f.fh = nil

    return nil
}

// TryLock is the preferred function for taking an exclusive file lock. This
// function takes an RW-mutex lock before it tries to lock the file, so there is
// the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the exclusive
// file lock, the function will return false instead of waiting for the lock. If
// we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
    return f.try(&f.l, syscall.LOCK_EX)
}

// TryRLock is the preferred function for taking a shared file lock. This
// function takes an RW-mutex lock before it tries to lock the file, so there is
// the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the shared file
// lock, the function will return false instead of waiting for the lock. If we
// get the lock, we also set the *Flock instance as being share-locked.
func (f *Flock) TryRLock() (bool, error) {
    return f.try(&f.r, syscall.LOCK_SH)
}

func (f *Flock) try(locked *bool, flag int) (bool, error) {
    f.m.Lock()
    defer f.m.Unlock()

    if *locked {
        return true, nil
    }

    if f.fh == nil {
        if err := f.setFh(); err != nil {
            return false, err
        }
        defer f.ensureFhState()
    }

    var retried bool
retry:
    err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB)

    switch err {
    case syscall.EWOULDBLOCK:
        return false, nil
    case nil:
        *locked = true
        return true, nil
    }
    if !retried {
        if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil {
            return false, reopenErr
        } else if shouldRetry {
            retried = true
            goto retry
        }
    }

    return false, err
}

// reopenFDOnError determines whether we should reopen the file handle
// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c:
// Since Linux 3.4 (commit 55725513)
// Probably NFSv4 where flock() is emulated by fcntl().
func (f *Flock) reopenFDOnError(err error) (bool, error) {
    if err != syscall.EIO && err != syscall.EBADF {
        return false, nil
    }
    if st, err := f.fh.Stat(); err == nil {
        // if the file is able to be read and written
        if st.Mode()&0600 == 0600 {
            f.fh.Close()
            f.fh = nil

            // reopen in read-write mode and set the filehandle
            fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600))
            if err != nil {
                return false, err
            }
            f.fh = fh
            return true, nil
        }
    }

    return false, nil
}
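Note: the TryLock/TryRLock entry points above are exactly what the TryLockContext helper from flock.go polls, so a caller can wait for a lock with a bounded deadline. The sketch below is illustrative and not part of the diff; the timeout and retry interval are arbitrary values.

// example_flock_context.go - illustrative sketch of polling for a lock with a
// deadline via TryLockContext shown earlier.
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/gofrs/flock"
)

func main() {
    lock := flock.New("/tmp/example.lock")

    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // Retry TryLock every 250ms until it succeeds, fails with an error,
    // or the context deadline expires.
    locked, err := lock.TryLockContext(ctx, 250*time.Millisecond)
    if err != nil {
        fmt.Println("gave up waiting for the lock:", err)
        return
    }
    if locked {
        defer lock.Unlock()
        fmt.Println("acquired the lock before the deadline")
    }
}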
@ -1,76 +0,0 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.

// +build windows

package flock

import (
    "syscall"
    "unsafe"
)

var (
    kernel32, _         = syscall.LoadLibrary("kernel32.dll")
    procLockFileEx, _   = syscall.GetProcAddress(kernel32, "LockFileEx")
    procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx")
)

const (
    winLockfileFailImmediately = 0x00000001
    winLockfileExclusiveLock   = 0x00000002
    winLockfileSharedLock      = 0x00000000
)

// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows
// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as:
//
// > The function requests an exclusive lock. Otherwise, it requests a shared
// > lock.
//
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx

func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
    r1, _, errNo := syscall.Syscall6(
        uintptr(procLockFileEx),
        6,
        uintptr(handle),
        uintptr(flags),
        uintptr(reserved),
        uintptr(numberOfBytesToLockLow),
        uintptr(numberOfBytesToLockHigh),
        uintptr(unsafe.Pointer(offset)))

    if r1 != 1 {
        if errNo == 0 {
            return false, syscall.EINVAL
        }

        return false, errNo
    }

    return true, 0
}

func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) {
    r1, _, errNo := syscall.Syscall6(
        uintptr(procUnlockFileEx),
        5,
        uintptr(handle),
        uintptr(reserved),
        uintptr(numberOfBytesToLockLow),
        uintptr(numberOfBytesToLockHigh),
        uintptr(unsafe.Pointer(offset)),
        0)

    if r1 != 1 {
        if errNo == 0 {
            return false, syscall.EINVAL
        }

        return false, errNo
    }

    return true, 0
}
@ -1,142 +0,0 @@
// Copyright 2015 Tim Heckman. All rights reserved.
// Use of this source code is governed by the BSD 3-Clause
// license that can be found in the LICENSE file.

package flock

import (
    "syscall"
)

// ErrorLockViolation is the error code returned from the Windows syscall when a
// lock would block and you ask to fail immediately.
const ErrorLockViolation syscall.Errno = 0x21 // 33

// Lock is a blocking call to try and take an exclusive file lock. It will wait
// until it is able to obtain the exclusive file lock. It's recommended that
// TryLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) Lock() error {
    return f.lock(&f.l, winLockfileExclusiveLock)
}

// RLock is a blocking call to try and take a shared file lock. It will wait
// until it is able to obtain the shared file lock. It's recommended that
// TryRLock() be used over this function. This function may block the ability to
// query the current Locked() or RLocked() status due to a RW-mutex lock.
//
// If we are already locked, this function short-circuits and returns
// immediately assuming it can take the mutex lock.
func (f *Flock) RLock() error {
    return f.lock(&f.r, winLockfileSharedLock)
}

func (f *Flock) lock(locked *bool, flag uint32) error {
    f.m.Lock()
    defer f.m.Unlock()

    if *locked {
        return nil
    }

    if f.fh == nil {
        if err := f.setFh(); err != nil {
            return err
        }
        defer f.ensureFhState()
    }

    if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
        return errNo
    }

    *locked = true
    return nil
}

// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so
// while it is running the Locked() and RLocked() functions will be blocked.
//
// This function short-circuits if we are unlocked already. If not, it calls
// UnlockFileEx() on the file and closes the file descriptor. It does not remove
// the file from disk. It's up to your application to do.
func (f *Flock) Unlock() error {
    f.m.Lock()
    defer f.m.Unlock()

    // if we aren't locked or if the lockfile instance is nil
    // just return a nil error because we are unlocked
    if (!f.l && !f.r) || f.fh == nil {
        return nil
    }

    // mark the file as unlocked
    if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 {
        return errNo
    }

    f.fh.Close()

    f.l = false
    f.r = false
    f.fh = nil

    return nil
}

// TryLock is the preferred function for taking an exclusive file lock. This
// function does take a RW-mutex lock before it tries to lock the file, so there
// is the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the exclusive
// file lock, the function will return false instead of waiting for the lock. If
// we get the lock, we also set the *Flock instance as being exclusive-locked.
func (f *Flock) TryLock() (bool, error) {
    return f.try(&f.l, winLockfileExclusiveLock)
}

// TryRLock is the preferred function for taking a shared file lock. This
// function does take a RW-mutex lock before it tries to lock the file, so there
// is the possibility that this function may block for a short time if another
// goroutine is trying to take any action.
//
// The actual file lock is non-blocking. If we are unable to get the shared file
// lock, the function will return false instead of waiting for the lock. If we
// get the lock, we also set the *Flock instance as being shared-locked.
func (f *Flock) TryRLock() (bool, error) {
    return f.try(&f.r, winLockfileSharedLock)
}

func (f *Flock) try(locked *bool, flag uint32) (bool, error) {
    f.m.Lock()
    defer f.m.Unlock()

    if *locked {
        return true, nil
    }

    if f.fh == nil {
        if err := f.setFh(); err != nil {
            return false, err
        }
        defer f.ensureFhState()
    }

    _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{})

    if errNo > 0 {
        if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING {
            return false, nil
        }

        return false, errNo
    }

    *locked = true

    return true, nil
}
@ -1,203 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

   "License" shall mean the terms and conditions for use, reproduction,
   and distribution as defined by Sections 1 through 9 of this document.

   "Licensor" shall mean the copyright owner or entity authorized by
   the copyright owner that is granting the License.

   "Legal Entity" shall mean the union of the acting entity and all
   other entities that control, are controlled by, or are under common
   control with that entity. For the purposes of this definition,
   "control" means (i) the power, direct or indirect, to cause the
   direction or management of such entity, whether by contract or
   otherwise, or (ii) ownership of fifty percent (50%) or more of the
   outstanding shares, or (iii) beneficial ownership of such entity.

   "You" (or "Your") shall mean an individual or Legal Entity
   exercising permissions granted by this License.

   "Source" form shall mean the preferred form for making modifications,
   including but not limited to software source code, documentation
   source, and configuration files.

   "Object" form shall mean any form resulting from mechanical
   transformation or translation of a Source form, including but
   not limited to compiled object code, generated documentation,
   and conversions to other media types.

   "Work" shall mean the work of authorship, whether in Source or
   Object form, made available under the License, as indicated by a
   copyright notice that is included in or attached to the work
   (an example is provided in the Appendix below).

   "Derivative Works" shall mean any work, whether in Source or Object
   form, that is based on (or derived from) the Work and for which the
   editorial revisions, annotations, elaborations, or other modifications
   represent, as a whole, an original work of authorship. For the purposes
   of this License, Derivative Works shall not include works that remain
   separable from, or merely link (or bind by name) to the interfaces of,
   the Work and Derivative Works thereof.

   "Contribution" shall mean any work of authorship, including
   the original version of the Work and any modifications or additions
   to that Work or Derivative Works thereof, that is intentionally
   submitted to Licensor for inclusion in the Work by the copyright owner
   or by an individual or Legal Entity authorized to submit on behalf of
   the copyright owner. For the purposes of this definition, "submitted"
   means any form of electronic, verbal, or written communication sent
   to the Licensor or its representatives, including but not limited to
   communication on electronic mailing lists, source code control systems,
   and issue tracking systems that are managed by, or on behalf of, the
   Licensor for the purpose of discussing and improving the Work, but
   excluding communication that is conspicuously marked or otherwise
   designated in writing by the copyright owner as "Not a Contribution."

   "Contributor" shall mean Licensor and any individual or Legal Entity
   on behalf of whom a Contribution has been received by Licensor and
   subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   copyright license to reproduce, prepare Derivative Works of,
   publicly display, publicly perform, sublicense, and distribute the
   Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
   this License, each Contributor hereby grants to You a perpetual,
   worldwide, non-exclusive, no-charge, royalty-free, irrevocable
   (except as stated in this section) patent license to make, have made,
   use, offer to sell, sell, import, and otherwise transfer the Work,
   where such license applies only to those patent claims licensable
   by such Contributor that are necessarily infringed by their
   Contribution(s) alone or by combination of their Contribution(s)
   with the Work to which such Contribution(s) was submitted. If You
   institute patent litigation against any entity (including a
   cross-claim or counterclaim in a lawsuit) alleging that the Work
   or a Contribution incorporated within the Work constitutes direct
   or contributory patent infringement, then any patent licenses
   granted to You under this License for that Work shall terminate
   as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
   Work or Derivative Works thereof in any medium, with or without
   modifications, and in Source or Object form, provided that You
   meet the following conditions:

   (a) You must give any other recipients of the Work or
       Derivative Works a copy of this License; and

   (b) You must cause any modified files to carry prominent notices
       stating that You changed the files; and

   (c) You must retain, in the Source form of any Derivative Works
       that You distribute, all copyright, patent, trademark, and
       attribution notices from the Source form of the Work,
       excluding those notices that do not pertain to any part of
       the Derivative Works; and

   (d) If the Work includes a "NOTICE" text file as part of its
       distribution, then any Derivative Works that You distribute must
       include a readable copy of the attribution notices contained
       within such NOTICE file, excluding those notices that do not
       pertain to any part of the Derivative Works, in at least one
       of the following places: within a NOTICE text file distributed
       as part of the Derivative Works; within the Source form or
       documentation, if provided along with the Derivative Works; or,
       within a display generated by the Derivative Works, if and
       wherever such third-party notices normally appear. The contents
       of the NOTICE file are for informational purposes only and
       do not modify the License. You may add Your own attribution
       notices within Derivative Works that You distribute, alongside
       or as an addendum to the NOTICE text from the Work, provided
       that such additional attribution notices cannot be construed
       as modifying the License.

   You may add Your own copyright statement to Your modifications and
   may provide additional or different license terms and conditions
   for use, reproduction, or distribution of Your modifications, or
   for any such Derivative Works as a whole, provided Your use,
   reproduction, and distribution of the Work otherwise complies with
   the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
   any Contribution intentionally submitted for inclusion in the Work
   by You to the Licensor shall be under the terms and conditions of
   this License, without any additional terms or conditions.
   Notwithstanding the above, nothing herein shall supersede or modify
   the terms of any separate license agreement you may have executed
   with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
   names, trademarks, service marks, or product names of the Licensor,
   except as required for reasonable and customary use in describing the
   origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
   agreed to in writing, Licensor provides the Work (and each
   Contributor provides its Contributions) on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
   implied, including, without limitation, any warranties or conditions
   of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
   PARTICULAR PURPOSE. You are solely responsible for determining the
   appropriateness of using or redistributing the Work and assume any
   risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
   whether in tort (including negligence), contract, or otherwise,
   unless required by applicable law (such as deliberate and grossly
   negligent acts) or agreed to in writing, shall any Contributor be
   liable to You for damages, including any direct, indirect, special,
   incidental, or consequential damages of any character arising as a
   result of this License or out of the use or inability to use the
   Work (including but not limited to damages for loss of goodwill,
   work stoppage, computer failure or malfunction, or any and all
   other commercial damages or losses), even if such Contributor
   has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
   the Work or Derivative Works thereof, You may choose to offer,
   and charge a fee for, acceptance of support, warranty, indemnity,
   or other liability obligations and/or rights consistent with this
   License. However, in accepting such obligations, You may act only
   on Your own behalf and on Your sole responsibility, not on behalf
   of any other Contributor, and only if You agree to indemnify,
   defend, and hold each Contributor harmless for any liability
   incurred by, or claims asserted against, such Contributor by reason
   of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

   To apply the Apache License to your work, attach the following
   boilerplate notice, with the fields enclosed by brackets "{}"
   replaced with your own identifying information. (Don't include
   the brackets!) The text should be enclosed in the appropriate
   comment syntax for the file format. We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

Copyright 2015, Google Inc
Copyright 2018, GoGo Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,257 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: google/rpc/code.proto

package rpc

import (
    fmt "fmt"
    proto "github.com/gogo/protobuf/proto"
    math "math"
    strconv "strconv"
)

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package

// The canonical error codes for Google APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
// the most specific error code that applies. For example, prefer
// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
type Code int32

const (
    // Not an error; returned on success
    //
    // HTTP Mapping: 200 OK
    OK Code = 0
    // The operation was cancelled, typically by the caller.
    //
    // HTTP Mapping: 499 Client Closed Request
    CANCELLED Code = 1
    // Unknown error. For example, this error may be returned when
    // a `Status` value received from another address space belongs to
    // an error space that is not known in this address space. Also
    // errors raised by APIs that do not return enough error information
    // may be converted to this error.
    //
    // HTTP Mapping: 500 Internal Server Error
    UNKNOWN Code = 2
    // The client specified an invalid argument. Note that this differs
    // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
    // that are problematic regardless of the state of the system
    // (e.g., a malformed file name).
    //
    // HTTP Mapping: 400 Bad Request
    INVALID_ARGUMENT Code = 3
    // The deadline expired before the operation could complete. For operations
    // that change the state of the system, this error may be returned
    // even if the operation has completed successfully. For example, a
    // successful response from a server could have been delayed long
    // enough for the deadline to expire.
    //
    // HTTP Mapping: 504 Gateway Timeout
    DEADLINE_EXCEEDED Code = 4
    // Some requested entity (e.g., file or directory) was not found.
    //
    // Note to server developers: if a request is denied for an entire class
    // of users, such as gradual feature rollout or undocumented whitelist,
    // `NOT_FOUND` may be used. If a request is denied for some users within
    // a class of users, such as user-based access control, `PERMISSION_DENIED`
    // must be used.
    //
    // HTTP Mapping: 404 Not Found
    NOT_FOUND Code = 5
    // The entity that a client attempted to create (e.g., file or directory)
    // already exists.
    //
    // HTTP Mapping: 409 Conflict
    ALREADY_EXISTS Code = 6
    // The caller does not have permission to execute the specified
    // operation. `PERMISSION_DENIED` must not be used for rejections
    // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
    // instead for those errors). `PERMISSION_DENIED` must not be
    // used if the caller can not be identified (use `UNAUTHENTICATED`
    // instead for those errors). This error code does not imply the
    // request is valid or the requested entity exists or satisfies
    // other pre-conditions.
    //
    // HTTP Mapping: 403 Forbidden
    PERMISSION_DENIED Code = 7
    // The request does not have valid authentication credentials for the
    // operation.
    //
    // HTTP Mapping: 401 Unauthorized
    UNAUTHENTICATED Code = 16
    // Some resource has been exhausted, perhaps a per-user quota, or
    // perhaps the entire file system is out of space.
    //
    // HTTP Mapping: 429 Too Many Requests
    RESOURCE_EXHAUSTED Code = 8
    // The operation was rejected because the system is not in a state
    // required for the operation's execution. For example, the directory
    // to be deleted is non-empty, an rmdir operation is applied to
    // a non-directory, etc.
    //
    // Service implementors can use the following guidelines to decide
    // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
    // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
    // (b) Use `ABORTED` if the client should retry at a higher level
    // (e.g., when a client-specified test-and-set fails, indicating the
    // client should restart a read-modify-write sequence).
    // (c) Use `FAILED_PRECONDITION` if the client should not retry until
    // the system state has been explicitly fixed. E.g., if an "rmdir"
    // fails because the directory is non-empty, `FAILED_PRECONDITION`
    // should be returned since the client should not retry unless
    // the files are deleted from the directory.
    //
    // HTTP Mapping: 400 Bad Request
    FAILED_PRECONDITION Code = 9
    // The operation was aborted, typically due to a concurrency issue such as
    // a sequencer check failure or transaction abort.
    //
    // See the guidelines above for deciding between `FAILED_PRECONDITION`,
    // `ABORTED`, and `UNAVAILABLE`.
    //
    // HTTP Mapping: 409 Conflict
    ABORTED Code = 10
    // The operation was attempted past the valid range. E.g., seeking or
    // reading past end-of-file.
    //
    // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
    // be fixed if the system state changes. For example, a 32-bit file
    // system will generate `INVALID_ARGUMENT` if asked to read at an
    // offset that is not in the range [0,2^32-1], but it will generate
    // `OUT_OF_RANGE` if asked to read from an offset past the current
    // file size.
    //
    // There is a fair bit of overlap between `FAILED_PRECONDITION` and
    // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
    // error) when it applies so that callers who are iterating through
    // a space can easily look for an `OUT_OF_RANGE` error to detect when
    // they are done.
    //
    // HTTP Mapping: 400 Bad Request
    OUT_OF_RANGE Code = 11
    // The operation is not implemented or is not supported/enabled in this
    // service.
    //
    // HTTP Mapping: 501 Not Implemented
    UNIMPLEMENTED Code = 12
    // Internal errors. This means that some invariants expected by the
    // underlying system have been broken. This error code is reserved
    // for serious errors.
    //
    // HTTP Mapping: 500 Internal Server Error
    INTERNAL Code = 13
    // The service is currently unavailable. This is most likely a
    // transient condition, which can be corrected by retrying with
    // a backoff.
    //
    // See the guidelines above for deciding between `FAILED_PRECONDITION`,
    // `ABORTED`, and `UNAVAILABLE`.
    //
    // HTTP Mapping: 503 Service Unavailable
    UNAVAILABLE Code = 14
    // Unrecoverable data loss or corruption.
    //
    // HTTP Mapping: 500 Internal Server Error
    DATA_LOSS Code = 15
)

var Code_name = map[int32]string{
    0:  "OK",
    1:  "CANCELLED",
    2:  "UNKNOWN",
    3:  "INVALID_ARGUMENT",
    4:  "DEADLINE_EXCEEDED",
    5:  "NOT_FOUND",
    6:  "ALREADY_EXISTS",
    7:  "PERMISSION_DENIED",
    16: "UNAUTHENTICATED",
    8:  "RESOURCE_EXHAUSTED",
    9:  "FAILED_PRECONDITION",
    10: "ABORTED",
    11: "OUT_OF_RANGE",
    12: "UNIMPLEMENTED",
    13: "INTERNAL",
    14: "UNAVAILABLE",
    15: "DATA_LOSS",
}

var Code_value = map[string]int32{
    "OK":                  0,
    "CANCELLED":           1,
    "UNKNOWN":             2,
    "INVALID_ARGUMENT":    3,
    "DEADLINE_EXCEEDED":   4,
    "NOT_FOUND":           5,
    "ALREADY_EXISTS":      6,
    "PERMISSION_DENIED":   7,
    "UNAUTHENTICATED":     16,
    "RESOURCE_EXHAUSTED":  8,
    "FAILED_PRECONDITION": 9,
    "ABORTED":             10,
    "OUT_OF_RANGE":        11,
    "UNIMPLEMENTED":       12,
    "INTERNAL":            13,
    "UNAVAILABLE":         14,
    "DATA_LOSS":           15,
}

func (Code) EnumDescriptor() ([]byte, []int) {
    return fileDescriptor_fe593a732623ccf0, []int{0}
}

func init() {
    proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
}

func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) }

var fileDescriptor_fe593a732623ccf0 = []byte{
    // 393 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x91, 0x3d, 0x6e, 0x13, 0x41,
    0x14, 0xc7, 0x3d, 0x76, 0x70, 0xe2, 0xf1, 0xd7, 0xcb, 0x84, 0x40, 0x37, 0x07, 0xa0, 0x70, 0x0a,
    0x4e, 0xf0, 0xbc, 0xf3, 0x9c, 0x8c, 0x32, 0x7e, 0xb3, 0x9a, 0x9d, 0x09, 0x01, 0x21, 0xad, 0xc4,
    0xc6, 0x4a, 0x03, 0x5a, 0xcb, 0xe2, 0x00, 0x9c, 0x85, 0x8a, 0x1b, 0x70, 0x85, 0x94, 0x29, 0x29,
    0xf1, 0xa6, 0xa1, 0x74, 0x49, 0x89, 0x06, 0x0a, 0xda, 0x9f, 0xde, 0xc7, 0xff, 0x43, 0x9e, 0xdf,
    0xb7, 0xed, 0xfd, 0xc7, 0xcd, 0xc5, 0x6e, 0xdb, 0x5c, 0x34, 0xed, 0xdd, 0x66, 0xb1, 0xdd, 0xb5,
    0x9f, 0x5b, 0x25, 0xff, 0xe1, 0xc5, 0x6e, 0xdb, 0xbc, 0xfa, 0xde, 0x97, 0x47, 0x45, 0x7b, 0xb7,
    0x51, 0x43, 0xd9, 0xf7, 0xd7, 0xd0, 0x53, 0x53, 0x39, 0x2a, 0x90, 0x0b, 0x72, 0x8e, 0x0c, 0x08,
    0x35, 0x96, 0xc7, 0x89, 0xaf, 0xd9, 0xbf, 0x61, 0xe8, 0xab, 0xe7, 0x12, 0x2c, 0xdf, 0xa0, 0xb3,
    0xa6, 0xc6, 0x70, 0x99, 0xd6, 0xc4, 0x11, 0x06, 0xea, 0x5c, 0x9e, 0x1a, 0x42, 0xe3, 0x2c, 0x53,
    0x4d, 0xb7, 0x05, 0x91, 0x21, 0x03, 0x47, 0xf9, 0x10, 0xfb, 0x58, 0xaf, 0x7c, 0x62, 0x03, 0xcf,
    0x94, 0x92, 0x33, 0x74, 0x81, 0xd0, 0xbc, 0xad, 0xe9, 0xd6, 0x56, 0xb1, 0x82, 0x61, 0xde, 0x2c,
    0x29, 0xac, 0x6d, 0x55, 0x59, 0xcf, 0xb5, 0x21, 0xb6, 0x64, 0xe0, 0x58, 0x9d, 0xc9, 0x79, 0x62,
    0x4c, 0xf1, 0x8a, 0x38, 0xda, 0x02, 0x23, 0x19, 0x00, 0xf5, 0x42, 0xaa, 0x40, 0x95, 0x4f, 0xa1,
    0xc8, 0x5f, 0xae, 0x30, 0x55, 0x99, 0x9f, 0xa8, 0x97, 0xf2, 0x6c, 0x85, 0xd6, 0x91, 0xa9, 0xcb,
    0x40, 0x85, 0x67, 0x63, 0xa3, 0xf5, 0x0c, 0xa3, 0xac, 0x1c, 0x97, 0x3e, 0xe4, 0x29, 0xa9, 0x40,
    0x4e, 0x7c, 0x8a, 0xb5, 0x5f, 0xd5, 0x01, 0xf9, 0x92, 0x60, 0xac, 0x4e, 0xe5, 0x34, 0xb1, 0x5d,
    0x97, 0x8e, 0xb2, 0x0d, 0x32, 0x30, 0x51, 0x13, 0x79, 0x62, 0x39, 0x52, 0x60, 0x74, 0x30, 0x55,
    0x73, 0x39, 0x4e, 0x8c, 0x37, 0x68, 0x1d, 0x2e, 0x1d, 0xc1, 0x2c, 0x1b, 0x32, 0x18, 0xb1, 0x76,
    0xbe, 0xaa, 0x60, 0xbe, 0x7c, 0xff, 0xb8, 0xd7, 0xbd, 0x1f, 0x7b, 0xdd, 0x3b, 0xec, 0xb5, 0xf8,
    0xbd, 0xd7, 0xe2, 0x4b, 0xa7, 0xc5, 0xb7, 0x4e, 0x8b, 0x87, 0x4e, 0x8b, 0xc7, 0x4e, 0x8b, 0x9f,
    0x9d, 0x16, 0xbf, 0x3a, 0xdd, 0x3b, 0x64, 0xfe, 0xa4, 0xc5, 0xc3, 0x93, 0x16, 0x72, 0xd6, 0xb4,
    0x9f, 0x16, 0xff, 0xf3, 0x5f, 0x8e, 0x72, 0xf8, 0x65, 0xae, 0xa5, 0x14, 0xef, 0x06, 0xbb, 0x6d,
    0xf3, 0xb5, 0x3f, 0x08, 0x65, 0xf1, 0x61, 0xf8, 0xb7, 0xaa, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff,
    0xff, 0x03, 0xd4, 0x27, 0xff, 0xc3, 0x01, 0x00, 0x00,
}

func (x Code) String() string {
    s, ok := Code_name[int32(x)]
    if ok {
        return s
    }
    return strconv.Itoa(int(x))
}
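Note: each Code constant above documents a canonical HTTP mapping in its comment. The helper below is an illustrative sketch, not part of the generated file; it only restates those documented mappings, and the import path is assumed from the vendored package location.

// codehttp.go - illustrative only; maps google.rpc Codes to the HTTP statuses
// documented in the generated comments above.
package codehttp

import rpc "github.com/gogo/googleapis/google/rpc" // assumed vendored import path

func httpStatusFromCode(c rpc.Code) int {
    switch c {
    case rpc.OK:
        return 200
    case rpc.CANCELLED:
        return 499 // Client Closed Request
    case rpc.INVALID_ARGUMENT, rpc.FAILED_PRECONDITION, rpc.OUT_OF_RANGE:
        return 400
    case rpc.DEADLINE_EXCEEDED:
        return 504
    case rpc.NOT_FOUND:
        return 404
    case rpc.ALREADY_EXISTS, rpc.ABORTED:
        return 409
    case rpc.PERMISSION_DENIED:
        return 403
    case rpc.UNAUTHENTICATED:
        return 401
    case rpc.RESOURCE_EXHAUSTED:
        return 429
    case rpc.UNIMPLEMENTED:
        return 501
    case rpc.UNAVAILABLE:
        return 503
    default: // UNKNOWN, INTERNAL, DATA_LOSS and any future codes
        return 500
    }
}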
@ -1,185 +0,0 @@
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.rpc;

option go_package = "rpc";
option java_multiple_files = true;
option java_outer_classname = "CodeProto";
option java_package = "com.google.rpc";
option objc_class_prefix = "RPC";

// The canonical error codes for Google APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
// the most specific error code that applies. For example, prefer
// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
enum Code {
  // Not an error; returned on success
  //
  // HTTP Mapping: 200 OK
  OK = 0;

  // The operation was cancelled, typically by the caller.
  //
  // HTTP Mapping: 499 Client Closed Request
  CANCELLED = 1;

  // Unknown error. For example, this error may be returned when
  // a `Status` value received from another address space belongs to
  // an error space that is not known in this address space. Also
  // errors raised by APIs that do not return enough error information
  // may be converted to this error.
  //
  // HTTP Mapping: 500 Internal Server Error
  UNKNOWN = 2;

  // The client specified an invalid argument. Note that this differs
  // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
  // that are problematic regardless of the state of the system
  // (e.g., a malformed file name).
  //
  // HTTP Mapping: 400 Bad Request
  INVALID_ARGUMENT = 3;

  // The deadline expired before the operation could complete. For operations
  // that change the state of the system, this error may be returned
  // even if the operation has completed successfully. For example, a
  // successful response from a server could have been delayed long
  // enough for the deadline to expire.
  //
  // HTTP Mapping: 504 Gateway Timeout
  DEADLINE_EXCEEDED = 4;

  // Some requested entity (e.g., file or directory) was not found.
  //
  // Note to server developers: if a request is denied for an entire class
  // of users, such as gradual feature rollout or undocumented whitelist,
  // `NOT_FOUND` may be used. If a request is denied for some users within
  // a class of users, such as user-based access control, `PERMISSION_DENIED`
  // must be used.
  //
  // HTTP Mapping: 404 Not Found
  NOT_FOUND = 5;

  // The entity that a client attempted to create (e.g., file or directory)
  // already exists.
  //
  // HTTP Mapping: 409 Conflict
  ALREADY_EXISTS = 6;

  // The caller does not have permission to execute the specified
  // operation. `PERMISSION_DENIED` must not be used for rejections
  // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
  // instead for those errors). `PERMISSION_DENIED` must not be
  // used if the caller can not be identified (use `UNAUTHENTICATED`
  // instead for those errors). This error code does not imply the
  // request is valid or the requested entity exists or satisfies
  // other pre-conditions.
  //
  // HTTP Mapping: 403 Forbidden
  PERMISSION_DENIED = 7;

  // The request does not have valid authentication credentials for the
  // operation.
  //
  // HTTP Mapping: 401 Unauthorized
  UNAUTHENTICATED = 16;

  // Some resource has been exhausted, perhaps a per-user quota, or
  // perhaps the entire file system is out of space.
  //
  // HTTP Mapping: 429 Too Many Requests
  RESOURCE_EXHAUSTED = 8;

  // The operation was rejected because the system is not in a state
  // required for the operation's execution. For example, the directory
  // to be deleted is non-empty, an rmdir operation is applied to
  // a non-directory, etc.
  //
  // Service implementors can use the following guidelines to decide
  // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
  // (a) Use `UNAVAILABLE` if the client can retry just the failing call.
  // (b) Use `ABORTED` if the client should retry at a higher level
  // (e.g., when a client-specified test-and-set fails, indicating the
  // client should restart a read-modify-write sequence).
  // (c) Use `FAILED_PRECONDITION` if the client should not retry until
  // the system state has been explicitly fixed. E.g., if an "rmdir"
  // fails because the directory is non-empty, `FAILED_PRECONDITION`
  // should be returned since the client should not retry unless
  // the files are deleted from the directory.
  //
  // HTTP Mapping: 400 Bad Request
  FAILED_PRECONDITION = 9;

  // The operation was aborted, typically due to a concurrency issue such as
  // a sequencer check failure or transaction abort.
  //
  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
  // `ABORTED`, and `UNAVAILABLE`.
  //
  // HTTP Mapping: 409 Conflict
  ABORTED = 10;

  // The operation was attempted past the valid range. E.g., seeking or
  // reading past end-of-file.
  //
  // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
  // be fixed if the system state changes. For example, a 32-bit file
  // system will generate `INVALID_ARGUMENT` if asked to read at an
  // offset that is not in the range [0,2^32-1], but it will generate
  // `OUT_OF_RANGE` if asked to read from an offset past the current
  // file size.
  //
  // There is a fair bit of overlap between `FAILED_PRECONDITION` and
  // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
  // error) when it applies so that callers who are iterating through
  // a space can easily look for an `OUT_OF_RANGE` error to detect when
  // they are done.
  //
  // HTTP Mapping: 400 Bad Request
  OUT_OF_RANGE = 11;

  // The operation is not implemented or is not supported/enabled in this
  // service.
  //
  // HTTP Mapping: 501 Not Implemented
  UNIMPLEMENTED = 12;

  // Internal errors. This means that some invariants expected by the
  // underlying system have been broken. This error code is reserved
  // for serious errors.
  //
  // HTTP Mapping: 500 Internal Server Error
  INTERNAL = 13;

  // The service is currently unavailable. This is most likely a
  // transient condition, which can be corrected by retrying with
  // a backoff.
  //
  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
  // `ABORTED`, and `UNAVAILABLE`.
  //
  // HTTP Mapping: 503 Service Unavailable
  UNAVAILABLE = 14;

  // Unrecoverable data loss or corruption.
  //
  // HTTP Mapping: 500 Internal Server Error
  DATA_LOSS = 15;
}
The diff for one file is not shown because of its large size.
@ -1,200 +0,0 @@
|
||||||
// Copyright 2017 Google Inc.
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
// you may not use this file except in compliance with the License.
|
|
||||||
// You may obtain a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
// See the License for the specific language governing permissions and
|
|
||||||
// limitations under the License.
|
|
||||||
|
|
||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package google.rpc;
|
|
||||||
|
|
||||||
import "google/protobuf/duration.proto";
|
|
||||||
|
|
||||||
option go_package = "rpc";
|
|
||||||
option java_multiple_files = true;
|
|
||||||
option java_outer_classname = "ErrorDetailsProto";
|
|
||||||
option java_package = "com.google.rpc";
|
|
||||||
option objc_class_prefix = "RPC";
|
|
||||||
|
|
||||||
// Describes when the clients can retry a failed request. Clients could ignore
|
|
||||||
// the recommendation here or retry when this information is missing from error
|
|
||||||
// responses.
|
|
||||||
//
|
|
||||||
// It's always recommended that clients should use exponential backoff when
|
|
||||||
// retrying.
|
|
||||||
//
|
|
||||||
// Clients should wait until `retry_delay` amount of time has passed since
|
|
||||||
// receiving the error response before retrying. If retrying requests also
|
|
||||||
// fail, clients should use an exponential backoff scheme to gradually increase
|
|
||||||
// the delay between retries based on `retry_delay`, until either a maximum
|
|
||||||
// number of retires have been reached or a maximum retry delay cap has been
|
|
||||||
// reached.
|
|
||||||
message RetryInfo {
|
|
||||||
// Clients should wait at least this long between retrying the same request.
|
|
||||||
google.protobuf.Duration retry_delay = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes additional debugging info.
|
|
||||||
message DebugInfo {
|
|
||||||
// The stack trace entries indicating where the error occurred.
|
|
||||||
repeated string stack_entries = 1;
|
|
||||||
|
|
||||||
// Additional debugging information provided by the server.
|
|
||||||
string detail = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Describes how a quota check failed.
//
// For example if a daily limit was exceeded for the calling project,
// a service could respond with a QuotaFailure detail containing the project
// id and the description of the quota limit that was exceeded. If the
// calling project hasn't enabled the service in the developer console, then
// a service could respond with the project id and set `service_disabled`
// to true.
//
// Also see RetryDetail and Help types for other details about handling a
// quota failure.
message QuotaFailure {
  // A message type used to describe a single quota violation. For example, a
  // daily quota or a custom quota that was exceeded.
  message Violation {
    // The subject on which the quota check failed.
    // For example, "clientip:<ip address of client>" or "project:<Google
    // developer project id>".
    string subject = 1;

    // A description of how the quota check failed. Clients can use this
    // description to find more about the quota configuration in the service's
    // public documentation, or find the relevant quota limit to adjust through
    // developer console.
    //
    // For example: "Service disabled" or "Daily Limit for read operations
    // exceeded".
    string description = 2;
  }

  // Describes all quota violations.
  repeated Violation violations = 1;
}

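For the server-side scenario sketched in the comment above, a Go handler could attach such a detail roughly as follows. This is a hedged sketch against the stock errdetails and status packages rather than this vendored copy, and the subject and description strings are invented for illustration.

package rpcexample

import (
    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// quotaExceededErr builds a RESOURCE_EXHAUSTED error carrying a QuotaFailure
// detail that names the violated limit for the given project.
func quotaExceededErr(project string) error {
    st := status.New(codes.ResourceExhausted, "daily quota exceeded")
    detailed, err := st.WithDetails(&errdetails.QuotaFailure{
        Violations: []*errdetails.QuotaFailure_Violation{{
            Subject:     "project:" + project,
            Description: "Daily Limit for read operations exceeded",
        }},
    })
    if err != nil {
        // WithDetails only fails if the detail cannot be marshalled;
        // fall back to the plain status in that case.
        return st.Err()
    }
    return detailed.Err()
}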
// Describes what preconditions have failed.
//
// For example, if an RPC failed because it required the Terms of Service to be
// acknowledged, it could list the terms of service violation in the
// PreconditionFailure message.
message PreconditionFailure {
  // A message type used to describe a single precondition failure.
  message Violation {
    // The type of PreconditionFailure. We recommend using a service-specific
    // enum type to define the supported precondition violation types. For
    // example, "TOS" for "Terms of Service violation".
    string type = 1;

    // The subject, relative to the type, that failed.
    // For example, "google.com/cloud" relative to the "TOS" type would
    // indicate which terms of service is being referenced.
    string subject = 2;

    // A description of how the precondition failed. Developers can use this
    // description to understand how to fix the failure.
    //
    // For example: "Terms of service not accepted".
    string description = 3;
  }

  // Describes all precondition violations.
  repeated Violation violations = 1;
}

// Describes violations in a client request. This error type focuses on the
// syntactic aspects of the request.
message BadRequest {
  // A message type used to describe a single bad request field.
  message FieldViolation {
    // A path leading to a field in the request body. The value will be a
    // sequence of dot-separated identifiers that identify a protocol buffer
    // field. E.g., "field_violations.field" would identify this field.
    string field = 1;

    // A description of why the request element is bad.
    string description = 2;
  }

  // Describes all violations in a client request.
  repeated FieldViolation field_violations = 1;
}

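BadRequest details usually come out of request validation. Below is a sketch of how a Go server might collect one FieldViolation per failed check; CreateUserRequest and its fields are hypothetical stand-ins, not types defined anywhere in this repository.

package rpcexample

import (
    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// CreateUserRequest is a hypothetical request used only for illustration.
type CreateUserRequest struct {
    Email string
    Age   int
}

// validateCreateUser returns nil if the request is valid, otherwise an
// INVALID_ARGUMENT error listing every field that failed validation.
func validateCreateUser(req *CreateUserRequest) error {
    var violations []*errdetails.BadRequest_FieldViolation
    if req.Email == "" {
        violations = append(violations, &errdetails.BadRequest_FieldViolation{
            Field:       "email",
            Description: "email must not be empty",
        })
    }
    if req.Age < 0 {
        violations = append(violations, &errdetails.BadRequest_FieldViolation{
            Field:       "age",
            Description: "age must not be negative",
        })
    }
    if len(violations) == 0 {
        return nil
    }
    st := status.New(codes.InvalidArgument, "invalid CreateUser request")
    if detailed, err := st.WithDetails(&errdetails.BadRequest{FieldViolations: violations}); err == nil {
        return detailed.Err()
    }
    return st.Err()
}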
// Contains metadata about the request that clients can attach when filing a bug
// or providing other forms of feedback.
message RequestInfo {
  // An opaque string that should only be interpreted by the service generating
  // it. For example, it can be used to identify requests in the service's logs.
  string request_id = 1;

  // Any data that was used to serve this request. For example, an encrypted
  // stack trace that can be sent back to the service provider for debugging.
  string serving_data = 2;
}

// Describes the resource that is being accessed.
message ResourceInfo {
  // A name for the type of resource being accessed, e.g. "sql table",
  // "cloud storage bucket", "file", "Google calendar"; or the type URL
  // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
  string resource_type = 1;

  // The name of the resource being accessed. For example, a shared calendar
  // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
  // error is
  // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
  string resource_name = 2;

  // The owner of the resource (optional).
  // For example, "user:<owner email>" or "project:<Google developer project
  // id>".
  string owner = 3;

  // Describes what error is encountered when accessing this resource.
  // For example, updating a cloud project may require the `writer` permission
  // on the developer console project.
  string description = 4;
}

// Provides links to documentation or for performing an out of band action.
//
// For example, if a quota check failed with an error indicating the calling
// project hasn't enabled the accessed service, this can contain a URL pointing
// directly to the right place in the developer console to flip the bit.
message Help {
  // Describes a URL link.
  message Link {
    // Describes what the link offers.
    string description = 1;

    // The URL of the link.
    string url = 2;
  }

  // URL(s) pointing to additional information on handling the current error.
  repeated Link links = 1;
}

// Provides a localized error message that is safe to return to the user
// which can be attached to an RPC error.
message LocalizedMessage {
  // The locale used following the specification defined at
  // http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
  // Examples are: "en-US", "fr-CH", "es-MX"
  string locale = 1;

  // The localized error message in the above locale.
  string message = 2;
}
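Reading these details back on the client is symmetric: walk status.Details() and type-switch on the messages of interest. A minimal sketch, again assuming the stock errdetails types rather than the go_package = "rpc" variant vendored here:

package rpcexample

import (
    "fmt"

    "google.golang.org/genproto/googleapis/rpc/errdetails"
    "google.golang.org/grpc/status"
)

// userFacingMessage picks a displayable message out of a failed call:
// the LocalizedMessage if the server attached one, plus any Help links.
func userFacingMessage(err error) string {
    if err == nil {
        return ""
    }
    st, ok := status.FromError(err)
    if !ok {
        return err.Error()
    }
    msg := st.Message()
    for _, d := range st.Details() {
        switch t := d.(type) {
        case *errdetails.LocalizedMessage:
            msg = t.GetMessage()
        case *errdetails.Help:
            for _, link := range t.GetLinks() {
                msg += fmt.Sprintf(" (see %s: %s)", link.GetDescription(), link.GetUrl())
            }
        }
    }
    return msg
}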