Signed-off-by: Yves Brissaud <yves.brissaud@docker.com>
Author: Yves Brissaud, 2019-10-30 10:05:32 +01:00
Parent: dc83704d7f
Commit: c9a62caae9
160 changed files: 3575 additions and 16967 deletions

36
Gopkg.lock (generated)

@@ -80,14 +80,6 @@
   pruneopts = "NUT"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-[[projects]]
-  branch = "master"
-  digest = "1:fdd135f63857b858859a5a2e361a117622824826a78ce9d8c058144a0f89c7ba"
-  name = "github.com/containerd/cgroups"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "d596c78861b15c27a9ac82975aca4413390b9b49"
 [[projects]]
   branch = "master"
   digest = "1:da4daad2ec1737eec4ebeeed7afedb631711f96bbac0c361a17a4d0369d00c6d"
@@ -97,8 +89,7 @@
   revision = "0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f"
 [[projects]]
-  branch = "master"
-  digest = "1:4bda9967609021b43808181e8606bf60e15fd19500f9a0b2bbf74e4c6e4114c5"
+  digest = "1:a40540af5e9f6f578a0473156f6ddb554aba4e96de5527648cfcc4d074f3120a"
   name = "github.com/containerd/containerd"
   packages = [
     ".",
@@ -155,7 +146,8 @@
     "version",
   ]
   pruneopts = "NUT"
-  revision = "f2b6c31d0fa7f5541f6a37d525ed461b294d785f"
+  revision = "36cf5b690dcc00ff0f34ff7799209050c3d0c59a"
+  version = "v1.3.0"
 [[projects]]
   branch = "master"
@@ -202,14 +194,6 @@
   revision = "d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9"
   version = "v3.3.12"
-[[projects]]
-  digest = "1:84c51760e27ff758724218a0b234a8264aa0992f49af4da50bedd682e196f996"
-  name = "github.com/coreos/go-systemd"
-  packages = ["dbus"]
-  pruneopts = "NUT"
-  revision = "e64a0ec8b42a61e2a9801dc1d0abe539dea79197"
-  version = "v20"
 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
@@ -219,7 +203,7 @@
   version = "v1.1.1"
 [[projects]]
-  digest = "1:c0a6ed57acd48a4a3ef5cfe1089fd2278827600415ab05af13444cd710881d6e"
+  digest = "1:e7719c3f4db444af314e43231f9bff4c4c9bcbb327faffe186bf2e026473abd9"
   name = "github.com/deislabs/cnab-go"
   packages = [
     "action",
@@ -232,8 +216,8 @@
     "utils/crud",
   ]
   pruneopts = "NUT"
-  revision = "0ce7659aab8dbd37cc912ea64cb78f403534de42"
-  version = "v0.3.2-beta1"
+  revision = "a1f17dcdaaff3e06c95ed85c3a34c21c35833831"
+  version = "v0.7.1-beta1"
 [[projects]]
   digest = "1:fdf76ff539694e8aba8e9c60e7cc1dead88d21c9dc09e5a330104adad0887440"
@@ -494,14 +478,6 @@
   pruneopts = "NUT"
   revision = "eeefdecb41b842af6dc652aaea4026e8403e62df"
-[[projects]]
-  digest = "1:08a552769d7194c0268ba31010f5da91878212603d11b4b551583a7586a76632"
-  name = "github.com/godbus/dbus"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "2ff6f7ffd60f0f2410b3105864bdd12c7894f844"
-  version = "v5.0.1"
 [[projects]]
   digest = "1:2dcc32af5bf38c37286c96f0077efa98007f67946b690fce98a3e07b9659bdf1"
   name = "github.com/gofrs/flock"


@@ -46,7 +46,7 @@ required = ["github.com/wadey/gocovmerge"]
 [[override]]
   name = "github.com/containerd/containerd"
-  branch = "master"
+  version = "v1.3.0"
 [[override]]
   name = "github.com/docker/cli"
@@ -54,7 +54,7 @@ required = ["github.com/wadey/gocovmerge"]
 [[override]]
   name = "github.com/deislabs/cnab-go"
-  version = "v0.3.2-beta1"
+  version = "v0.7.1-beta1"
 [[constraint]]
   name = "github.com/sirupsen/logrus"
@@ -82,7 +82,7 @@ required = ["github.com/wadey/gocovmerge"]
 [[override]]
   name = "github.com/docker/distribution"
-  revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580"
+  revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580" # version needed by containerd v1.3.0
 [[override]]
   name = "github.com/docker/swarmkit"


@@ -81,12 +81,10 @@ func TestCnabParameters(t *testing.T) {
 	cmd.Command = dockerCli.Command("app", "run", "--cnab-bundle-json", path.Join(testDir, "bundle.json"), "--name", "cnab-parameters",
 		"--set", "boolParam=true",
 		"--set", "stringParam=value",
-		"--set", "intParam=42",
-		"--set", "floatParam=3.14")
+		"--set", "intParam=42")
 	result := icmd.RunCmd(cmd).Assert(t, icmd.Success)
 	expectedOutput := `boolParam=true
 stringParam=value
-intParam=42
-floatParam=3.14`
+intParam=42`
 	assert.Assert(t, is.Contains(result.Combined(), expectedOutput))
 }

6
e2e/testdata/cnab-parameters/bundle.json (vendored)

@@ -40,12 +40,6 @@
         "destination": {
           "env": "INT_PARAM"
         }
-      },
-      "floatParam": {
-        "definition": "floatParam_type",
-        "destination": {
-          "env": "FLOAT_PARAM"
-        }
       }
     }
   }

1
e2e/testdata/cnab-parameters/cnab/app/run (vendored)

@@ -9,7 +9,6 @@ case $action in
     echo "boolParam=$BOOL_PARAM"
     echo "stringParam=$STRING_PARAM"
     echo "intParam=$INT_PARAM"
-    echo "floatParam=$FLOAT_PARAM"
     ;;
 uninstall)
     echo "uninstall action"


@@ -110,7 +110,9 @@ func prepareDriver(dockerCli command.Cli, bindMount BindMount, stdout io.Writer)
 	// Load any driver-specific config out of the environment.
 	driverCfg := map[string]string{}
 	for env := range d.Config() {
-		driverCfg[env] = os.Getenv(env)
+		if value, ok := os.LookupEnv(env); ok {
+			driverCfg[env] = value
+		}
 	}
 	d.SetConfig(driverCfg)
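Note on the hunk above: os.Getenv returns an empty string for unset variables, so the old code put empty values into the driver config for every declared key; os.LookupEnv reports whether a variable is actually set, so only present variables are copied. A minimal, self-contained Go sketch of that distinction (the variable names below are made up for illustration, not part of the patch):

// Only copy environment variables that are actually set, so absent options
// do not overwrite driver defaults with empty strings.
package main

import (
	"fmt"
	"os"
)

func main() {
	cfg := map[string]string{"VERBOSE": "false"} // pretend this is a driver default
	for _, env := range []string{"VERBOSE", "UNSET_OPTION"} {
		if value, ok := os.LookupEnv(env); ok {
			cfg[env] = value // set in the environment: override the default
		}
		// not set: keep the default instead of storing ""
	}
	fmt.Println(cfg)
}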


@@ -6,6 +6,8 @@ import (
 	"io"
 	"os"
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/deislabs/cnab-go/action"
 	"github.com/docker/app/internal"
 	bdl "github.com/docker/app/internal/bundle"
@@ -59,13 +61,18 @@ func runRender(dockerCli command.Cli, appname string, opts renderOptions) error
 		w = f
 	}
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = w
+		return nil
+	}
 	action, installation, errBuf, err := prepareCustomAction(internal.ActionRenderName, dockerCli, appname, w, opts)
 	if err != nil {
 		return err
 	}
 	installation.Parameters[internal.ParameterRenderFormatName] = opts.formatDriver
-	if err := action.Run(&installation.Claim, nil, w); err != nil {
+	if err := action.Run(&installation.Claim, nil, cfgFunc); err != nil {
 		return fmt.Errorf("render failed: %s\n%s", err, errBuf)
 	}
 	return nil
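The render hunk above and the remove, run, and update hunks that follow all apply the same migration for cnab-go v0.7.x: instead of handing an io.Writer straight to the action's Run, the caller passes a configuration function that mutates the driver Operation (here, setting its Out writer) before the operation executes. A self-contained Go sketch of that functional-configuration pattern; the Operation and Run types below are stand-ins for illustration, not the cnab-go API:

package main

import (
	"fmt"
	"io"
	"os"
)

// Operation is a stand-in for the struct the configuration functions mutate.
type Operation struct {
	Out io.Writer // where the action writes its output
}

// Run builds an operation with defaults, applies each configurator, then executes.
func Run(cfgs ...func(*Operation) error) error {
	op := &Operation{Out: os.Stdout}
	for _, cfg := range cfgs {
		if err := cfg(op); err != nil {
			return err
		}
	}
	fmt.Fprintln(op.Out, "operation output")
	return nil
}

func main() {
	// Equivalent of cfgFunc in the patch: redirect the operation's output.
	_ = Run(func(op *Operation) error {
		op.Out = os.Stderr
		return nil
	})
}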


@@ -6,6 +6,7 @@ import (
 	"github.com/docker/app/internal/cnab"
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/docker/app/internal/cliopts"
 	"github.com/deislabs/cnab-go/action"
@@ -79,7 +80,11 @@ func runRemove(dockerCli command.Cli, installationName string, opts removeOption
 	uninst := &action.Uninstall{
 		Driver: driverImpl,
 	}
-	if err := uninst.Run(&installation.Claim, creds, os.Stdout); err != nil {
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = dockerCli.Out()
+		return nil
+	}
+	if err := uninst.Run(&installation.Claim, creds, cfgFunc); err != nil {
 		if err2 := installationStore.Store(installation); err2 != nil {
 			return fmt.Errorf("%s while %s", err2, errBuf)
 		}


@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/docker/app/internal/cliopts"
 	"github.com/deislabs/cnab-go/action"
@@ -152,7 +153,11 @@ func runBundle(dockerCli command.Cli, bndl *bundle.Bundle, opts runOptions, ref
 	}
 	{
 		defer muteDockerCli(dockerCli)()
-		err = inst.Run(&installation.Claim, creds, os.Stdout)
+		cfgFunc := func(op *driver.Operation) error {
+			op.Out = dockerCli.Out()
+			return nil
+		}
+		err = inst.Run(&installation.Claim, creds, cfgFunc)
 	}
 	// Even if the installation failed, the installation is persisted with its failure status,
 	// so any installation needs a clean uninstallation.


@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"os"
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/deislabs/cnab-go/action"
 	"github.com/deislabs/cnab-go/credentials"
 	"github.com/docker/app/internal/bundle"
@@ -85,7 +87,11 @@ func runUpdate(dockerCli command.Cli, installationName string, opts updateOption
 	u := &action.Upgrade{
 		Driver: driverImpl,
 	}
-	err = u.Run(&installation.Claim, creds, os.Stdout)
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = dockerCli.Out()
+		return nil
+	}
+	err = u.Run(&installation.Claim, creds, cfgFunc)
 	err2 := installationStore.Store(installation)
 	if err != nil {
 		return fmt.Errorf("Update failed: %s\n%s", err, errBuf)

201
vendor/github.com/containerd/cgroups/LICENSE (generated, vendored)

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

337
vendor/github.com/containerd/cgroups/blkio.go (generated, vendored)

@@ -1,337 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewBlkio(root string) *blkioController {
return &blkioController{
root: filepath.Join(root, string(Blkio)),
}
}
type blkioController struct {
root string
}
func (b *blkioController) Name() Name {
return Blkio
}
func (b *blkioController) Path(path string) string {
return filepath.Join(b.root, path)
}
func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.BlockIO == nil {
return nil
}
for _, t := range createBlkioSettings(resources.BlockIO) {
if t.value != nil {
if err := ioutil.WriteFile(
filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
t.format(t.value),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
return b.Create(path, resources)
}
func (b *blkioController) Stat(path string, stats *Metrics) error {
stats.Blkio = &BlkIOStat{}
settings := []blkioStatSettings{
{
name: "throttle.io_serviced",
entry: &stats.Blkio.IoServicedRecursive,
},
{
name: "throttle.io_service_bytes",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
}
// Try to read CFQ stats available on all CFQ enabled kernels first
if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
settings = append(settings,
blkioStatSettings{
name: "sectors_recursive",
entry: &stats.Blkio.SectorsRecursive,
},
blkioStatSettings{
name: "io_service_bytes_recursive",
entry: &stats.Blkio.IoServiceBytesRecursive,
},
blkioStatSettings{
name: "io_serviced_recursive",
entry: &stats.Blkio.IoServicedRecursive,
},
blkioStatSettings{
name: "io_queued_recursive",
entry: &stats.Blkio.IoQueuedRecursive,
},
blkioStatSettings{
name: "io_service_time_recursive",
entry: &stats.Blkio.IoServiceTimeRecursive,
},
blkioStatSettings{
name: "io_wait_time_recursive",
entry: &stats.Blkio.IoWaitTimeRecursive,
},
blkioStatSettings{
name: "io_merged_recursive",
entry: &stats.Blkio.IoMergedRecursive,
},
blkioStatSettings{
name: "time_recursive",
entry: &stats.Blkio.IoTimeRecursive,
},
)
}
f, err := os.Open("/proc/diskstats")
if err != nil {
return err
}
defer f.Close()
devices, err := getDevices(f)
if err != nil {
return err
}
for _, t := range settings {
if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
return err
}
}
return nil
}
func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*BlkIOEntry) error {
f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
if err != nil {
return err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
// format: dev type amount
fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine)
if len(fields) < 3 {
if len(fields) == 2 && fields[0] == "Total" {
// skip total line
continue
} else {
return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
}
}
major, err := strconv.ParseUint(fields[0], 10, 64)
if err != nil {
return err
}
minor, err := strconv.ParseUint(fields[1], 10, 64)
if err != nil {
return err
}
op := ""
valueField := 2
if len(fields) == 4 {
op = fields[2]
valueField = 3
}
v, err := strconv.ParseUint(fields[valueField], 10, 64)
if err != nil {
return err
}
*entry = append(*entry, &BlkIOEntry{
Device: devices[deviceKey{major, minor}],
Major: major,
Minor: minor,
Op: op,
Value: v,
})
}
return nil
}
func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
settings := []blkioSettings{}
if blkio.Weight != nil {
settings = append(settings,
blkioSettings{
name: "weight",
value: blkio.Weight,
format: uintf,
})
}
if blkio.LeafWeight != nil {
settings = append(settings,
blkioSettings{
name: "leaf_weight",
value: blkio.LeafWeight,
format: uintf,
})
}
for _, wd := range blkio.WeightDevice {
if wd.Weight != nil {
settings = append(settings,
blkioSettings{
name: "weight_device",
value: wd,
format: weightdev,
})
}
if wd.LeafWeight != nil {
settings = append(settings,
blkioSettings{
name: "leaf_weight_device",
value: wd,
format: weightleafdev,
})
}
}
for _, t := range []struct {
name string
list []specs.LinuxThrottleDevice
}{
{
name: "throttle.read_bps_device",
list: blkio.ThrottleReadBpsDevice,
},
{
name: "throttle.read_iops_device",
list: blkio.ThrottleReadIOPSDevice,
},
{
name: "throttle.write_bps_device",
list: blkio.ThrottleWriteBpsDevice,
},
{
name: "throttle.write_iops_device",
list: blkio.ThrottleWriteIOPSDevice,
},
} {
for _, td := range t.list {
settings = append(settings, blkioSettings{
name: t.name,
value: td,
format: throttleddev,
})
}
}
return settings
}
type blkioSettings struct {
name string
value interface{}
format func(v interface{}) []byte
}
type blkioStatSettings struct {
name string
entry *[]*BlkIOEntry
}
func uintf(v interface{}) []byte {
return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
}
func weightdev(v interface{}) []byte {
wd := v.(specs.LinuxWeightDevice)
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight))
}
func weightleafdev(v interface{}) []byte {
wd := v.(specs.LinuxWeightDevice)
return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight))
}
func throttleddev(v interface{}) []byte {
td := v.(specs.LinuxThrottleDevice)
return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
}
func splitBlkIOStatLine(r rune) bool {
return r == ' ' || r == ':'
}
type deviceKey struct {
major, minor uint64
}
// getDevices makes a best effort attempt to read all the devices into a map
// keyed by major and minor number. Since devices may be mapped multiple times,
// we err on taking the first occurrence.
func getDevices(r io.Reader) (map[deviceKey]string, error) {
var (
s = bufio.NewScanner(r)
devices = make(map[deviceKey]string)
)
for s.Scan() {
fields := strings.Fields(s.Text())
major, err := strconv.Atoi(fields[0])
if err != nil {
return nil, err
}
minor, err := strconv.Atoi(fields[1])
if err != nil {
return nil, err
}
key := deviceKey{
major: uint64(major),
minor: uint64(minor),
}
if _, ok := devices[key]; ok {
continue
}
devices[key] = filepath.Join("/dev", fields[2])
}
return devices, s.Err()
}
func major(devNumber uint64) uint64 {
return (devNumber >> 8) & 0xfff
}
func minor(devNumber uint64) uint64 {
return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
}

532
vendor/github.com/containerd/cgroups/cgroup.go (generated, vendored)

@@ -1,532 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
)
// New returns a new control via the cgroup cgroups interface
func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources, opts ...InitOpts) (Cgroup, error) {
config := newInitConfig()
for _, o := range opts {
if err := o(config); err != nil {
return nil, err
}
}
subsystems, err := hierarchy()
if err != nil {
return nil, err
}
var active []Subsystem
for _, s := range subsystems {
// check if subsystem exists
if err := initializeSubsystem(s, path, resources); err != nil {
if err == ErrControllerNotActive {
if config.InitCheck != nil {
if skerr := config.InitCheck(s, path, err); skerr != nil {
if skerr != ErrIgnoreSubsystem {
return nil, skerr
}
}
}
continue
}
return nil, err
}
active = append(active, s)
}
return &cgroup{
path: path,
subsystems: active,
}, nil
}
// Load will load an existing cgroup and allow it to be controlled
func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) {
config := newInitConfig()
for _, o := range opts {
if err := o(config); err != nil {
return nil, err
}
}
var activeSubsystems []Subsystem
subsystems, err := hierarchy()
if err != nil {
return nil, err
}
// check that the subsystems still exist, and keep only those that actually exist
for _, s := range pathers(subsystems) {
p, err := path(s.Name())
if err != nil {
if os.IsNotExist(errors.Cause(err)) {
return nil, ErrCgroupDeleted
}
if err == ErrControllerNotActive {
if config.InitCheck != nil {
if skerr := config.InitCheck(s, path, err); skerr != nil {
if skerr != ErrIgnoreSubsystem {
return nil, skerr
}
}
}
continue
}
return nil, err
}
if _, err := os.Lstat(s.Path(p)); err != nil {
if os.IsNotExist(err) {
continue
}
return nil, err
}
activeSubsystems = append(activeSubsystems, s)
}
// if we do not have any active systems then the cgroup is deleted
if len(activeSubsystems) == 0 {
return nil, ErrCgroupDeleted
}
return &cgroup{
path: path,
subsystems: activeSubsystems,
}, nil
}
type cgroup struct {
path Path
subsystems []Subsystem
mu sync.Mutex
err error
}
// New returns a new sub cgroup
func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
path := subPath(c.path, name)
for _, s := range c.subsystems {
if err := initializeSubsystem(s, path, resources); err != nil {
return nil, err
}
}
return &cgroup{
path: path,
subsystems: c.subsystems,
}, nil
}
// Subsystems returns all the subsystems that are currently being
// consumed by the group
func (c *cgroup) Subsystems() []Subsystem {
return c.subsystems
}
// Add moves the provided process into the new cgroup
func (c *cgroup) Add(process Process) error {
if process.Pid <= 0 {
return ErrInvalidPid
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
return c.add(process)
}
func (c *cgroup) add(process Process) error {
for _, s := range pathers(c.subsystems) {
p, err := c.path(s.Name())
if err != nil {
return err
}
if err := ioutil.WriteFile(
filepath.Join(s.Path(p), cgroupProcs),
[]byte(strconv.Itoa(process.Pid)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
// AddTask moves the provided tasks (threads) into the new cgroup
func (c *cgroup) AddTask(process Process) error {
if process.Pid <= 0 {
return ErrInvalidPid
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
return c.addTask(process)
}
func (c *cgroup) addTask(process Process) error {
for _, s := range pathers(c.subsystems) {
p, err := c.path(s.Name())
if err != nil {
return err
}
if err := ioutil.WriteFile(
filepath.Join(s.Path(p), cgroupTasks),
[]byte(strconv.Itoa(process.Pid)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
// Delete will remove the control group from each of the subsystems registered
func (c *cgroup) Delete() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
var errors []string
for _, s := range c.subsystems {
if d, ok := s.(deleter); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
if err := d.Delete(sp); err != nil {
errors = append(errors, string(s.Name()))
}
continue
}
if p, ok := s.(pather); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
path := p.Path(sp)
if err := remove(path); err != nil {
errors = append(errors, path)
}
}
}
if len(errors) > 0 {
return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
}
c.err = ErrCgroupDeleted
return nil
}
// Stat returns the current metrics for the cgroup
func (c *cgroup) Stat(handlers ...ErrorHandler) (*Metrics, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
if len(handlers) == 0 {
handlers = append(handlers, errPassthrough)
}
var (
stats = &Metrics{
CPU: &CPUStat{
Throttling: &Throttle{},
Usage: &CPUUsage{},
},
}
wg = &sync.WaitGroup{}
errs = make(chan error, len(c.subsystems))
)
for _, s := range c.subsystems {
if ss, ok := s.(stater); ok {
sp, err := c.path(s.Name())
if err != nil {
return nil, err
}
wg.Add(1)
go func() {
defer wg.Done()
if err := ss.Stat(sp, stats); err != nil {
for _, eh := range handlers {
if herr := eh(err); herr != nil {
errs <- herr
}
}
}
}()
}
}
wg.Wait()
close(errs)
for err := range errs {
return nil, err
}
return stats, nil
}
// Update updates the cgroup with the new resource values provided
//
// Be prepared to handle EBUSY when trying to update a cgroup with
// live processes and other operations like Stats being performed at the
// same time
func (c *cgroup) Update(resources *specs.LinuxResources) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
for _, s := range c.subsystems {
if u, ok := s.(updater); ok {
sp, err := c.path(s.Name())
if err != nil {
return err
}
if err := u.Update(sp, resources); err != nil {
return err
}
}
}
return nil
}
// Processes returns the processes running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
return c.processes(subsystem, recursive)
}
func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
s := c.getSubsystem(subsystem)
sp, err := c.path(subsystem)
if err != nil {
return nil, err
}
path := s.(pather).Path(sp)
var processes []Process
err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !recursive && info.IsDir() {
if p == path {
return nil
}
return filepath.SkipDir
}
dir, name := filepath.Split(p)
if name != cgroupProcs {
return nil
}
procs, err := readPids(dir, subsystem)
if err != nil {
return err
}
processes = append(processes, procs...)
return nil
})
return processes, err
}
// Tasks returns the tasks running inside the cgroup along
// with the subsystem used, pid, and path
func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return nil, c.err
}
return c.tasks(subsystem, recursive)
}
func (c *cgroup) tasks(subsystem Name, recursive bool) ([]Task, error) {
s := c.getSubsystem(subsystem)
sp, err := c.path(subsystem)
if err != nil {
return nil, err
}
path := s.(pather).Path(sp)
var tasks []Task
err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !recursive && info.IsDir() {
if p == path {
return nil
}
return filepath.SkipDir
}
dir, name := filepath.Split(p)
if name != cgroupTasks {
return nil
}
procs, err := readTasksPids(dir, subsystem)
if err != nil {
return err
}
tasks = append(tasks, procs...)
return nil
})
return tasks, err
}
// Freeze freezes the entire cgroup and all the processes inside it
func (c *cgroup) Freeze() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
s := c.getSubsystem(Freezer)
if s == nil {
return ErrFreezerNotSupported
}
sp, err := c.path(Freezer)
if err != nil {
return err
}
return s.(*freezerController).Freeze(sp)
}
// Thaw thaws out the cgroup and all the processes inside it
func (c *cgroup) Thaw() error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
s := c.getSubsystem(Freezer)
if s == nil {
return ErrFreezerNotSupported
}
sp, err := c.path(Freezer)
if err != nil {
return err
}
return s.(*freezerController).Thaw(sp)
}
// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
// when processes inside the cgroup receive an oom event. Returns
// ErrMemoryNotSupported if memory cgroups is not supported.
func (c *cgroup) OOMEventFD() (uintptr, error) {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return 0, c.err
}
s := c.getSubsystem(Memory)
if s == nil {
return 0, ErrMemoryNotSupported
}
sp, err := c.path(Memory)
if err != nil {
return 0, err
}
return s.(*memoryController).OOMEventFD(sp)
}
// State returns the state of the cgroup and its processes
func (c *cgroup) State() State {
c.mu.Lock()
defer c.mu.Unlock()
c.checkExists()
if c.err != nil && c.err == ErrCgroupDeleted {
return Deleted
}
s := c.getSubsystem(Freezer)
if s == nil {
return Thawed
}
sp, err := c.path(Freezer)
if err != nil {
return Unknown
}
state, err := s.(*freezerController).state(sp)
if err != nil {
return Unknown
}
return state
}
// MoveTo does a recursive move subsystem by subsystem of all the processes
// inside the group
func (c *cgroup) MoveTo(destination Cgroup) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.err != nil {
return c.err
}
for _, s := range c.subsystems {
processes, err := c.processes(s.Name(), true)
if err != nil {
return err
}
for _, p := range processes {
if err := destination.Add(p); err != nil {
if strings.Contains(err.Error(), "no such process") {
continue
}
return err
}
}
}
return nil
}
func (c *cgroup) getSubsystem(n Name) Subsystem {
for _, s := range c.subsystems {
if s.Name() == n {
return s
}
}
return nil
}
func (c *cgroup) checkExists() {
for _, s := range pathers(c.subsystems) {
p, err := c.path(s.Name())
if err != nil {
return
}
if _, err := os.Lstat(s.Path(p)); err != nil {
if os.IsNotExist(err) {
c.err = ErrCgroupDeleted
return
}
}
}
}

88
vendor/github.com/containerd/cgroups/control.go (generated, vendored)

@@ -1,88 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"os"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
cgroupProcs = "cgroup.procs"
cgroupTasks = "tasks"
defaultDirPerm = 0755
)
// defaultFilePerm is a var so that the test framework can change the filemode
// of all files created when the tests are running. The difference between the
// tests and real world use is that files like "cgroup.procs" will exist when writing
// to a read cgroup filesystem and do not exist prior when running in the tests.
// this is set to a non 0 value in the test code
var defaultFilePerm = os.FileMode(0)
type Process struct {
// Subsystem is the name of the subsystem that the process is in
Subsystem Name
// Pid is the process id of the process
Pid int
// Path is the full path of the subsystem and location that the process is in
Path string
}
type Task struct {
// Subsystem is the name of the subsystem that the task is in
Subsystem Name
// Pid is the process id of the task
Pid int
// Path is the full path of the subsystem and location that the task is in
Path string
}
// Cgroup handles interactions with the individual groups to perform
// actions on them as them main interface to this cgroup package
type Cgroup interface {
// New creates a new cgroup under the calling cgroup
New(string, *specs.LinuxResources) (Cgroup, error)
// Add adds a process to the cgroup (cgroup.procs)
Add(Process) error
// AddTask adds a process to the cgroup (tasks)
AddTask(Process) error
// Delete removes the cgroup as a whole
Delete() error
// MoveTo moves all the processes under the calling cgroup to the provided one
// subsystems are moved one at a time
MoveTo(Cgroup) error
// Stat returns the stats for all subsystems in the cgroup
Stat(...ErrorHandler) (*Metrics, error)
// Update updates all the subsystems with the provided resource changes
Update(resources *specs.LinuxResources) error
// Processes returns all the processes in a select subsystem for the cgroup
Processes(Name, bool) ([]Process, error)
// Tasks returns all the tasks in a select subsystem for the cgroup
Tasks(Name, bool) ([]Task, error)
// Freeze freezes or pauses all processes inside the cgroup
Freeze() error
// Thaw thaw or resumes all processes inside the cgroup
Thaw() error
// OOMEventFD returns the memory subsystem's event fd for OOM events
OOMEventFD() (uintptr, error)
// State returns the cgroups current state
State() State
// Subsystems returns all the subsystems in the cgroup
Subsystems() []Subsystem
}

129
vendor/github.com/containerd/cgroups/cpu.go (generated, vendored)

@@ -1,129 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strconv"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewCpu(root string) *cpuController {
return &cpuController{
root: filepath.Join(root, string(Cpu)),
}
}
type cpuController struct {
root string
}
func (c *cpuController) Name() Name {
return Cpu
}
func (c *cpuController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
return err
}
if cpu := resources.CPU; cpu != nil {
for _, t := range []struct {
name string
ivalue *int64
uvalue *uint64
}{
{
name: "rt_period_us",
uvalue: cpu.RealtimePeriod,
},
{
name: "rt_runtime_us",
ivalue: cpu.RealtimeRuntime,
},
{
name: "shares",
uvalue: cpu.Shares,
},
{
name: "cfs_period_us",
uvalue: cpu.Period,
},
{
name: "cfs_quota_us",
ivalue: cpu.Quota,
},
} {
var value []byte
if t.uvalue != nil {
value = []byte(strconv.FormatUint(*t.uvalue, 10))
} else if t.ivalue != nil {
value = []byte(strconv.FormatInt(*t.ivalue, 10))
}
if value != nil {
if err := ioutil.WriteFile(
filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
value,
defaultFilePerm,
); err != nil {
return err
}
}
}
}
return nil
}
func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
return c.Create(path, resources)
}
func (c *cpuController) Stat(path string, stats *Metrics) error {
f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
if err != nil {
return err
}
defer f.Close()
// get or create the cpu field because cpuacct can also set values on this struct
sc := bufio.NewScanner(f)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text())
if err != nil {
return err
}
switch key {
case "nr_periods":
stats.CPU.Throttling.Periods = v
case "nr_throttled":
stats.CPU.Throttling.ThrottledPeriods = v
case "throttled_time":
stats.CPU.Throttling.ThrottledTime = v
}
}
return nil
}

121
vendor/github.com/containerd/cgroups/cpuacct.go (generated, vendored)

@@ -1,121 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
const nanosecondsInSecond = 1000000000
var clockTicks = getClockTicks()
func NewCpuacct(root string) *cpuacctController {
return &cpuacctController{
root: filepath.Join(root, string(Cpuacct)),
}
}
type cpuacctController struct {
root string
}
func (c *cpuacctController) Name() Name {
return Cpuacct
}
func (c *cpuacctController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpuacctController) Stat(path string, stats *Metrics) error {
user, kernel, err := c.getUsage(path)
if err != nil {
return err
}
total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
if err != nil {
return err
}
percpu, err := c.percpuUsage(path)
if err != nil {
return err
}
stats.CPU.Usage.Total = total
stats.CPU.Usage.User = user
stats.CPU.Usage.Kernel = kernel
stats.CPU.Usage.PerCPU = percpu
return nil
}
func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
var usage []uint64
data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
if err != nil {
return nil, err
}
for _, v := range strings.Fields(string(data)) {
u, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, err
}
usage = append(usage, u)
}
return usage, nil
}
func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
statPath := filepath.Join(c.Path(path), "cpuacct.stat")
data, err := ioutil.ReadFile(statPath)
if err != nil {
return 0, 0, err
}
fields := strings.Fields(string(data))
if len(fields) != 4 {
return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
}
for _, t := range []struct {
index int
name string
value *uint64
}{
{
index: 0,
name: "user",
value: &user,
},
{
index: 2,
name: "system",
value: &kernel,
},
} {
if fields[t.index] != t.name {
return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
}
v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
if err != nil {
return 0, 0, err
}
*t.value = v
}
return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
}

159
vendor/github.com/containerd/cgroups/cpuset.go (generated, vendored)

@@ -1,159 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewCputset(root string) *cpusetController {
return &cpusetController{
root: filepath.Join(root, string(Cpuset)),
}
}
type cpusetController struct {
root string
}
func (c *cpusetController) Name() Name {
return Cpuset
}
func (c *cpusetController) Path(path string) string {
return filepath.Join(c.root, path)
}
func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
if err := c.ensureParent(c.Path(path), c.root); err != nil {
return err
}
if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
return err
}
if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
return err
}
if resources.CPU != nil {
for _, t := range []struct {
name string
value string
}{
{
name: "cpus",
value: resources.CPU.Cpus,
},
{
name: "mems",
value: resources.CPU.Mems,
},
} {
if t.value != "" {
if err := ioutil.WriteFile(
filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
[]byte(t.value),
defaultFilePerm,
); err != nil {
return err
}
}
}
}
return nil
}
func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error {
return c.Create(path, resources)
}
func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
return
}
if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
return
}
return cpus, mems, nil
}
// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// it's parent.
func (c *cpusetController) ensureParent(current, root string) error {
parent := filepath.Dir(current)
if _, err := filepath.Rel(root, parent); err != nil {
return nil
}
// Avoid infinite recursion.
if parent == current {
return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
}
if cleanPath(parent) != root {
if err := c.ensureParent(parent, root); err != nil {
return err
}
}
if err := os.MkdirAll(current, defaultDirPerm); err != nil {
return err
}
return c.copyIfNeeded(current, parent)
}
// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
// directory to the current directory if the file's contents are 0
func (c *cpusetController) copyIfNeeded(current, parent string) error {
var (
err error
currentCpus, currentMems []byte
parentCpus, parentMems []byte
)
if currentCpus, currentMems, err = c.getValues(current); err != nil {
return err
}
if parentCpus, parentMems, err = c.getValues(parent); err != nil {
return err
}
if isEmpty(currentCpus) {
if err := ioutil.WriteFile(
filepath.Join(current, "cpuset.cpus"),
parentCpus,
defaultFilePerm,
); err != nil {
return err
}
}
if isEmpty(currentMems) {
if err := ioutil.WriteFile(
filepath.Join(current, "cpuset.mems"),
parentMems,
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func isEmpty(b []byte) bool {
return len(bytes.Trim(b, "\n")) == 0
}

93
vendor/github.com/containerd/cgroups/devices.go (generated, vendored)

@@ -1,93 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
allowDeviceFile = "devices.allow"
denyDeviceFile = "devices.deny"
wildcard = -1
)
func NewDevices(root string) *devicesController {
return &devicesController{
root: filepath.Join(root, string(Devices)),
}
}
type devicesController struct {
root string
}
func (d *devicesController) Name() Name {
return Devices
}
func (d *devicesController) Path(path string) string {
return filepath.Join(d.root, path)
}
func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
return err
}
for _, device := range resources.Devices {
file := denyDeviceFile
if device.Allow {
file = allowDeviceFile
}
if device.Type == "" {
device.Type = "a"
}
if err := ioutil.WriteFile(
filepath.Join(d.Path(path), file),
[]byte(deviceString(device)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
return d.Create(path, resources)
}
func deviceString(device specs.LinuxDeviceCgroup) string {
return fmt.Sprintf("%s %s:%s %s",
device.Type,
deviceNumber(device.Major),
deviceNumber(device.Minor),
device.Access,
)
}
func deviceNumber(number *int64) string {
if number == nil || *number == wildcard {
return "*"
}
return fmt.Sprint(*number)
}

47
vendor/github.com/containerd/cgroups/errors.go (generated, vendored)

@@ -1,47 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"errors"
"os"
)
var (
ErrInvalidPid = errors.New("cgroups: pid must be greater than 0")
ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist")
ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed")
ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system")
ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system")
ErrCgroupDeleted = errors.New("cgroups: cgroup deleted")
ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination")
)
// ErrorHandler is a function that handles and acts on errors
type ErrorHandler func(err error) error
// IgnoreNotExist ignores any errors that are for not existing files
func IgnoreNotExist(err error) error {
if os.IsNotExist(err) {
return nil
}
return err
}
func errPassthrough(err error) error {
return err
}

82
vendor/github.com/containerd/cgroups/freezer.go (generated, vendored)

@@ -1,82 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"io/ioutil"
"path/filepath"
"strings"
"time"
)
func NewFreezer(root string) *freezerController {
return &freezerController{
root: filepath.Join(root, string(Freezer)),
}
}
type freezerController struct {
root string
}
func (f *freezerController) Name() Name {
return Freezer
}
func (f *freezerController) Path(path string) string {
return filepath.Join(f.root, path)
}
func (f *freezerController) Freeze(path string) error {
return f.waitState(path, Frozen)
}
func (f *freezerController) Thaw(path string) error {
return f.waitState(path, Thawed)
}
func (f *freezerController) changeState(path string, state State) error {
return ioutil.WriteFile(
filepath.Join(f.root, path, "freezer.state"),
[]byte(strings.ToUpper(string(state))),
defaultFilePerm,
)
}
func (f *freezerController) state(path string) (State, error) {
current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
if err != nil {
return "", err
}
return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
}
func (f *freezerController) waitState(path string, state State) error {
for {
if err := f.changeState(path, state); err != nil {
return err
}
current, err := f.state(path)
if err != nil {
return err
}
if current == state {
return nil
}
time.Sleep(1 * time.Millisecond)
}
}

20
vendor/github.com/containerd/cgroups/hierarchy.go generated vendored

@ -1,20 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
// Hierarchy enables both unified and split hierarchies for cgroups
type Hierarchy func() ([]Subsystem, error)

109
vendor/github.com/containerd/cgroups/hugetlb.go generated vendored

@ -1,109 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewHugetlb(root string) (*hugetlbController, error) {
sizes, err := hugePageSizes()
if err != nil {
return nil, err
}
return &hugetlbController{
root: filepath.Join(root, string(Hugetlb)),
sizes: sizes,
}, nil
}
type hugetlbController struct {
root string
sizes []string
}
func (h *hugetlbController) Name() Name {
return Hugetlb
}
func (h *hugetlbController) Path(path string) string {
return filepath.Join(h.root, path)
}
func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
return err
}
for _, limit := range resources.HugepageLimits {
if err := ioutil.WriteFile(
filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
[]byte(strconv.FormatUint(limit.Limit, 10)),
defaultFilePerm,
); err != nil {
return err
}
}
return nil
}
func (h *hugetlbController) Stat(path string, stats *Metrics) error {
for _, size := range h.sizes {
s, err := h.readSizeStat(path, size)
if err != nil {
return err
}
stats.Hugetlb = append(stats.Hugetlb, s)
}
return nil
}
func (h *hugetlbController) readSizeStat(path, size string) (*HugetlbStat, error) {
s := HugetlbStat{
Pagesize: size,
}
for _, t := range []struct {
name string
value *uint64
}{
{
name: "usage_in_bytes",
value: &s.Usage,
},
{
name: "max_usage_in_bytes",
value: &s.Max,
},
{
name: "failcnt",
value: &s.Failcnt,
},
} {
v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
if err != nil {
return nil, err
}
*t.value = v
}
return &s, nil
}

329
vendor/github.com/containerd/cgroups/memory.go generated vendored

@ -1,329 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"syscall"
"golang.org/x/sys/unix"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewMemory(root string) *memoryController {
return &memoryController{
root: filepath.Join(root, string(Memory)),
}
}
type memoryController struct {
root string
}
func (m *memoryController) Name() Name {
return Memory
}
func (m *memoryController) Path(path string) string {
return filepath.Join(m.root, path)
}
func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Memory == nil {
return nil
}
if resources.Memory.Kernel != nil {
// Check if kernel memory is enabled
// We have to limit the kernel memory here as it won't be accounted at all
// until a limit is set on the cgroup and limit cannot be set once the
// cgroup has children, or if there are already tasks in the cgroup.
for _, i := range []int64{1, -1} {
if err := ioutil.WriteFile(
filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
[]byte(strconv.FormatInt(i, 10)),
defaultFilePerm,
); err != nil {
return checkEBUSY(err)
}
}
}
return m.set(path, getMemorySettings(resources))
}
func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
if resources.Memory == nil {
return nil
}
g := func(v *int64) bool {
return v != nil && *v > 0
}
settings := getMemorySettings(resources)
if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
// if the updated swap value is larger than the current memory limit set the swap changes first
// then set the memory limit as swap must always be larger than the current limit
current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
if err != nil {
return err
}
if current < uint64(*resources.Memory.Swap) {
settings[0], settings[1] = settings[1], settings[0]
}
}
return m.set(path, settings)
}
func (m *memoryController) Stat(path string, stats *Metrics) error {
f, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
if err != nil {
return err
}
defer f.Close()
stats.Memory = &MemoryStat{
Usage: &MemoryEntry{},
Swap: &MemoryEntry{},
Kernel: &MemoryEntry{},
KernelTCP: &MemoryEntry{},
}
if err := m.parseStats(f, stats.Memory); err != nil {
return err
}
for _, t := range []struct {
module string
entry *MemoryEntry
}{
{
module: "",
entry: stats.Memory.Usage,
},
{
module: "memsw",
entry: stats.Memory.Swap,
},
{
module: "kmem",
entry: stats.Memory.Kernel,
},
{
module: "kmem.tcp",
entry: stats.Memory.KernelTCP,
},
} {
for _, tt := range []struct {
name string
value *uint64
}{
{
name: "usage_in_bytes",
value: &t.entry.Usage,
},
{
name: "max_usage_in_bytes",
value: &t.entry.Max,
},
{
name: "failcnt",
value: &t.entry.Failcnt,
},
{
name: "limit_in_bytes",
value: &t.entry.Limit,
},
} {
parts := []string{"memory"}
if t.module != "" {
parts = append(parts, t.module)
}
parts = append(parts, tt.name)
v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
if err != nil {
return err
}
*tt.value = v
}
}
return nil
}
func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
root := m.Path(path)
f, err := os.Open(filepath.Join(root, "memory.oom_control"))
if err != nil {
return 0, err
}
defer f.Close()
fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0)
if serr != 0 {
return 0, serr
}
if err := writeEventFD(root, f.Fd(), fd); err != nil {
unix.Close(int(fd))
return 0, err
}
return fd, nil
}
func writeEventFD(root string, cfd, efd uintptr) error {
f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
if err != nil {
return err
}
_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
f.Close()
return err
}
func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error {
var (
raw = make(map[string]uint64)
sc = bufio.NewScanner(r)
line int
)
for sc.Scan() {
if err := sc.Err(); err != nil {
return err
}
key, v, err := parseKV(sc.Text())
if err != nil {
return fmt.Errorf("%d: %v", line, err)
}
raw[key] = v
line++
}
stat.Cache = raw["cache"]
stat.RSS = raw["rss"]
stat.RSSHuge = raw["rss_huge"]
stat.MappedFile = raw["mapped_file"]
stat.Dirty = raw["dirty"]
stat.Writeback = raw["writeback"]
stat.PgPgIn = raw["pgpgin"]
stat.PgPgOut = raw["pgpgout"]
stat.PgFault = raw["pgfault"]
stat.PgMajFault = raw["pgmajfault"]
stat.InactiveAnon = raw["inactive_anon"]
stat.ActiveAnon = raw["active_anon"]
stat.InactiveFile = raw["inactive_file"]
stat.ActiveFile = raw["active_file"]
stat.Unevictable = raw["unevictable"]
stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
stat.TotalCache = raw["total_cache"]
stat.TotalRSS = raw["total_rss"]
stat.TotalRSSHuge = raw["total_rss_huge"]
stat.TotalMappedFile = raw["total_mapped_file"]
stat.TotalDirty = raw["total_dirty"]
stat.TotalWriteback = raw["total_writeback"]
stat.TotalPgPgIn = raw["total_pgpgin"]
stat.TotalPgPgOut = raw["total_pgpgout"]
stat.TotalPgFault = raw["total_pgfault"]
stat.TotalPgMajFault = raw["total_pgmajfault"]
stat.TotalInactiveAnon = raw["total_inactive_anon"]
stat.TotalActiveAnon = raw["total_active_anon"]
stat.TotalInactiveFile = raw["total_inactive_file"]
stat.TotalActiveFile = raw["total_active_file"]
stat.TotalUnevictable = raw["total_unevictable"]
return nil
}
func (m *memoryController) set(path string, settings []memorySettings) error {
for _, t := range settings {
if t.value != nil {
if err := ioutil.WriteFile(
filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
[]byte(strconv.FormatInt(*t.value, 10)),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
type memorySettings struct {
name string
value *int64
}
func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
mem := resources.Memory
var swappiness *int64
if mem.Swappiness != nil {
v := int64(*mem.Swappiness)
swappiness = &v
}
return []memorySettings{
{
name: "limit_in_bytes",
value: mem.Limit,
},
{
name: "soft_limit_in_bytes",
value: mem.Reservation,
},
{
name: "memsw.limit_in_bytes",
value: mem.Swap,
},
{
name: "kmem.limit_in_bytes",
value: mem.Kernel,
},
{
name: "kmem.tcp.limit_in_bytes",
value: mem.KernelTCP,
},
{
name: "oom_control",
value: getOomControlValue(mem),
},
{
name: "swappiness",
value: swappiness,
},
}
}
func checkEBUSY(err error) error {
if pathErr, ok := err.(*os.PathError); ok {
if errNo, ok := pathErr.Err.(syscall.Errno); ok {
if errNo == unix.EBUSY {
return fmt.Errorf(
"failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children")
}
}
}
return err
}
func getOomControlValue(mem *specs.LinuxMemory) *int64 {
if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
i := int64(1)
return &i
}
return nil
}

4696
vendor/github.com/containerd/cgroups/metrics.pb.go generated vendored

Diff not shown because of its large size.

39
vendor/github.com/containerd/cgroups/named.go generated vendored

@ -1,39 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import "path/filepath"
func NewNamed(root string, name Name) *namedController {
return &namedController{
root: root,
name: name,
}
}
type namedController struct {
root string
name Name
}
func (n *namedController) Name() Name {
return n.name
}
func (n *namedController) Path(path string) string {
return filepath.Join(n.root, string(n.name), path)
}

58
vendor/github.com/containerd/cgroups/net_cls.go generated vendored

@ -1,58 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewNetCls(root string) *netclsController {
return &netclsController{
root: filepath.Join(root, string(NetCLS)),
}
}
type netclsController struct {
root string
}
func (n *netclsController) Name() Name {
return NetCLS
}
func (n *netclsController) Path(path string) string {
return filepath.Join(n.root, path)
}
func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
return ioutil.WriteFile(
filepath.Join(n.Path(path), "net_cls.classid"),
[]byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
defaultFilePerm,
)
}
return nil
}

66
vendor/github.com/containerd/cgroups/net_prio.go generated vendored

@ -1,66 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewNetPrio(root string) *netprioController {
return &netprioController{
root: filepath.Join(root, string(NetPrio)),
}
}
type netprioController struct {
root string
}
func (n *netprioController) Name() Name {
return NetPrio
}
func (n *netprioController) Path(path string) string {
return filepath.Join(n.root, path)
}
func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Network != nil {
for _, prio := range resources.Network.Priorities {
if err := ioutil.WriteFile(
filepath.Join(n.Path(path), "net_prio.ifpriomap"),
formatPrio(prio.Name, prio.Priority),
defaultFilePerm,
); err != nil {
return err
}
}
}
return nil
}
func formatPrio(name string, prio uint32) []byte {
return []byte(fmt.Sprintf("%s %d", name, prio))
}

61
vendor/github.com/containerd/cgroups/opts.go generated vendored

@ -1,61 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"github.com/pkg/errors"
)
var (
// ErrIgnoreSubsystem allows the specific subsystem to be skipped
ErrIgnoreSubsystem = errors.New("skip subsystem")
// ErrDevicesRequired is returned when the devices subsystem is required but
// does not exist or is not active
ErrDevicesRequired = errors.New("devices subsystem is required")
)
// InitOpts allows configuration for the creation or loading of a cgroup
type InitOpts func(*InitConfig) error
// InitConfig provides configuration options for the creation
// or loading of a cgroup and its subsystems
type InitConfig struct {
// InitCheck can be used to check initialization errors from the subsystem
InitCheck InitCheck
}
func newInitConfig() *InitConfig {
return &InitConfig{
InitCheck: RequireDevices,
}
}
// InitCheck allows subsystem errors to be checked when initialized or loaded
type InitCheck func(Subsystem, Path, error) error
// AllowAny allows any subsystem errors to be skipped
func AllowAny(s Subsystem, p Path, err error) error {
return ErrIgnoreSubsystem
}
// RequireDevices requires the device subsystem but no others
func RequireDevices(s Subsystem, p Path, err error) error {
if s.Name() == Devices {
return ErrDevicesRequired
}
return ErrIgnoreSubsystem
}

107
vendor/github.com/containerd/cgroups/paths.go generated vendored

@ -1,107 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"path/filepath"
"github.com/pkg/errors"
)
type Path func(subsystem Name) (string, error)
func RootPath(subsysem Name) (string, error) {
return "/", nil
}
// StaticPath returns a static path to use for all cgroups
func StaticPath(path string) Path {
return func(_ Name) (string, error) {
return path, nil
}
}
// NestedPath will nest the cgroups based on the calling process's cgroup
// placing its child processes inside its own path
func NestedPath(suffix string) Path {
paths, err := parseCgroupFile("/proc/self/cgroup")
if err != nil {
return errorPath(err)
}
return existingPath(paths, suffix)
}
// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
// This is commonly used for the Load function to restore an existing container
func PidPath(pid int) Path {
p := fmt.Sprintf("/proc/%d/cgroup", pid)
paths, err := parseCgroupFile(p)
if err != nil {
return errorPath(errors.Wrapf(err, "parse cgroup file %s", p))
}
return existingPath(paths, "")
}
// ErrControllerNotActive is returned when a controller is not supported or enabled
var ErrControllerNotActive = errors.New("controller is not supported")
func existingPath(paths map[string]string, suffix string) Path {
// localize the paths based on the root mount dest for nested cgroups
for n, p := range paths {
dest, err := getCgroupDestination(string(n))
if err != nil {
return errorPath(err)
}
rel, err := filepath.Rel(dest, p)
if err != nil {
return errorPath(err)
}
if rel == "." {
rel = dest
}
paths[n] = filepath.Join("/", rel)
}
return func(name Name) (string, error) {
root, ok := paths[string(name)]
if !ok {
if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
return "", ErrControllerNotActive
}
}
if suffix != "" {
return filepath.Join(root, suffix), nil
}
return root, nil
}
}
func subPath(path Path, subName string) Path {
return func(name Name) (string, error) {
p, err := path(name)
if err != nil {
return "", err
}
return filepath.Join(p, subName), nil
}
}
func errorPath(err error) Path {
return func(_ Name) (string, error) {
return "", err
}
}

37
vendor/github.com/containerd/cgroups/perf_event.go generated vendored

@ -1,37 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import "path/filepath"
func NewPerfEvent(root string) *PerfEventController {
return &PerfEventController{
root: filepath.Join(root, string(PerfEvent)),
}
}
type PerfEventController struct {
root string
}
func (p *PerfEventController) Name() Name {
return PerfEvent
}
func (p *PerfEventController) Path(path string) string {
return filepath.Join(p.root, path)
}

85
vendor/github.com/containerd/cgroups/pids.go generated vendored

@ -1,85 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
func NewPids(root string) *pidsController {
return &pidsController{
root: filepath.Join(root, string(Pids)),
}
}
type pidsController struct {
root string
}
func (p *pidsController) Name() Name {
return Pids
}
func (p *pidsController) Path(path string) string {
return filepath.Join(p.root, path)
}
func (p *pidsController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
return err
}
if resources.Pids != nil && resources.Pids.Limit > 0 {
return ioutil.WriteFile(
filepath.Join(p.Path(path), "pids.max"),
[]byte(strconv.FormatInt(resources.Pids.Limit, 10)),
defaultFilePerm,
)
}
return nil
}
func (p *pidsController) Update(path string, resources *specs.LinuxResources) error {
return p.Create(path, resources)
}
func (p *pidsController) Stat(path string, stats *Metrics) error {
current, err := readUint(filepath.Join(p.Path(path), "pids.current"))
if err != nil {
return err
}
var max uint64
maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
if err != nil {
return err
}
if maxS := strings.TrimSpace(string(maxData)); maxS != "max" {
if max, err = parseUint(maxS, 10, 64); err != nil {
return err
}
}
stats.Pids = &PidsStat{
Current: current,
Limit: max,
}
return nil
}

153
vendor/github.com/containerd/cgroups/rdma.go generated vendored

@ -1,153 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"io/ioutil"
"math"
"os"
"path/filepath"
"strconv"
"strings"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
type rdmaController struct {
root string
}
func (p *rdmaController) Name() Name {
return Rdma
}
func (p *rdmaController) Path(path string) string {
return filepath.Join(p.root, path)
}
func NewRdma(root string) *rdmaController {
return &rdmaController{
root: filepath.Join(root, string(Rdma)),
}
}
func createCmdString(device string, limits *specs.LinuxRdma) string {
var cmdString string
cmdString = device
if limits.HcaHandles != nil {
cmdString = cmdString + " " + "hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10)
}
if limits.HcaObjects != nil {
cmdString = cmdString + " " + "hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10)
}
return cmdString
}
func (p *rdmaController) Create(path string, resources *specs.LinuxResources) error {
if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
return err
}
for device, limit := range resources.Rdma {
if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) {
return ioutil.WriteFile(
filepath.Join(p.Path(path), "rdma.max"),
[]byte(createCmdString(device, &limit)),
defaultFilePerm,
)
}
}
return nil
}
func (p *rdmaController) Update(path string, resources *specs.LinuxResources) error {
return p.Create(path, resources)
}
func parseRdmaKV(raw string, entry *RdmaEntry) {
var value uint64
var err error
parts := strings.Split(raw, "=")
switch len(parts) {
case 2:
if parts[1] == "max" {
value = math.MaxUint32
} else {
value, err = parseUint(parts[1], 10, 32)
if err != nil {
return
}
}
if parts[0] == "hca_handle" {
entry.HcaHandles = uint32(value)
} else if parts[0] == "hca_object" {
entry.HcaObjects = uint32(value)
}
}
}
func toRdmaEntry(strEntries []string) []*RdmaEntry {
var rdmaEntries []*RdmaEntry
for i := range strEntries {
parts := strings.Fields(strEntries[i])
switch len(parts) {
case 3:
entry := new(RdmaEntry)
entry.Device = parts[0]
parseRdmaKV(parts[1], entry)
parseRdmaKV(parts[2], entry)
rdmaEntries = append(rdmaEntries, entry)
default:
continue
}
}
return rdmaEntries
}
func (p *rdmaController) Stat(path string, stats *Metrics) error {
currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current"))
if err != nil {
return err
}
currentPerDevices := strings.Split(string(currentData), "\n")
maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max"))
if err != nil {
return err
}
maxPerDevices := strings.Split(string(maxData), "\n")
// If device got removed between reading two files, ignore returning
// stats.
if len(currentPerDevices) != len(maxPerDevices) {
return nil
}
currentEntries := toRdmaEntry(currentPerDevices)
maxEntries := toRdmaEntry(maxPerDevices)
stats.Rdma = &RdmaStat{
Current: currentEntries,
Limit: maxEntries,
}
return nil
}

28
vendor/github.com/containerd/cgroups/state.go generated vendored

@ -1,28 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
// State is a type that represents the state of the current cgroup
type State string
const (
Unknown State = ""
Thawed State = "thawed"
Frozen State = "frozen"
Freezing State = "freezing"
Deleted State = "deleted"
)

112
vendor/github.com/containerd/cgroups/subsystem.go generated vendored

@ -1,112 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
// Name is a typed name for a cgroup subsystem
type Name string
const (
Devices Name = "devices"
Hugetlb Name = "hugetlb"
Freezer Name = "freezer"
Pids Name = "pids"
NetCLS Name = "net_cls"
NetPrio Name = "net_prio"
PerfEvent Name = "perf_event"
Cpuset Name = "cpuset"
Cpu Name = "cpu"
Cpuacct Name = "cpuacct"
Memory Name = "memory"
Blkio Name = "blkio"
Rdma Name = "rdma"
)
// Subsystems returns a complete list of the default cgroups
// available on most linux systems
func Subsystems() []Name {
n := []Name{
Hugetlb,
Freezer,
Pids,
NetCLS,
NetPrio,
PerfEvent,
Cpuset,
Cpu,
Cpuacct,
Memory,
Blkio,
Rdma,
}
if !isUserNS {
n = append(n, Devices)
}
return n
}
type Subsystem interface {
Name() Name
}
type pather interface {
Subsystem
Path(path string) string
}
type creator interface {
Subsystem
Create(path string, resources *specs.LinuxResources) error
}
type deleter interface {
Subsystem
Delete(path string) error
}
type stater interface {
Subsystem
Stat(path string, stats *Metrics) error
}
type updater interface {
Subsystem
Update(path string, resources *specs.LinuxResources) error
}
// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy
func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy {
return func() ([]Subsystem, error) {
subsystems, err := baseHierarchy()
if err != nil {
return nil, err
}
for _, s := range subsystems {
if s.Name() == subsystem {
return []Subsystem{
s,
}, nil
}
}
return nil, fmt.Errorf("unable to find subsystem %s", subsystem)
}
}

160
vendor/github.com/containerd/cgroups/systemd.go generated vendored

@ -1,160 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"fmt"
"path/filepath"
"strings"
"sync"
systemdDbus "github.com/coreos/go-systemd/dbus"
"github.com/godbus/dbus"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
const (
SystemdDbus Name = "systemd"
defaultSlice = "system.slice"
)
var (
canDelegate bool
once sync.Once
)
func Systemd() ([]Subsystem, error) {
root, err := v1MountPoint()
if err != nil {
return nil, err
}
defaultSubsystems, err := defaults(root)
if err != nil {
return nil, err
}
s, err := NewSystemd(root)
if err != nil {
return nil, err
}
// make sure the systemd controller is added first
return append([]Subsystem{s}, defaultSubsystems...), nil
}
func Slice(slice, name string) Path {
if slice == "" {
slice = defaultSlice
}
return func(subsystem Name) (string, error) {
return filepath.Join(slice, name), nil
}
}
func NewSystemd(root string) (*SystemdController, error) {
return &SystemdController{
root: root,
}, nil
}
type SystemdController struct {
mu sync.Mutex
root string
}
func (s *SystemdController) Name() Name {
return SystemdDbus
}
func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
conn, err := systemdDbus.New()
if err != nil {
return err
}
defer conn.Close()
slice, name := splitName(path)
// We need to see if systemd can handle the delegate property
// Systemd will return an error if it cannot handle delegate regardless
// of its bool setting.
checkDelegate := func() {
canDelegate = true
dlSlice := newProperty("Delegate", true)
if _, err := conn.StartTransientUnit(slice, "testdelegate", []systemdDbus.Property{dlSlice}, nil); err != nil {
if dbusError, ok := err.(dbus.Error); ok {
// Starting with systemd v237, Delegate is not even a property of slices anymore,
// so the D-Bus call fails with "InvalidArgs" error.
if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") {
canDelegate = false
}
}
}
conn.StopUnit(slice, "testDelegate", nil)
}
once.Do(checkDelegate)
properties := []systemdDbus.Property{
systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
systemdDbus.PropWants(slice),
newProperty("DefaultDependencies", false),
newProperty("MemoryAccounting", true),
newProperty("CPUAccounting", true),
newProperty("BlockIOAccounting", true),
}
// If we can delegate, we add the property back in
if canDelegate {
properties = append(properties, newProperty("Delegate", true))
}
ch := make(chan string)
_, err = conn.StartTransientUnit(name, "replace", properties, ch)
if err != nil {
return err
}
<-ch
return nil
}
func (s *SystemdController) Delete(path string) error {
conn, err := systemdDbus.New()
if err != nil {
return err
}
defer conn.Close()
_, name := splitName(path)
ch := make(chan string)
_, err = conn.StopUnit(name, "replace", ch)
if err != nil {
return err
}
<-ch
return nil
}
func newProperty(name string, units interface{}) systemdDbus.Property {
return systemdDbus.Property{
Name: name,
Value: dbus.MakeVariant(units),
}
}
func unitName(name string) string {
return fmt.Sprintf("%s.slice", name)
}
func splitName(path string) (slice string, unit string) {
slice, unit = filepath.Split(path)
return strings.TrimSuffix(slice, "/"), unit
}

26
vendor/github.com/containerd/cgroups/ticks.go generated vendored

@ -1,26 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
func getClockTicks() uint64 {
// The value comes from `C.sysconf(C._SC_CLK_TCK)`, and
// on Linux it's a constant which is safe to be hard coded,
// so we can avoid using cgo here.
// See https://github.com/containerd/cgroups/pull/12 for
// more details.
return 100
}

324
vendor/github.com/containerd/cgroups/utils.go generated vendored

@ -1,324 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bufio"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
units "github.com/docker/go-units"
specs "github.com/opencontainers/runtime-spec/specs-go"
)
var isUserNS = runningInUserNS()
// runningInUserNS detects whether we are currently running in a user namespace.
// Copied from github.com/lxc/lxd/shared/util.go
func runningInUserNS() bool {
file, err := os.Open("/proc/self/uid_map")
if err != nil {
// This kernel-provided file only exists if user namespaces are supported
return false
}
defer file.Close()
buf := bufio.NewReader(file)
l, _, err := buf.ReadLine()
if err != nil {
return false
}
line := string(l)
var a, b, c int64
fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
/*
* We assume we are in the initial user namespace if we have a full
* range - 4294967295 uids starting at uid 0.
*/
if a == 0 && b == 0 && c == 4294967295 {
return false
}
return true
}
// defaults returns all known groups
func defaults(root string) ([]Subsystem, error) {
h, err := NewHugetlb(root)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
s := []Subsystem{
NewNamed(root, "systemd"),
NewFreezer(root),
NewPids(root),
NewNetCls(root),
NewNetPrio(root),
NewPerfEvent(root),
NewCputset(root),
NewCpu(root),
NewCpuacct(root),
NewMemory(root),
NewBlkio(root),
NewRdma(root),
}
// only add the devices cgroup if we are not in a user namespace
// because modifications are not allowed
if !isUserNS {
s = append(s, NewDevices(root))
}
// add the hugetlb cgroup if error wasn't due to missing hugetlb
// cgroup support on the host
if err == nil {
s = append(s, h)
}
return s, nil
}
// remove will remove a cgroup path handling EAGAIN and EBUSY errors and
// retrying the remove after an exponential timeout
func remove(path string) error {
delay := 10 * time.Millisecond
for i := 0; i < 5; i++ {
if i != 0 {
time.Sleep(delay)
delay *= 2
}
if err := os.RemoveAll(path); err == nil {
return nil
}
}
return fmt.Errorf("cgroups: unable to remove path %q", path)
}
// readPids will read all the pids of processes in a cgroup by the provided path
func readPids(path string, subsystem Name) ([]Process, error) {
f, err := os.Open(filepath.Join(path, cgroupProcs))
if err != nil {
return nil, err
}
defer f.Close()
var (
out []Process
s = bufio.NewScanner(f)
)
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, err
}
out = append(out, Process{
Pid: pid,
Subsystem: subsystem,
Path: path,
})
}
}
return out, nil
}
// readTasksPids will read all the pids of tasks in a cgroup by the provided path
func readTasksPids(path string, subsystem Name) ([]Task, error) {
f, err := os.Open(filepath.Join(path, cgroupTasks))
if err != nil {
return nil, err
}
defer f.Close()
var (
out []Task
s = bufio.NewScanner(f)
)
for s.Scan() {
if t := s.Text(); t != "" {
pid, err := strconv.Atoi(t)
if err != nil {
return nil, err
}
out = append(out, Task{
Pid: pid,
Subsystem: subsystem,
Path: path,
})
}
}
return out, nil
}
func hugePageSizes() ([]string, error) {
var (
pageSizes []string
sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"}
)
files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
if err != nil {
return nil, err
}
for _, st := range files {
nameArray := strings.Split(st.Name(), "-")
pageSize, err := units.RAMInBytes(nameArray[1])
if err != nil {
return nil, err
}
pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
}
return pageSizes, nil
}
func readUint(path string) (uint64, error) {
v, err := ioutil.ReadFile(path)
if err != nil {
return 0, err
}
return parseUint(strings.TrimSpace(string(v)), 10, 64)
}
func parseUint(s string, base, bitSize int) (uint64, error) {
v, err := strconv.ParseUint(s, base, bitSize)
if err != nil {
intValue, intErr := strconv.ParseInt(s, base, bitSize)
// 1. Handle negative values greater than MinInt64 (and)
// 2. Handle negative values lesser than MinInt64
if intErr == nil && intValue < 0 {
return 0, nil
} else if intErr != nil &&
intErr.(*strconv.NumError).Err == strconv.ErrRange &&
intValue < 0 {
return 0, nil
}
return 0, err
}
return v, nil
}
func parseKV(raw string) (string, uint64, error) {
parts := strings.Fields(raw)
switch len(parts) {
case 2:
v, err := parseUint(parts[1], 10, 64)
if err != nil {
return "", 0, err
}
return parts[0], v, nil
default:
return "", 0, ErrInvalidFormat
}
}
func parseCgroupFile(path string) (map[string]string, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return parseCgroupFromReader(f)
}
func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
var (
cgroups = make(map[string]string)
s = bufio.NewScanner(r)
)
for s.Scan() {
if err := s.Err(); err != nil {
return nil, err
}
var (
text = s.Text()
parts = strings.SplitN(text, ":", 3)
)
if len(parts) < 3 {
return nil, fmt.Errorf("invalid cgroup entry: %q", text)
}
for _, subs := range strings.Split(parts[1], ",") {
if subs != "" {
cgroups[subs] = parts[2]
}
}
}
return cgroups, nil
}
func getCgroupDestination(subsystem string) (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close()
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
return "", err
}
fields := strings.Fields(s.Text())
for _, opt := range strings.Split(fields[len(fields)-1], ",") {
if opt == subsystem {
return fields[3], nil
}
}
}
return "", ErrNoCgroupMountDestination
}
func pathers(subystems []Subsystem) []pather {
var out []pather
for _, s := range subystems {
if p, ok := s.(pather); ok {
out = append(out, p)
}
}
return out
}
func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error {
if c, ok := s.(creator); ok {
p, err := path(s.Name())
if err != nil {
return err
}
if err := c.Create(p, resources); err != nil {
return err
}
} else if c, ok := s.(pather); ok {
p, err := path(s.Name())
if err != nil {
return err
}
// do the default create if the group does not have a custom one
if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil {
return err
}
}
return nil
}
func cleanPath(path string) string {
if path == "" {
return ""
}
path = filepath.Clean(path)
if !filepath.IsAbs(path) {
path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
}
return filepath.Clean(path)
}

81
vendor/github.com/containerd/cgroups/v1.go generated vendored

@ -1,81 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cgroups
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strings"
)
// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy
func V1() ([]Subsystem, error) {
root, err := v1MountPoint()
if err != nil {
return nil, err
}
subsystems, err := defaults(root)
if err != nil {
return nil, err
}
var enabled []Subsystem
for _, s := range pathers(subsystems) {
// check and remove the default groups that do not exist
if _, err := os.Lstat(s.Path("/")); err == nil {
enabled = append(enabled, s)
}
}
return enabled, nil
}
// v1MountPoint returns the mount point where the cgroup
// mountpoints are mounted in a single hierarchy
func v1MountPoint() (string, error) {
f, err := os.Open("/proc/self/mountinfo")
if err != nil {
return "", err
}
defer f.Close()
scanner := bufio.NewScanner(f)
for scanner.Scan() {
if err := scanner.Err(); err != nil {
return "", err
}
var (
text = scanner.Text()
fields = strings.Split(text, " ")
// safe as mountinfo encodes mountpoints with spaces as \040.
index = strings.Index(text, " - ")
postSeparatorFields = strings.Fields(text[index+3:])
numPostFields = len(postSeparatorFields)
)
// this is an error as we can't detect if the mount is for "cgroup"
if numPostFields == 0 {
return "", fmt.Errorf("Found no fields post '-' in %q", text)
}
if postSeparatorFields[0] == "cgroup" {
// check that the mount is properly formatted.
if numPostFields < 3 {
return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
}
return filepath.Dir(fields[4]), nil
}
}
return "", ErrMountPointNotExist
}

266
vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go generated vendored

@ -9,6 +9,7 @@ import (
types "github.com/containerd/containerd/api/types" types "github.com/containerd/containerd/api/types"
proto "github.com/gogo/protobuf/proto" proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
io "io" io "io"
math "math" math "math"
@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type ApplyRequest struct { type ApplyRequest struct {
// Diff is the descriptor of the diff to be extracted // Diff is the descriptor of the diff to be extracted
Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"`
Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"` Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
XXX_unrecognized []byte `json:"-"` XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_sizecache int32 `json:"-"` XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
} }
func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } func (m *ApplyRequest) Reset() { *m = ApplyRequest{} }
@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo
func init() { func init() {
proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry")
proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry")
@ -216,36 +219,40 @@ func init() {
} }
var fileDescriptor_3b36a99e6faaa935 = []byte{ var fileDescriptor_3b36a99e6faaa935 = []byte{
// 457 bytes of a gzipped FileDescriptorProto // 526 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c,
0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29,
0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20,
0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e,
0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9,
0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67,
0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d,
0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae,
0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca,
0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a,
0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc,
0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2,
0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63,
0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0,
0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf,
0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35,
0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73,
0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e,
0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47,
0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a,
0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13,
0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29,
0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41,
0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95,
0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec,
0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce,
0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54,
0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f,
0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef,
0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7,
0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7,
0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7,
0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00,
} }
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
i += n i += n
} }
} }
if len(m.Payloads) > 0 {
for k, _ := range m.Payloads {
dAtA[i] = 0x1a
i++
v := m.Payloads[k]
msgSize := 0
if v != nil {
msgSize = v.Size()
msgSize += 1 + sovDiff(uint64(msgSize))
}
mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize
i = encodeVarintDiff(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
i++
i = encodeVarintDiff(dAtA, i, uint64(len(k)))
i += copy(dAtA[i:], k)
if v != nil {
dAtA[i] = 0x12
i++
i = encodeVarintDiff(dAtA, i, uint64(v.Size()))
n2, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n2
}
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized) i += copy(dAtA[i:], m.XXX_unrecognized)
} }
@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0xa dAtA[i] = 0xa
i++ i++
i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size())) i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size()))
n2, err := m.Applied.MarshalTo(dAtA[i:]) n3, err := m.Applied.MarshalTo(dAtA[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n2 i += n3
} }
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized) i += copy(dAtA[i:], m.XXX_unrecognized)
@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a dAtA[i] = 0x1a
i++ i++
i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size())) i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
n3, err := m.Diff.MarshalTo(dAtA[i:]) n4, err := m.Diff.MarshalTo(dAtA[i:])
if err != nil { if err != nil {
return 0, err return 0, err
} }
i += n3 i += n4
} }
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized) i += copy(dAtA[i:], m.XXX_unrecognized)
@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) {
n += 1 + l + sovDiff(uint64(l)) n += 1 + l + sovDiff(uint64(l))
} }
} }
if len(m.Payloads) > 0 {
for k, v := range m.Payloads {
_ = k
_ = v
l = 0
if v != nil {
l = v.Size()
l += 1 + sovDiff(uint64(l))
}
mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l
n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
}
}
if m.XXX_unrecognized != nil { if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized) n += len(m.XXX_unrecognized)
} }
@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string {
if this == nil { if this == nil {
return "nil" return "nil"
} }
keysForPayloads := make([]string, 0, len(this.Payloads))
for k, _ := range this.Payloads {
keysForPayloads = append(keysForPayloads, k)
}
github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads)
mapStringForPayloads := "map[string]*types1.Any{"
for _, k := range keysForPayloads {
mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k])
}
mapStringForPayloads += "}"
s := strings.Join([]string{`&ApplyRequest{`, s := strings.Join([]string{`&ApplyRequest{`,
`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`,
`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`,
`Payloads:` + mapStringForPayloads + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`, `}`,
}, "") }, "")
@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
return err return err
} }
iNdEx = postIndex iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDiff
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthDiff
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthDiff
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Payloads == nil {
m.Payloads = make(map[string]*types1.Any)
}
var mapkey string
var mapvalue *types1.Any
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDiff
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDiff
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthDiff
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLengthDiff
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var mapmsglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowDiff
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
mapmsglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if mapmsglen < 0 {
return ErrInvalidLengthDiff
}
postmsgIndex := iNdEx + mapmsglen
if postmsgIndex < 0 {
return ErrInvalidLengthDiff
}
if postmsgIndex > l {
return io.ErrUnexpectedEOF
}
mapvalue = &types1.Any{}
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
return err
}
iNdEx = postmsgIndex
} else {
iNdEx = entryPreIndex
skippy, err := skipDiff(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthDiff
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Payloads[mapkey] = mapvalue
iNdEx = postIndex
default: default:
iNdEx = preIndex iNdEx = preIndex
skippy, err := skipDiff(dAtA[iNdEx:]) skippy, err := skipDiff(dAtA[iNdEx:])

284
vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go generated vendored

@ -10,6 +10,7 @@ import (
rpc "github.com/gogo/googleapis/google/rpc" rpc "github.com/gogo/googleapis/google/rpc"
proto "github.com/gogo/protobuf/proto" proto "github.com/gogo/protobuf/proto"
github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
types1 "github.com/gogo/protobuf/types"
grpc "google.golang.org/grpc" grpc "google.golang.org/grpc"
io "io" io "io"
math "math" math "math"
@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo
type ServerResponse struct {
UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ServerResponse) Reset() { *m = ServerResponse{} }
func (*ServerResponse) ProtoMessage() {}
func (*ServerResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_1a14fda866f10715, []int{3}
}
func (m *ServerResponse) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (m *ServerResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ServerResponse.Merge(m, src)
}
func (m *ServerResponse) XXX_Size() int {
return m.Size()
}
func (m *ServerResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ServerResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ServerResponse proto.InternalMessageInfo
func init() { func init() {
proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry")
proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse")
} }
func init() { func init() {
@ -203,38 +244,42 @@ func init() {
} }
var fileDescriptor_1a14fda866f10715 = []byte{ var fileDescriptor_1a14fda866f10715 = []byte{
// 487 bytes of a gzipped FileDescriptorProto // 549 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40,
0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10,
0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08,
0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47,
0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79,
0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e,
0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62,
0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71,
0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 0x6c, 0xb5, 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b,
0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34,
0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3,
0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d,
0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4,
0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65,
0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, 0xcb, 0x1c, 0x5e, 0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d,
0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93, 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad,
0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5,
0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1,
0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07,
0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe,
0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d,
0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87, 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6,
0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88,
0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c,
0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a,
0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a,
0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c,
0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd,
0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac,
0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0,
0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3,
0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4,
0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d,
0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50,
0xdc, 0x89, 0x04, 0x00, 0x00,
} }
// Reference imports to suppress errors if they are not otherwise used. // Reference imports to suppress errors if they are not otherwise used.
@ -254,6 +299,8 @@ type IntrospectionClient interface {
// Clients can use this to detect features and capabilities when using // Clients can use this to detect features and capabilities when using
// containerd. // containerd.
Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error)
// Server returns information about the containerd server
Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error)
} }
type introspectionClient struct { type introspectionClient struct {
@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o
return out, nil return out, nil
} }
func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) {
out := new(ServerResponse)
err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// IntrospectionServer is the server API for Introspection service. // IntrospectionServer is the server API for Introspection service.
type IntrospectionServer interface { type IntrospectionServer interface {
// Plugins returns a list of plugins in containerd. // Plugins returns a list of plugins in containerd.
@ -280,6 +336,8 @@ type IntrospectionServer interface {
// Clients can use this to detect features and capabilities when using // Clients can use this to detect features and capabilities when using
// containerd. // containerd.
Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
// Server returns information about the containerd server
Server(context.Context, *types1.Empty) (*ServerResponse, error)
} }
func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) {
@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu
return interceptor(ctx, in, info, handler) return interceptor(ctx, in, info, handler)
} }
func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(types1.Empty)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IntrospectionServer).Server(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/containerd.services.introspection.v1.Introspection/Server",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty))
}
return interceptor(ctx, in, info, handler)
}
var _Introspection_serviceDesc = grpc.ServiceDesc{ var _Introspection_serviceDesc = grpc.ServiceDesc{
ServiceName: "containerd.services.introspection.v1.Introspection", ServiceName: "containerd.services.introspection.v1.Introspection",
HandlerType: (*IntrospectionServer)(nil), HandlerType: (*IntrospectionServer)(nil),
@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{
MethodName: "Plugins", MethodName: "Plugins",
Handler: _Introspection_Plugins_Handler, Handler: _Introspection_Plugins_Handler,
}, },
{
MethodName: "Server",
Handler: _Introspection_Server_Handler,
},
}, },
Streams: []grpc.StreamDesc{}, Streams: []grpc.StreamDesc{},
Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
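The service description now registers a second unary method. A minimal sketch of what an implementation of the extended IntrospectionServer interface could look like (the introspector type and its uuid field are illustrative, not part of containerd); it would be wired up with api.RegisterIntrospectionServer(grpcServer, &introspector{uuid: "..."}):

package example

import (
	"context"

	api "github.com/containerd/containerd/api/services/introspection/v1"
	ptypes "github.com/gogo/protobuf/types"
)

// introspector is an illustrative implementation of the extended
// IntrospectionServer interface; a real service would also report plugins.
type introspector struct {
	uuid string
}

// Plugins keeps its existing signature from the generated interface.
func (i *introspector) Plugins(ctx context.Context, _ *api.PluginsRequest) (*api.PluginsResponse, error) {
	return &api.PluginsResponse{}, nil
}

// Server is the method added by this change: an Empty request in, the
// daemon identity (UUID) out.
func (i *introspector) Server(ctx context.Context, _ *ptypes.Empty) (*api.ServerResponse, error) {
	return &api.ServerResponse{UUID: i.uuid}, nil
}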
@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
return i, nil return i, nil
} }
func (m *ServerResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.UUID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID)))
i += copy(dAtA[i:], m.UUID)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 { for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80) dAtA[offset] = uint8(v&0x7f | 0x80)
@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) {
return n return n
} }
func (m *ServerResponse) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.UUID)
if l > 0 {
n += 1 + l + sovIntrospection(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovIntrospection(x uint64) (n int) { func sovIntrospection(x uint64) (n int) {
for { for {
n++ n++
@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string {
}, "") }, "")
return s return s
} }
func (this *ServerResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&ServerResponse{`,
`UUID:` + fmt.Sprintf("%v", this.UUID) + `,`,
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
`}`,
}, "")
return s
}
func valueToStringIntrospection(v interface{}) string { func valueToStringIntrospection(v interface{}) string {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.IsNil() { if rv.IsNil() {
@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
} }
return nil return nil
} }
func (m *ServerResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowIntrospection
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowIntrospection
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthIntrospection
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthIntrospection
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.UUID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipIntrospection(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthIntrospection
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthIntrospection
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipIntrospection(dAtA []byte) (n int, err error) { func skipIntrospection(dAtA []byte) (n int, err error) {
l := len(dAtA) l := len(dAtA)
iNdEx := 0 iNdEx := 0

2
vendor/github.com/containerd/containerd/archive/compression/compression.go generated vendored

@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) {
} }
} }
// CompressStream compresseses the dest with specified compression algorithm. // CompressStream compresses the dest with specified compression algorithm.
func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) {
switch compression { switch compression {
case Uncompressed: case Uncompressed:

261
vendor/github.com/containerd/containerd/archive/tar.go generated vendored

@ -19,9 +19,7 @@ package archive
import ( import (
"archive/tar" "archive/tar"
"context" "context"
"fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"runtime" "runtime"
@ -91,11 +89,6 @@ const (
// archives. // archives.
whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
// whiteoutOpaqueDir file means directory has been made opaque - meaning // whiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers. // readdir calls to this directory do not follow to lower layers.
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
@ -117,11 +110,15 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int
if options.Filter == nil { if options.Filter == nil {
options.Filter = all options.Filter = all
} }
if options.applyFunc == nil {
options.applyFunc = applyNaive
}
return apply(ctx, root, tar.NewReader(r), options) return options.applyFunc(ctx, root, tar.NewReader(r), options)
} }
// applyNaive applies a tar stream of an OCI style diff tar. // applyNaive applies a tar stream of an OCI style diff tar to a directory
// applying each file as either a whole file or whiteout.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets // See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
var ( var (
@ -131,11 +128,49 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
// may occur out of order // may occur out of order
unpackedPaths = make(map[string]struct{}) unpackedPaths = make(map[string]struct{})
// Used for aufs plink directory convertWhiteout = options.ConvertWhiteout
aufsTempdir = ""
aufsHardlinks = make(map[string]*tar.Header)
) )
if convertWhiteout == nil {
// handle whiteouts by removing the target files
convertWhiteout = func(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return false, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
return false, err
}
if strings.HasPrefix(base, whiteoutPrefix) {
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
return false, os.RemoveAll(originalPath)
}
return true, nil
}
}
// Iterate through the files in the archive. // Iterate through the files in the archive.
for { for {
select { select {
@ -193,85 +228,21 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
if base == "" { if base == "" {
parentPath = filepath.Dir(path) parentPath = filepath.Dir(path)
} }
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { if err := mkparent(ctx, parentPath, root, options.Parents); err != nil {
err = mkdirAll(parentPath, 0755)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
p, err := fs.RootPath(aufsTempdir, basename)
if err != nil {
return 0, err
}
if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
return 0, err
}
}
if hdr.Name != whiteoutOpaqueDir {
continue
}
}
if strings.HasPrefix(base, whiteoutPrefix) {
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
continue
}
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
// Ensure originalPath is under dir
if dir[len(dir)-1] != filepath.Separator {
dir += string(filepath.Separator)
}
if !strings.HasPrefix(originalPath, dir) {
return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
}
if err := os.RemoveAll(originalPath); err != nil {
return 0, err return 0, err
} }
}
// Naive whiteout convert function which handles whiteout files by
// removing the target files.
if err := validateWhiteout(path); err != nil {
return 0, err
}
writeFile, err := convertWhiteout(hdr, path)
if err != nil {
return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name)
}
if !writeFile {
continue continue
} }
// If path exists we almost always just want to remove and replace it. // If path exists we almost always just want to remove and replace it.
@ -289,26 +260,6 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO
srcData := io.Reader(tr) srcData := io.Reader(tr)
srcHdr := hdr srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("invalid aufs hardlink")
}
p, err := fs.RootPath(aufsTempdir, linkBasename)
if err != nil {
return 0, err
}
tmpFile, err := os.Open(p)
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil { if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
return 0, err return 0, err
} }
@ -428,6 +379,66 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header
return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)) return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
} }
func mkparent(ctx context.Context, path, root string, parents []string) error {
if dir, err := os.Lstat(path); err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{
Op: "mkparent",
Path: path,
Err: syscall.ENOTDIR,
}
} else if !os.IsNotExist(err) {
return err
}
i := len(path)
for i > len(root) && !os.IsPathSeparator(path[i-1]) {
i--
}
if i > len(root)+1 {
if err := mkparent(ctx, path[:i-1], root, parents); err != nil {
return err
}
}
if err := mkdir(path, 0755); err != nil {
// Check that still doesn't exist
dir, err1 := os.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
for _, p := range parents {
ppath, err := fs.RootPath(p, path[len(root):])
if err != nil {
return err
}
dir, err := os.Lstat(ppath)
if err == nil {
if !dir.IsDir() {
// Replaced, do not copy attributes
break
}
if err := copyDirInfo(dir, path); err != nil {
return err
}
return copyUpXAttrs(path, ppath)
} else if !os.IsNotExist(err) {
return err
}
}
log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path)
return nil
}
type changeWriter struct { type changeWriter struct {
tw *tar.Writer tw *tar.Writer
source string source string
@ -493,6 +504,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
// truncate timestamp for compatibility. without PAX stdlib rounds timestamps instead
hdr.Format = tar.FormatPAX
hdr.ModTime = hdr.ModTime.Truncate(time.Second)
hdr.AccessTime = time.Time{}
hdr.ChangeTime = time.Time{}
name := p name := p
if strings.HasPrefix(name, string(filepath.Separator)) { if strings.HasPrefix(name, string(filepath.Separator)) {
name, err = filepath.Rel(string(filepath.Separator), name) name, err = filepath.Rel(string(filepath.Separator), name)
@ -598,6 +615,9 @@ func (cw *changeWriter) Close() error {
} }
func (cw *changeWriter) includeParents(hdr *tar.Header) error { func (cw *changeWriter) includeParents(hdr *tar.Header) error {
if cw.addedDirs == nil {
return nil
}
name := strings.TrimRight(hdr.Name, "/") name := strings.TrimRight(hdr.Name, "/")
fname := filepath.Join(cw.source, name) fname := filepath.Join(cw.source, name)
parent := filepath.Dir(name) parent := filepath.Dir(name)
@ -684,3 +704,26 @@ func hardlinkRootPath(root, linkname string) (string, error) {
} }
return targetPath, nil return targetPath, nil
} }
func validateWhiteout(path string) error {
base := filepath.Base(path)
dir := filepath.Dir(path)
if base == whiteoutOpaqueDir {
return nil
}
if strings.HasPrefix(base, whiteoutPrefix) {
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
// Ensure originalPath is under dir
if dir[len(dir)-1] != filepath.Separator {
dir += string(filepath.Separator)
}
if !strings.HasPrefix(originalPath, dir) {
return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base)
}
}
return nil
}

38
vendor/github.com/containerd/containerd/archive/tar_opts.go generated vendored

@ -16,7 +16,19 @@
package archive package archive
import "archive/tar" import (
"archive/tar"
"context"
)
// ApplyOptions provides additional options for an Apply operation
type ApplyOptions struct {
Filter Filter // Filter tar headers
ConvertWhiteout ConvertWhiteout // Convert whiteout files
Parents []string // Parent directories to handle inherited attributes without CoW
applyFunc func(context.Context, string, *tar.Reader, ApplyOptions) (int64, error)
}
// ApplyOpt allows setting mutable archive apply properties on creation // ApplyOpt allows setting mutable archive apply properties on creation
type ApplyOpt func(options *ApplyOptions) error type ApplyOpt func(options *ApplyOptions) error
@ -24,6 +36,9 @@ type ApplyOpt func(options *ApplyOptions) error
// Filter specific files from the archive // Filter specific files from the archive
type Filter func(*tar.Header) (bool, error) type Filter func(*tar.Header) (bool, error)
// ConvertWhiteout converts whiteout files from the archive
type ConvertWhiteout func(*tar.Header, string) (bool, error)
// all allows all files // all allows all files
func all(_ *tar.Header) (bool, error) { func all(_ *tar.Header) (bool, error) {
return true, nil return true, nil
@ -36,3 +51,24 @@ func WithFilter(f Filter) ApplyOpt {
return nil return nil
} }
} }
// WithConvertWhiteout uses the convert function to convert the whiteout files.
func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt {
return func(options *ApplyOptions) error {
options.ConvertWhiteout = c
return nil
}
}
// WithParents provides parent directories for resolving inherited attributes
// directory from the filesystem.
// Inherited attributes are searched from first to last, making the first
// element in the list the most immediate parent directory.
// NOTE: When applying to a filesystem which supports CoW, file attributes
// should be inherited by the filesystem.
func WithParents(p []string) ApplyOpt {
return func(options *ApplyOptions) error {
options.Parents = p
return nil
}
}
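Taken together, the new options compose directly with Apply. A minimal sketch for an overlay-backed target, assuming root access on Linux (the layer tar and directory paths are purely illustrative):

// +build linux

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd/archive"
)

func main() {
	// The layer tar and directory paths below are illustrative.
	layer, err := os.Open("/tmp/layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// Apply the (uncompressed) layer into an overlay upperdir: whiteout
	// entries are rewritten for overlayfs, and attributes of any parent
	// directories that must be created are inherited from the lower dirs.
	_, err = archive.Apply(context.Background(), "/var/lib/example/upper", layer,
		archive.WithConvertWhiteout(archive.OverlayConvertWhiteout),
		archive.WithParents([]string{"/var/lib/example/lower"}),
	)
	if err != nil {
		log.Fatal(err)
	}
}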

59
vendor/github.com/containerd/containerd/archive/tar_opts_linux.go generated vendored Normal file

@ -0,0 +1,59 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"golang.org/x/sys/unix"
)
// AufsConvertWhiteout converts whiteout files for aufs.
func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) {
return true, nil
}
// OverlayConvertWhiteout converts whiteout files for overlay.
func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque, we need to translate that to overlay
if base == whiteoutOpaqueDir {
// don't write the file itself
return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, whiteoutPrefix) {
originalBase := base[len(whiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
return false, err
}
// don't write the file itself
return false, os.Chown(originalPath, hdr.Uid, hdr.Gid)
}
return true, nil
}
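The two converters target different snapshotter layouts: aufs layers already carry .wh. whiteout files natively, while overlay needs them rewritten into character devices and trusted.overlay.opaque xattrs. A hypothetical helper (pickWhiteoutConverter is not part of containerd) that selects a converter per snapshotter might look like this:

// +build linux

package example

import "github.com/containerd/containerd/archive"

// pickWhiteoutConverter is a hypothetical helper: it passes aufs whiteouts
// through untouched and rewrites them for overlayfs.
func pickWhiteoutConverter(snapshotter string) archive.ConvertWhiteout {
	switch snapshotter {
	case "overlayfs":
		return archive.OverlayConvertWhiteout
	case "aufs":
		return archive.AufsConvertWhiteout
	default:
		// nil keeps the default behaviour in applyNaive: whiteout entries
		// simply remove the corresponding target paths.
		return nil
	}
}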

24
vendor/github.com/containerd/containerd/archive/tar_opts_unix.go generated vendored

@ -1,24 +0,0 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package archive
// ApplyOptions provides additional options for an Apply operation
type ApplyOptions struct {
Filter Filter // Filter tar headers
}

18
vendor/github.com/containerd/containerd/archive/tar_opts_windows.go generated vendored

@ -18,28 +18,12 @@
package archive package archive
// ApplyOptions provides additional options for an Apply operation
type ApplyOptions struct {
ParentLayerPaths []string // Parent layer paths used for Windows layer apply
IsWindowsContainerLayer bool // True if the tar stream to be applied is a Windows Container Layer
Filter Filter // Filter tar headers
}
// WithParentLayers adds parent layers to the apply process this is required
// for all Windows layers except the base layer.
func WithParentLayers(parentPaths []string) ApplyOpt {
return func(options *ApplyOptions) error {
options.ParentLayerPaths = parentPaths
return nil
}
}
// AsWindowsContainerLayer indicates that the tar stream to apply is that of // AsWindowsContainerLayer indicates that the tar stream to apply is that of
// a Windows Container Layer. The caller must be holding SeBackupPrivilege and // a Windows Container Layer. The caller must be holding SeBackupPrivilege and
// SeRestorePrivilege. // SeRestorePrivilege.
func AsWindowsContainerLayer() ApplyOpt { func AsWindowsContainerLayer() ApplyOpt {
return func(options *ApplyOptions) error { return func(options *ApplyOptions) error {
options.IsWindowsContainerLayer = true options.applyFunc = applyWindowsLayer
return nil return nil
} }
} }
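On Windows the reworked option now routes Apply through the private applyFunc, and parent layer paths come in through the generic WithParents option rather than the removed WithParentLayers. A sketch under those assumptions (paths are illustrative):

// +build windows

package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd/archive"
)

func main() {
	// Paths are illustrative; the caller must hold SeBackupPrivilege and
	// SeRestorePrivilege for the hcsshim layer writer to succeed.
	layer, err := os.Open(`C:\layers\layer.tar`)
	if err != nil {
		log.Fatal(err)
	}
	defer layer.Close()

	// AsWindowsContainerLayer switches Apply to the hcsshim-based writer;
	// parent layer paths are passed via WithParents.
	_, err = archive.Apply(context.Background(), `C:\layers\new-layer`, layer,
		archive.AsWindowsContainerLayer(),
		archive.WithParents([]string{`C:\layers\base-layer`}),
	)
	if err != nil {
		log.Fatal(err)
	}
}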

77
vendor/github.com/containerd/containerd/archive/tar_unix.go generated vendored

@ -20,11 +20,12 @@ package archive
import ( import (
"archive/tar" "archive/tar"
"context"
"os" "os"
"strings"
"sync" "sync"
"syscall" "syscall"
"github.com/containerd/continuity/fs"
"github.com/containerd/continuity/sysx" "github.com/containerd/continuity/sysx"
"github.com/opencontainers/runc/libcontainer/system" "github.com/opencontainers/runc/libcontainer/system"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -74,10 +75,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return f, err return f, err
} }
func mkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func mkdir(path string, perm os.FileMode) error { func mkdir(path string, perm os.FileMode) error {
if err := os.Mkdir(path, perm); err != nil { if err := os.Mkdir(path, perm); err != nil {
return err return err
@ -149,11 +146,71 @@ func getxattr(path, attr string) ([]byte, error) {
} }
func setxattr(path, key, value string) error { func setxattr(path, key, value string) error {
return sysx.LSetxattr(path, key, []byte(value), 0) // Do not set trusted attributes
if strings.HasPrefix(key, "trusted.") {
return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported")
}
return unix.Lsetxattr(path, key, []byte(value), 0)
} }
// apply applies a tar stream of an OCI style diff tar. func copyDirInfo(fi os.FileInfo, path string) error {
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets st := fi.Sys().(*syscall.Stat_t)
func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil {
return applyNaive(ctx, root, tr, options) if os.IsPermission(err) {
// Normally if uid/gid are the same this would be a no-op, but some
// filesystems may still return EPERM... for instance NFS does this.
// In such a case, this is not an error.
if dstStat, err2 := os.Lstat(path); err2 == nil {
st2 := dstStat.Sys().(*syscall.Stat_t)
if st.Uid == st2.Uid && st.Gid == st2.Gid {
err = nil
}
}
}
if err != nil {
return errors.Wrapf(err, "failed to chown %s", path)
}
}
if err := os.Chmod(path, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", path)
}
timespec := []unix.Timespec{unix.Timespec(fs.StatAtime(st)), unix.Timespec(fs.StatMtime(st))}
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrapf(err, "failed to utime %s", path)
}
return nil
}
func copyUpXAttrs(dst, src string) error {
xattrKeys, err := sysx.LListxattr(src)
if err != nil {
if err == unix.ENOTSUP || err == sysx.ENODATA {
return nil
}
return errors.Wrapf(err, "failed to list xattrs on %s", src)
}
for _, xattr := range xattrKeys {
// Do not copy up trusted attributes
if strings.HasPrefix(xattr, "trusted.") {
continue
}
data, err := sysx.LGetxattr(src, xattr)
if err != nil {
if err == unix.ENOTSUP || err == sysx.ENODATA {
continue
}
return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
}
if err := unix.Lsetxattr(dst, xattr, data, unix.XATTR_CREATE); err != nil {
if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST {
continue
}
return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
}
}
return nil
} }

31
vendor/github.com/containerd/containerd/archive/tar_windows.go generated vendored

@ -23,7 +23,6 @@ import (
"bufio" "bufio"
"context" "context"
"encoding/base64" "encoding/base64"
"errors"
"fmt" "fmt"
"io" "io"
"os" "os"
@ -36,6 +35,7 @@ import (
"github.com/Microsoft/go-winio" "github.com/Microsoft/go-winio"
"github.com/Microsoft/hcsshim" "github.com/Microsoft/hcsshim"
"github.com/containerd/containerd/sys" "github.com/containerd/containerd/sys"
"github.com/pkg/errors"
) )
const ( const (
@ -107,10 +107,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return sys.OpenFileSequential(name, flag, perm) return sys.OpenFileSequential(name, flag, perm)
} }
func mkdirAll(path string, perm os.FileMode) error {
return sys.MkdirAll(path, perm)
}
func mkdir(path string, perm os.FileMode) error { func mkdir(path string, perm os.FileMode) error {
return os.Mkdir(path, perm) return os.Mkdir(path, perm)
} }
@ -153,16 +149,8 @@ func setxattr(path, key, value string) error {
return errors.New("xattrs not supported on Windows") return errors.New("xattrs not supported on Windows")
} }
// apply applies a tar stream of an OCI style diff tar of a Windows layer. // applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets // layer using the hcsshim layer writer and backup streams.
func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
if options.IsWindowsContainerLayer {
return applyWindowsLayer(ctx, root, tr, options)
}
return applyNaive(ctx, root, tr, options)
}
// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer.
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets // See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) {
home, id := filepath.Split(root) home, id := filepath.Split(root)
@ -170,7 +158,7 @@ func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options
HomeDir: home, HomeDir: home,
} }
w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths) w, err := hcsshim.NewLayerWriter(info, id, options.Parents)
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -443,3 +431,14 @@ func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (
} }
} }
} }
func copyDirInfo(fi os.FileInfo, path string) error {
if err := os.Chmod(path, fi.Mode()); err != nil {
return errors.Wrapf(err, "failed to chmod %s", path)
}
return nil
}
func copyUpXAttrs(dst, src string) error {
return nil
}

2
vendor/github.com/containerd/containerd/archive/time_unix.go generated vendored

@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error {
utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
return errors.Wrap(err, "failed call to UtimesNanoAt") return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path)
} }
return nil return nil

20
vendor/github.com/containerd/containerd/cio/io_unix.go generated vendored

@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) {
} }
var wg = &sync.WaitGroup{} var wg = &sync.WaitGroup{}
wg.Add(1) if fifos.Stdout != "" {
go func() { wg.Add(1)
p := bufPool.Get().(*[]byte) go func() {
defer bufPool.Put(p) p := bufPool.Get().(*[]byte)
defer bufPool.Put(p)
io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p)
pipes.Stdout.Close() pipes.Stdout.Close()
wg.Done() wg.Done()
}() }()
}
if !fifos.Terminal { if !fifos.Terminal && fifos.Stderr != "" {
wg.Add(1) wg.Add(1)
go func() { go func() {
p := bufPool.Get().(*[]byte) p := bufPool.Get().(*[]byte)

84
vendor/github.com/containerd/containerd/client.go generated vendored

@ -57,6 +57,7 @@ import (
"github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots"
snproxy "github.com/containerd/containerd/snapshots/proxy" snproxy "github.com/containerd/containerd/snapshots/proxy"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/gogo/protobuf/types"
ptypes "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
specs "github.com/opencontainers/runtime-spec/specs-go" specs "github.com/opencontainers/runtime-spec/specs-go"
@ -88,12 +89,20 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
copts.timeout = 10 * time.Second copts.timeout = 10 * time.Second
} }
c := &Client{} c := &Client{
defaultns: copts.defaultns,
}
if copts.defaultRuntime != "" { if copts.defaultRuntime != "" {
c.runtime = copts.defaultRuntime c.runtime = copts.defaultRuntime
} else { } else {
c.runtime = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS) c.runtime = defaults.DefaultRuntime
}
if copts.defaultPlatform != nil {
c.platform = copts.defaultPlatform
} else {
c.platform = platforms.Default()
} }
if copts.services != nil { if copts.services != nil {
@ -137,13 +146,12 @@ func New(address string, opts ...ClientOpt) (*Client, error) {
c.conn, c.connector = conn, connector c.conn, c.connector = conn, connector
} }
if copts.services == nil && c.conn == nil { if copts.services == nil && c.conn == nil {
return nil, errors.New("no grpc connection or services is available") return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available")
} }
// check namespace labels for default runtime // check namespace labels for default runtime
if copts.defaultRuntime == "" && copts.defaultns != "" { if copts.defaultRuntime == "" && c.defaultns != "" {
ctx := namespaces.WithNamespace(context.Background(), copts.defaultns) if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
if label, err := c.GetLabel(ctx, defaults.DefaultRuntimeNSLabel); err != nil {
return nil, err return nil, err
} else if label != "" { } else if label != "" {
c.runtime = label c.runtime = label
@ -163,14 +171,14 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
} }
} }
c := &Client{ c := &Client{
conn: conn, defaultns: copts.defaultns,
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), conn: conn,
runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
} }
// check namespace labels for default runtime // check namespace labels for default runtime
if copts.defaultRuntime == "" && copts.defaultns != "" { if copts.defaultRuntime == "" && c.defaultns != "" {
ctx := namespaces.WithNamespace(context.Background(), copts.defaultns) if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil {
if label, err := c.GetLabel(ctx, defaults.DefaultRuntimeNSLabel); err != nil {
return nil, err return nil, err
} else if label != "" { } else if label != "" {
c.runtime = label c.runtime = label
@ -190,13 +198,15 @@ type Client struct {
connMu sync.Mutex connMu sync.Mutex
conn *grpc.ClientConn conn *grpc.ClientConn
runtime string runtime string
defaultns string
platform platforms.MatchComparer
connector func() (*grpc.ClientConn, error) connector func() (*grpc.ClientConn, error)
} }
// Reconnect re-establishes the GRPC connection to the containerd daemon // Reconnect re-establishes the GRPC connection to the containerd daemon
func (c *Client) Reconnect() error { func (c *Client) Reconnect() error {
if c.connector == nil { if c.connector == nil {
return errors.New("unable to reconnect to containerd, no connector available") return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available")
} }
c.connMu.Lock() c.connMu.Lock()
defer c.connMu.Unlock() defer c.connMu.Unlock()
@ -219,7 +229,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) {
c.connMu.Lock() c.connMu.Lock()
if c.conn == nil { if c.conn == nil {
c.connMu.Unlock() c.connMu.Unlock()
return false, errors.New("no grpc connection available") return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
} }
c.connMu.Unlock() c.connMu.Unlock()
r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true))
@ -291,10 +301,14 @@ type RemoteContext struct {
PlatformMatcher platforms.MatchComparer PlatformMatcher platforms.MatchComparer
// Unpack is done after an image is pulled to extract into a snapshotter. // Unpack is done after an image is pulled to extract into a snapshotter.
// It is done simultaneously for schema 2 images when they are pulled.
// If an image is not unpacked on pull, it can be unpacked any time // If an image is not unpacked on pull, it can be unpacked any time
// afterwards. Unpacking is required to run an image. // afterwards. Unpacking is required to run an image.
Unpack bool Unpack bool
// UnpackOpts handles options to the unpack call.
UnpackOpts []UnpackOpt
// Snapshotter used for unpacking // Snapshotter used for unpacking
Snapshotter string Snapshotter string
@ -326,9 +340,8 @@ type RemoteContext struct {
// MaxConcurrentDownloads is the max concurrent content downloads for each pull. // MaxConcurrentDownloads is the max concurrent content downloads for each pull.
MaxConcurrentDownloads int MaxConcurrentDownloads int
// AppendDistributionSourceLabel allows fetcher to add distribute source // AllMetadata downloads all manifests and known-configuration files
// label for each blob content, which doesn't work for legacy schema1. AllMetadata bool
AppendDistributionSourceLabel bool
} }
func defaultRemoteContext() *RemoteContext { func defaultRemoteContext() *RemoteContext {
@ -350,7 +363,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag
} }
if fetchCtx.Unpack { if fetchCtx.Unpack {
return images.Image{}, errors.New("unpack on fetch not supported, try pull") return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull")
} }
if fetchCtx.PlatformMatcher == nil { if fetchCtx.PlatformMatcher == nil {
@ -403,6 +416,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor,
} }
} }
// Annotate ref with its digest so that only the tag for this single digest is pushed
if !strings.Contains(ref, "@") {
ref = ref + "@" + desc.Digest.String()
}
pusher, err := pushCtx.Resolver.Pusher(ctx, ref) pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
if err != nil { if err != nil {
return err return err
@ -491,7 +509,10 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s
func (c *Client) GetLabel(ctx context.Context, label string) (string, error) { func (c *Client) GetLabel(ctx context.Context, label string) (string, error) {
ns, err := namespaces.NamespaceRequired(ctx) ns, err := namespaces.NamespaceRequired(ctx)
if err != nil { if err != nil {
return "", err if c.defaultns == "" {
return "", err
}
ns = c.defaultns
} }
srv := c.NamespaceService() srv := c.NamespaceService()
@ -557,6 +578,10 @@ func (c *Client) ContentStore() content.Store {
// SnapshotService returns the underlying snapshotter for the provided snapshotter name // SnapshotService returns the underlying snapshotter for the provided snapshotter name
func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter {
snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName)
if err != nil {
snapshotterName = DefaultSnapshotter
}
if c.snapshotters != nil { if c.snapshotters != nil {
return c.snapshotters[snapshotterName] return c.snapshotters[snapshotterName]
} }
@ -656,7 +681,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
c.connMu.Lock() c.connMu.Lock()
if c.conn == nil { if c.conn == nil {
c.connMu.Unlock() c.connMu.Unlock()
return Version{}, errors.New("no grpc connection available") return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
} }
c.connMu.Unlock() c.connMu.Unlock()
response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) response, err := c.VersionService().Version(ctx, &ptypes.Empty{})
@ -669,6 +694,27 @@ func (c *Client) Version(ctx context.Context) (Version, error) {
}, nil }, nil
} }
type ServerInfo struct {
UUID string
}
func (c *Client) Server(ctx context.Context) (ServerInfo, error) {
c.connMu.Lock()
if c.conn == nil {
c.connMu.Unlock()
return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available")
}
c.connMu.Unlock()
response, err := c.IntrospectionService().Server(ctx, &types.Empty{})
if err != nil {
return ServerInfo{}, err
}
return ServerInfo{
UUID: response.UUID,
}, nil
}
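The new Server method wraps the introspection RPC added above and surfaces the daemon UUID. A minimal usage sketch (socket path and namespace are the usual defaults and only illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// Server calls the Introspection/Server RPC and returns the daemon UUID.
	info, err := client.Server(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("containerd server UUID:", info.UUID)
}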
func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) { func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) {
if name == "" { if name == "" {
label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel) label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel)

26
vendor/github.com/containerd/containerd/client_opts.go generated vendored

@ -26,11 +26,12 @@ import (
) )
type clientOpts struct { type clientOpts struct {
defaultns string defaultns string
defaultRuntime string defaultRuntime string
services *services defaultPlatform platforms.MatchComparer
dialOptions []grpc.DialOption services *services
timeout time.Duration dialOptions []grpc.DialOption
timeout time.Duration
} }
// ClientOpt allows callers to set options on the containerd client // ClientOpt allows callers to set options on the containerd client
@ -55,6 +56,14 @@ func WithDefaultRuntime(rt string) ClientOpt {
} }
} }
// WithDefaultPlatform sets the default platform matcher on the client
func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt {
return func(c *clientOpts) error {
c.defaultPlatform = platform
return nil
}
}
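The platform matcher set here feeds the client.platform field used when resolving image root filesystems. A sketch, assuming the platforms and image-spec packages (socket path and namespace are illustrative):

package main

import (
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Pin image and snapshot operations made through this client to
	// linux/amd64 instead of relying on platforms.Default().
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"),
		containerd.WithDefaultPlatform(platforms.Only(ocispec.Platform{
			OS:           "linux",
			Architecture: "amd64",
		})),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}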
// WithDialOpts allows grpc.DialOptions to be set on the connection // WithDialOpts allows grpc.DialOptions to be set on the connection
func WithDialOpts(opts []grpc.DialOption) ClientOpt { func WithDialOpts(opts []grpc.DialOption) ClientOpt {
return func(c *clientOpts) error { return func(c *clientOpts) error {
@ -195,11 +204,10 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt {
} }
} }
// WithAppendDistributionSourceLabel allows fetcher to add distribute source // WithAllMetadata downloads all manifests and known-configuration files
// label for each blob content, which doesn't work for legacy schema1. func WithAllMetadata() RemoteOpt {
func WithAppendDistributionSourceLabel() RemoteOpt {
return func(_ *Client, c *RemoteContext) error { return func(_ *Client, c *RemoteContext) error {
c.AppendDistributionSourceLabel = true c.AllMetadata = true
return nil return nil
} }
} }
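Per its comment, WithAllMetadata asks the fetcher to keep all manifests and known-configuration blobs during a pull. A minimal pull sketch (registry reference and namespace are illustrative):

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example")

	// Pull, unpack into the default snapshotter, and retain all manifests
	// and known-configuration blobs in the content store.
	img, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithAllMetadata(),
	)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("pulled", img.Name())
}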

5
vendor/github.com/containerd/containerd/container.go generated vendored

@ -25,6 +25,7 @@ import (
"github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/services/tasks/v1"
"github.com/containerd/containerd/api/types" "github.com/containerd/containerd/api/types"
tasktypes "github.com/containerd/containerd/api/types/task"
"github.com/containerd/containerd/cio" "github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
@ -382,7 +383,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er
return nil, err return nil, err
} }
var i cio.IO var i cio.IO
if ioAttach != nil { if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown {
// Do not attach IO for task in unknown state, because there
// are no fifo paths anyway.
if i, err = attachExistingIO(response, ioAttach); err != nil { if i, err = attachExistingIO(response, ioAttach); err != nil {
return nil, err return nil, err
} }
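The call path affected here is Container.Task with an attach function: with this change the attach is skipped for tasks in an unknown state, since no fifos exist for them. A small sketch of that call (attachTask is an illustrative helper, not a containerd API):

package example

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/cio"
)

// attachTask loads a container's existing task and re-attaches stdio; the
// attach function is only invoked when the task state is known.
func attachTask(ctx context.Context, container containerd.Container) (containerd.Task, error) {
	return container.Task(ctx, cio.NewAttach(cio.WithStdio))
}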

15
vendor/github.com/containerd/containerd/container_opts.go generated vendored

@ -22,7 +22,6 @@ import (
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci" "github.com/containerd/containerd/oci"
"github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/snapshots" "github.com/containerd/containerd/snapshots"
"github.com/containerd/typeurl" "github.com/containerd/typeurl"
"github.com/gogo/protobuf/types" "github.com/gogo/protobuf/types"
@ -78,6 +77,14 @@ func WithImage(i Image) NewContainerOpts {
} }
} }
// WithImageName allows setting the image name as the base for the container
func WithImageName(n string) NewContainerOpts {
return func(ctx context.Context, _ *Client, c *containers.Container) error {
c.Image = n
return nil
}
}
// WithContainerLabels adds the provided labels to the container // WithContainerLabels adds the provided labels to the container
func WithContainerLabels(labels map[string]string) NewContainerOpts { func WithContainerLabels(labels map[string]string) NewContainerOpts {
return func(_ context.Context, _ *Client, c *containers.Container) error { return func(_ context.Context, _ *Client, c *containers.Container) error {
@ -171,7 +178,9 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
if err != nil { if err != nil {
return err return err
} }
return s.Remove(ctx, c.SnapshotKey) if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) {
return err
}
} }
return nil return nil
} }
@ -180,7 +189,7 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
// root filesystem in read-only mode // root filesystem in read-only mode
func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error { return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
if err != nil { if err != nil {
return err return err
} }
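A short sketch of the new `WithImageName` container option, which records an image name on the container without resolving an `Image` object first; the container ID, image reference, and spec used here are assumptions for illustration.

```go
package demo

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/oci"
)

// createWithImageName creates a container record that carries an image name
// directly, without holding an Image handle; the caller supplies client and ctx.
func createWithImageName(ctx context.Context, client *containerd.Client) (containerd.Container, error) {
	return client.NewContainer(ctx, "demo-container",
		containerd.WithImageName("docker.io/library/alpine:latest"),
		containerd.WithNewSpec(oci.WithProcessArgs("/bin/sh")),
	)
}
```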

3
vendor/github.com/containerd/containerd/container_opts_unix.go generated vendored

@ -28,7 +28,6 @@ import (
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/containerd/containerd/platforms"
"github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/identity"
) )
@ -45,7 +44,7 @@ func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerO
func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
return func(ctx context.Context, client *Client, c *containers.Container) error { return func(ctx context.Context, client *Client, c *containers.Container) error {
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
if err != nil { if err != nil {
return err return err
} }

3
vendor/github.com/containerd/containerd/container_restore_opts.go generated vendored

@ -22,7 +22,6 @@ import (
"github.com/containerd/containerd/containers" "github.com/containerd/containerd/containers"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/proto"
ptypes "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types"
"github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/identity"
@ -58,7 +57,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint
return err return err
} }
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform)
if err != nil { if err != nil {
return err return err
} }

2
vendor/github.com/containerd/containerd/containers/containers.go generated vendored

@ -49,7 +49,7 @@ type Container struct {
// This property is required and immutable. // This property is required and immutable.
Runtime RuntimeInfo Runtime RuntimeInfo
// Spec should carry the the runtime specification used to implement the // Spec should carry the runtime specification used to implement the
// container. // container.
// //
// This field is required but mutable. // This field is required but mutable.

22
vendor/github.com/containerd/containerd/content/helpers.go generated vendored

@ -169,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
return err return err
} }
// CopyReader copies to a writer from a given reader, returning
// the number of bytes copied.
// Note: if the writer has a non-zero offset, the total number
// of bytes read may be greater than those copied if the reader
// is not an io.Seeker.
// This copy does not commit the writer.
func CopyReader(cw Writer, r io.Reader) (int64, error) {
ws, err := cw.Status()
if err != nil {
return 0, errors.Wrap(err, "failed to get status")
}
if ws.Offset > 0 {
r, err = seekReader(r, ws.Offset, 0)
if err != nil {
return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
}
}
return copyWithBuffer(cw, r)
}
// seekReader attempts to seek the reader to the given offset, either by // seekReader attempts to seek the reader to the given offset, either by
// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding
// up to the given offset. // up to the given offset.
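A sketch of the new `content.CopyReader` helper, which streams from a plain reader into an open content writer and resumes from the writer's offset when the reader allows it; the ref name is an assumption, and committing the writer is left to the caller.

```go
package demo

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
)

// ingest copies r into the content store under ref without committing;
// CopyReader seeks or discards up to the writer's current offset on resume.
func ingest(ctx context.Context, cs content.Store, ref string, r io.Reader) (int64, error) {
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return 0, err
	}
	defer w.Close()

	return content.CopyReader(w, r)
}
```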

2
vendor/github.com/containerd/containerd/defaults/defaults_unix.go generated vendored

@ -32,4 +32,6 @@ const (
// DefaultFIFODir is the default location used by client-side cio library // DefaultFIFODir is the default location used by client-side cio library
// to store FIFOs. // to store FIFOs.
DefaultFIFODir = "/run/containerd/fifo" DefaultFIFODir = "/run/containerd/fifo"
// DefaultRuntime is the default linux runtime
DefaultRuntime = "io.containerd.runc.v2"
) )

2
vendor/github.com/containerd/containerd/defaults/defaults_windows.go generated vendored

@ -40,4 +40,6 @@ const (
// DefaultFIFODir is the default location used by client-side cio library // DefaultFIFODir is the default location used by client-side cio library
// to store FIFOs. Unused on Windows. // to store FIFOs. Unused on Windows.
DefaultFIFODir = "" DefaultFIFODir = ""
// DefaultRuntime is the default windows runtime
DefaultRuntime = "io.containerd.runhcs.v1"
) )

7
vendor/github.com/containerd/containerd/diff.go generated vendored

@ -48,13 +48,14 @@ type diffRemote struct {
func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) {
var config diff.ApplyConfig var config diff.ApplyConfig
for _, opt := range opts { for _, opt := range opts {
if err := opt(&config); err != nil { if err := opt(ctx, desc, &config); err != nil {
return ocispec.Descriptor{}, err return ocispec.Descriptor{}, err
} }
} }
req := &diffapi.ApplyRequest{ req := &diffapi.ApplyRequest{
Diff: fromDescriptor(desc), Diff: fromDescriptor(desc),
Mounts: fromMounts(mounts), Mounts: fromMounts(mounts),
Payloads: config.ProcessorPayloads,
} }
resp, err := r.client.Apply(ctx, req) resp, err := r.client.Apply(ctx, req)
if err != nil { if err != nil {

13
vendor/github.com/containerd/containerd/diff/diff.go generated vendored

@ -20,6 +20,7 @@ import (
"context" "context"
"github.com/containerd/containerd/mount" "github.com/containerd/containerd/mount"
"github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -53,10 +54,12 @@ type Comparer interface {
// ApplyConfig is used to hold parameters needed for a apply operation // ApplyConfig is used to hold parameters needed for a apply operation
type ApplyConfig struct { type ApplyConfig struct {
// ProcessorPayloads specifies the payload sent to various processors
ProcessorPayloads map[string]*types.Any
} }
// ApplyOpt is used to configure an Apply operation // ApplyOpt is used to configure an Apply operation
type ApplyOpt func(*ApplyConfig) error type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error
// Applier allows applying diffs between mounts // Applier allows applying diffs between mounts
type Applier interface { type Applier interface {
@ -94,3 +97,11 @@ func WithLabels(labels map[string]string) Opt {
return nil return nil
} }
} }
// WithPayloads sets the apply processor payloads to the config
func WithPayloads(payloads map[string]*types.Any) ApplyOpt {
return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error {
c.ProcessorPayloads = payloads
return nil
}
}
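Since `ApplyOpt` now receives a context and descriptor, and `WithPayloads` is new, here is a sketch of forwarding a processor payload during an apply; the processor ID `io.example.decrypt` and the payload contents are hypothetical.

```go
package demo

import (
	"context"

	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/mount"
	"github.com/gogo/protobuf/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// applyWithPayload hands an opaque payload to whichever stream processor is
// registered under the hypothetical ID "io.example.decrypt".
func applyWithPayload(ctx context.Context, a diff.Applier, desc ocispec.Descriptor,
	mounts []mount.Mount, payload *types.Any) (ocispec.Descriptor, error) {
	return a.Apply(ctx, desc, mounts,
		diff.WithPayloads(map[string]*types.Any{
			"io.example.decrypt": payload,
		}),
	)
}
```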

187
vendor/github.com/containerd/containerd/diff/stream.go generated vendored Normal file

@ -0,0 +1,187 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"context"
"io"
"os"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/images"
"github.com/gogo/protobuf/types"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
var (
handlers []Handler
// ErrNoProcessor is returned when no stream processor is available for a media-type
ErrNoProcessor = errors.New("no processor for media-type")
)
func init() {
// register the default compression handler
RegisterProcessor(compressedHandler)
}
// RegisterProcessor registers a stream processor for media-types
func RegisterProcessor(handler Handler) {
handlers = append(handlers, handler)
}
// GetProcessor returns the processor for a media-type
func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
// reverse this list so that user configured handlers come up first
for i := len(handlers) - 1; i >= 0; i-- {
processor, ok := handlers[i](ctx, stream.MediaType())
if ok {
return processor(ctx, stream, payloads)
}
}
return nil, ErrNoProcessor
}
// Handler checks a media-type and initializes the processor
type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool)
// StaticHandler returns the processor init func for a static media-type
func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler {
return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
if mediaType == expectedMediaType {
return fn, true
}
return nil, false
}
}
// StreamProcessorInit returns the initialized stream processor
type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error)
// RawProcessor provides access to direct fd for processing
type RawProcessor interface {
// File returns the fd for the read stream of the underlying processor
File() *os.File
}
// StreamProcessor handles processing a content stream and transforming it into a different media-type
type StreamProcessor interface {
io.ReadCloser
// MediaType is the resulting media-type that the processor processes the stream into
MediaType() string
}
func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) {
compressed, err := images.DiffCompression(ctx, mediaType)
if err != nil {
return nil, false
}
if compressed != "" {
return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
ds, err := compression.DecompressStream(stream)
if err != nil {
return nil, err
}
return &compressedProcessor{
rc: ds,
}, nil
}, true
}
return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
return &stdProcessor{
rc: stream,
}, nil
}, true
}
// NewProcessorChain initialized the root StreamProcessor
func NewProcessorChain(mt string, r io.Reader) StreamProcessor {
return &processorChain{
mt: mt,
rc: r,
}
}
type processorChain struct {
mt string
rc io.Reader
}
func (c *processorChain) MediaType() string {
return c.mt
}
func (c *processorChain) Read(p []byte) (int, error) {
return c.rc.Read(p)
}
func (c *processorChain) Close() error {
return nil
}
type stdProcessor struct {
rc StreamProcessor
}
func (c *stdProcessor) MediaType() string {
return ocispec.MediaTypeImageLayer
}
func (c *stdProcessor) Read(p []byte) (int, error) {
return c.rc.Read(p)
}
func (c *stdProcessor) Close() error {
return nil
}
type compressedProcessor struct {
rc io.ReadCloser
}
func (c *compressedProcessor) MediaType() string {
return ocispec.MediaTypeImageLayer
}
func (c *compressedProcessor) Read(p []byte) (int, error) {
return c.rc.Read(p)
}
func (c *compressedProcessor) Close() error {
return c.rc.Close()
}
func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler {
set := make(map[string]struct{}, len(mediaTypes))
for _, m := range mediaTypes {
set[m] = struct{}{}
}
return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) {
if _, ok := set[mediaType]; ok {
return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) {
payload := payloads[id]
return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload)
}, true
}
return nil, false
}
}
const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE"
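The new stream-processor plumbing above lets an external binary transform layer streams by media type. Below is a sketch of registering and resolving one such processor; the custom media type and the binary path are assumptions.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd/diff"
	"github.com/gogo/protobuf/types"
)

func main() {
	// Register a processor that execs a hypothetical decryptor for a custom
	// layer media type and emits a plain OCI tar layer.
	diff.RegisterProcessor(diff.BinaryHandler(
		"io.example.decrypt", // processor ID, also the key used in payloads
		"application/vnd.oci.image.layer.v1.tar",
		[]string{"application/vnd.example.layer.v1.tar+encrypted"},
		"/usr/local/bin/layer-decrypt",
		nil,
	))

	f, err := os.Open("layer.blob")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Wrap the raw stream, then resolve the processor chain for its media type.
	stream := diff.NewProcessorChain("application/vnd.example.layer.v1.tar+encrypted", f)
	processor, err := diff.GetProcessor(context.Background(), stream, map[string]*types.Any{})
	if err != nil {
		log.Fatal(err)
	}
	defer processor.Close()
	// Reading from processor yields the transformed (decrypted) layer bytes.
}
```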

146
vendor/github.com/containerd/containerd/diff/stream_unix.go generated vendored Normal file

@ -0,0 +1,146 @@
// +build !windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"bytes"
"context"
"fmt"
"io"
"os"
"os/exec"
"sync"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/pkg/errors"
)
// NewBinaryProcessor returns a binary processor for use with processing content streams
func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
cmd := exec.CommandContext(ctx, name, args...)
cmd.Env = os.Environ()
var payloadC io.Closer
if payload != nil {
data, err := proto.Marshal(payload)
if err != nil {
return nil, err
}
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
go func() {
io.Copy(w, bytes.NewReader(data))
w.Close()
}()
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
payloadC = r
}
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
var (
stdin io.Reader
closer func() error
err error
)
if f, ok := stream.(RawProcessor); ok {
stdin = f.File()
closer = f.File().Close
} else {
stdin = stream
}
cmd.Stdin = stdin
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
cmd.Stdout = w
stderr := bytes.NewBuffer(nil)
cmd.Stderr = stderr
if err := cmd.Start(); err != nil {
return nil, err
}
p := &binaryProcessor{
cmd: cmd,
r: r,
mt: rmt,
stderr: stderr,
}
go p.wait()
// close after start and dup
w.Close()
if closer != nil {
closer()
}
if payloadC != nil {
payloadC.Close()
}
return p, nil
}
type binaryProcessor struct {
cmd *exec.Cmd
r *os.File
mt string
stderr *bytes.Buffer
mu sync.Mutex
err error
}
func (c *binaryProcessor) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *binaryProcessor) wait() {
if err := c.cmd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); ok {
c.mu.Lock()
c.err = errors.New(c.stderr.String())
c.mu.Unlock()
}
}
}
func (c *binaryProcessor) File() *os.File {
return c.r
}
func (c *binaryProcessor) MediaType() string {
return c.mt
}
func (c *binaryProcessor) Read(p []byte) (int, error) {
return c.r.Read(p)
}
func (c *binaryProcessor) Close() error {
err := c.r.Close()
if kerr := c.cmd.Process.Kill(); err == nil {
err = kerr
}
return err
}

165
vendor/github.com/containerd/containerd/diff/stream_windows.go generated vendored Normal file

@ -0,0 +1,165 @@
// +build windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package diff
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sync"
winio "github.com/Microsoft/go-winio"
"github.com/gogo/protobuf/proto"
"github.com/gogo/protobuf/types"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
const processorPipe = "STREAM_PROCESSOR_PIPE"
// NewBinaryProcessor returns a binary processor for use with processing content streams
func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) {
cmd := exec.CommandContext(ctx, name, args...)
cmd.Env = os.Environ()
if payload != nil {
data, err := proto.Marshal(payload)
if err != nil {
return nil, err
}
up, err := getUiqPath()
if err != nil {
return nil, err
}
path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up)
l, err := winio.ListenPipe(path, nil)
if err != nil {
return nil, err
}
go func() {
defer l.Close()
conn, err := l.Accept()
if err != nil {
logrus.WithError(err).Error("accept npipe connection")
return
}
io.Copy(conn, bytes.NewReader(data))
conn.Close()
}()
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path))
}
cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt))
var (
stdin io.Reader
closer func() error
err error
)
if f, ok := stream.(RawProcessor); ok {
stdin = f.File()
closer = f.File().Close
} else {
stdin = stream
}
cmd.Stdin = stdin
r, w, err := os.Pipe()
if err != nil {
return nil, err
}
cmd.Stdout = w
stderr := bytes.NewBuffer(nil)
cmd.Stderr = stderr
if err := cmd.Start(); err != nil {
return nil, err
}
p := &binaryProcessor{
cmd: cmd,
r: r,
mt: rmt,
stderr: stderr,
}
go p.wait()
// close after start and dup
w.Close()
if closer != nil {
closer()
}
return p, nil
}
type binaryProcessor struct {
cmd *exec.Cmd
r *os.File
mt string
stderr *bytes.Buffer
mu sync.Mutex
err error
}
func (c *binaryProcessor) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *binaryProcessor) wait() {
if err := c.cmd.Wait(); err != nil {
if _, ok := err.(*exec.ExitError); ok {
c.mu.Lock()
c.err = errors.New(c.stderr.String())
c.mu.Unlock()
}
}
}
func (c *binaryProcessor) File() *os.File {
return c.r
}
func (c *binaryProcessor) MediaType() string {
return c.mt
}
func (c *binaryProcessor) Read(p []byte) (int, error) {
return c.r.Read(p)
}
func (c *binaryProcessor) Close() error {
err := c.r.Close()
if kerr := c.cmd.Process.Kill(); err == nil {
err = kerr
}
return err
}
func getUiqPath() (string, error) {
dir, err := ioutil.TempDir("", "")
if err != nil {
return "", err
}
os.Remove(dir)
return filepath.Base(dir), nil
}

2
vendor/github.com/containerd/containerd/events/exchange/exchange.go generated vendored

@ -50,7 +50,7 @@ var _ events.Publisher = &Exchange{}
var _ events.Forwarder = &Exchange{} var _ events.Forwarder = &Exchange{}
var _ events.Subscriber = &Exchange{} var _ events.Subscriber = &Exchange{}
// Forward accepts an envelope to be direcly distributed on the exchange. // Forward accepts an envelope to be directly distributed on the exchange.
// //
// This is useful when an event is forwarded on behalf of another namespace or // This is useful when an event is forwarded on behalf of another namespace or
// when the event is propagated on behalf of another publisher. // when the event is propagated on behalf of another publisher.

167
vendor/github.com/containerd/containerd/image.go generated vendored

@ -19,16 +19,21 @@ package containerd
import ( import (
"context" "context"
"fmt" "fmt"
"strings"
"sync/atomic"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/diff"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms" "github.com/containerd/containerd/platforms"
"github.com/containerd/containerd/rootfs" "github.com/containerd/containerd/rootfs"
"github.com/containerd/containerd/snapshots"
"github.com/opencontainers/go-digest" "github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity" "github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sync/semaphore"
) )
// Image describes an image used by containers // Image describes an image used by containers
@ -40,11 +45,13 @@ type Image interface {
// Labels of the image // Labels of the image
Labels() map[string]string Labels() map[string]string
// Unpack unpacks the image's content into a snapshot // Unpack unpacks the image's content into a snapshot
Unpack(context.Context, string) error Unpack(context.Context, string, ...UnpackOpt) error
// RootFS returns the unpacked diffids that make up images rootfs. // RootFS returns the unpacked diffids that make up images rootfs.
RootFS(ctx context.Context) ([]digest.Digest, error) RootFS(ctx context.Context) ([]digest.Digest, error)
// Size returns the total size of the image's packed resources. // Size returns the total size of the image's packed resources.
Size(ctx context.Context) (int64, error) Size(ctx context.Context) (int64, error)
// Usage returns a usage calculation for the image.
Usage(context.Context, ...UsageOpt) (int64, error)
// Config descriptor for the image. // Config descriptor for the image.
Config(ctx context.Context) (ocispec.Descriptor, error) Config(ctx context.Context) (ocispec.Descriptor, error)
// IsUnpacked returns whether or not an image is unpacked. // IsUnpacked returns whether or not an image is unpacked.
@ -53,6 +60,49 @@ type Image interface {
ContentStore() content.Store ContentStore() content.Store
} }
type usageOptions struct {
manifestLimit *int
manifestOnly bool
snapshots bool
}
// UsageOpt is used to configure the usage calculation
type UsageOpt func(*usageOptions) error
// WithUsageManifestLimit sets the limit to the number of manifests which will
// be walked for usage. Setting this value to 0 will require all manifests to
// be walked, returning ErrNotFound if manifests are missing.
// NOTE: By default all manifests which exist will be walked
// and any non-existent manifests and their subobjects will be ignored.
func WithUsageManifestLimit(i int) UsageOpt {
// If 0 then don't filter any manifests
// By default limits to current platform
return func(o *usageOptions) error {
o.manifestLimit = &i
return nil
}
}
// WithSnapshotUsage will check for referenced snapshots from the image objects
// and include the snapshot size in the total usage.
func WithSnapshotUsage() UsageOpt {
return func(o *usageOptions) error {
o.snapshots = true
return nil
}
}
// WithManifestUsage is used to get the usage for an image based on what is
// reported by the manifests rather than what exists in the content store.
// NOTE: This function is best used with the manifest limit set to get a
// consistent value, otherwise non-existent manifests will be excluded.
func WithManifestUsage() UsageOpt {
return func(o *usageOptions) error {
o.manifestOnly = true
return nil
}
}
var _ = (Image)(&image{}) var _ = (Image)(&image{})
// NewImage returns a client image object from the metadata image // NewImage returns a client image object from the metadata image
@ -60,7 +110,7 @@ func NewImage(client *Client, i images.Image) Image {
return &image{ return &image{
client: client, client: client,
i: i, i: i,
platform: platforms.Default(), platform: client.platform,
} }
} }
@ -98,8 +148,95 @@ func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
} }
func (i *image) Size(ctx context.Context) (int64, error) { func (i *image) Size(ctx context.Context) (int64, error) {
provider := i.client.ContentStore() return i.Usage(ctx, WithUsageManifestLimit(1), WithManifestUsage())
return i.i.Size(ctx, provider, i.platform) }
func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) {
var config usageOptions
for _, opt := range opts {
if err := opt(&config); err != nil {
return 0, err
}
}
var (
provider = i.client.ContentStore()
handler = images.ChildrenHandler(provider)
size int64
mustExist bool
)
if config.manifestLimit != nil {
handler = images.LimitManifests(handler, i.platform, *config.manifestLimit)
mustExist = true
}
var wh images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
var usage int64
children, err := handler(ctx, desc)
if err != nil {
if !errdefs.IsNotFound(err) || mustExist {
return nil, err
}
if !config.manifestOnly {
// Do not count size of non-existent objects
desc.Size = 0
}
} else if config.snapshots || !config.manifestOnly {
info, err := provider.Info(ctx, desc.Digest)
if err != nil {
if !errdefs.IsNotFound(err) {
return nil, err
}
if !config.manifestOnly {
// Do not count size of non-existent objects
desc.Size = 0
}
} else if info.Size > desc.Size {
// Count actual usage, Size may be unset or -1
desc.Size = info.Size
}
for k, v := range info.Labels {
const prefix = "containerd.io/gc.ref.snapshot."
if !strings.HasPrefix(k, prefix) {
continue
}
sn := i.client.SnapshotService(k[len(prefix):])
if sn == nil {
continue
}
u, err := sn.Usage(ctx, v)
if err != nil {
if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) {
return nil, err
}
} else {
usage += u.Size
}
}
}
// Ignore unknown sizes. Generally unknown sizes should
// never be set in manifests, however, the usage
// calculation does not need to enforce this.
if desc.Size >= 0 {
usage += desc.Size
}
atomic.AddInt64(&size, usage)
return children, nil
}
l := semaphore.NewWeighted(3)
if err := images.Dispatch(ctx, wh, l, i.i.Target); err != nil {
return 0, err
}
return size, nil
} }
func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
@ -130,13 +267,31 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e
return false, nil return false, nil
} }
func (i *image) Unpack(ctx context.Context, snapshotterName string) error { // UnpackConfig provides configuration for the unpack of an image
type UnpackConfig struct {
// ApplyOpts for applying a diff to a snapshotter
ApplyOpts []diff.ApplyOpt
// SnapshotOpts for configuring a snapshotter
SnapshotOpts []snapshots.Opt
}
// UnpackOpt provides configuration for unpack
type UnpackOpt func(context.Context, *UnpackConfig) error
func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error {
ctx, done, err := i.client.WithLease(ctx) ctx, done, err := i.client.WithLease(ctx)
if err != nil { if err != nil {
return err return err
} }
defer done(ctx) defer done(ctx)
var config UnpackConfig
for _, o := range opts {
if err := o(ctx, &config); err != nil {
return err
}
}
layers, err := i.getLayers(ctx, i.platform) layers, err := i.getLayers(ctx, i.platform)
if err != nil { if err != nil {
return err return err
@ -158,7 +313,7 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
return err return err
} }
for _, layer := range layers { for _, layer := range layers {
unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a) unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts)
if err != nil { if err != nil {
return err return err
} }
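Two client-facing additions in this file are the `Usage` method with its options and the variadic `UnpackOpt` on `Unpack`. Below is a sketch of both; the snapshotter name, label key, and the custom option are illustrative assumptions.

```go
package demo

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/snapshots"
)

// report prints content-plus-snapshot usage for an image, then unpacks it
// while labelling every snapshot the unpack creates.
func report(ctx context.Context, img containerd.Image) error {
	size, err := img.Usage(ctx,
		containerd.WithUsageManifestLimit(1),
		containerd.WithSnapshotUsage(),
	)
	if err != nil {
		return err
	}
	fmt.Println("usage bytes:", size)

	// A custom UnpackOpt threading a snapshot label through to ApplyLayerWithOpts.
	label := func(_ context.Context, c *containerd.UnpackConfig) error {
		c.SnapshotOpts = append(c.SnapshotOpts, snapshots.WithLabels(map[string]string{
			"example.com/unpacked-by": "vendor-bump-demo",
		}))
		return nil
	}
	return img.Unpack(ctx, "overlayfs", label)
}
```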

32
vendor/github.com/containerd/containerd/images/archive/exporter.go generated vendored

@ -89,31 +89,29 @@ func WithImage(is images.Store, name string) ExportOpt {
} }
// WithManifest adds a manifest to the exported archive. // WithManifest adds a manifest to the exported archive.
// It is up to caller to put name annotation to on the manifest // When names are given they will be set on the manifest in the
// descriptor if needed. // exported archive, creating an index record for each name.
func WithManifest(manifest ocispec.Descriptor) ExportOpt { // When no names are provided, it is up to caller to put name annotation to
// on the manifest descriptor if needed.
func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
return func(ctx context.Context, o *exportOptions) error { return func(ctx context.Context, o *exportOptions) error {
o.manifests = append(o.manifests, manifest) if len(names) == 0 {
return nil
}
}
// WithNamedManifest adds a manifest to the exported archive
// with the provided names.
func WithNamedManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
for _, name := range names {
manifest.Annotations = addNameAnnotation(name, manifest.Annotations)
o.manifests = append(o.manifests, manifest) o.manifests = append(o.manifests, manifest)
} }
for _, name := range names {
mc := manifest
mc.Annotations = addNameAnnotation(name, manifest.Annotations)
o.manifests = append(o.manifests, mc)
}
return nil return nil
} }
} }
func addNameAnnotation(name string, annotations map[string]string) map[string]string { func addNameAnnotation(name string, base map[string]string) map[string]string {
if annotations == nil { annotations := map[string]string{}
annotations = map[string]string{} for k, v := range base {
annotations[k] = v
} }
annotations[images.AnnotationImageName] = name annotations[images.AnnotationImageName] = name
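With `WithManifest` now accepting names, a tagged manifest can be exported without hand-writing the name annotation. A sketch follows, assuming the package's `Export(ctx, store, writer, opts...)` entry point; the image reference is an assumption.

```go
package demo

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images/archive"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// exportTagged writes a tar archive in which the manifest is recorded in the
// index under the given name; WithManifest adds the name annotation for us.
func exportTagged(ctx context.Context, cs content.Provider, w io.Writer, manifest ocispec.Descriptor) error {
	return archive.Export(ctx, cs, w,
		archive.WithManifest(manifest, "docker.io/library/alpine:latest"),
	)
}
```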

151
vendor/github.com/containerd/containerd/images/archive/importer.go generated vendored

@ -22,12 +22,14 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/json" "encoding/json"
"fmt"
"io" "io"
"io/ioutil" "io/ioutil"
"path" "path"
"github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
@ -36,6 +38,22 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
) )
type importOpts struct {
compress bool
}
// ImportOpt is an option for importing an OCI index
type ImportOpt func(*importOpts) error
// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {
return func(io *importOpts) error {
io.compress = true
return nil
}
}
// ImportIndex imports an index from a tar archive image bundle // ImportIndex imports an index from a tar archive image bundle
// - implements Docker v1.1, v1.2 and OCI v1. // - implements Docker v1.1, v1.2 and OCI v1.
// - prefers OCI v1 when provided // - prefers OCI v1 when provided
@ -43,8 +61,7 @@ import (
// - normalizes Docker references and adds as OCI ref name // - normalizes Docker references and adds as OCI ref name
// e.g. alpine:latest -> docker.io/library/alpine:latest // e.g. alpine:latest -> docker.io/library/alpine:latest
// - existing OCI reference names are untouched // - existing OCI reference names are untouched
// - TODO: support option to compress layers on ingest func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
var ( var (
tr = tar.NewReader(reader) tr = tar.NewReader(reader)
@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
} }
symlinks = make(map[string]string) symlinks = make(map[string]string)
blobs = make(map[string]ocispec.Descriptor) blobs = make(map[string]ocispec.Descriptor)
iopts importOpts
) )
for _, o := range opts {
if err := o(&iopts); err != nil {
return ocispec.Descriptor{}, err
}
}
for { for {
hdr, err := tr.Next() hdr, err := tr.Next()
if err == io.EOF { if err == io.EOF {
@ -99,7 +124,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
} }
// If OCI layout was given, interpret the tar as an OCI layout. // If OCI layout was given, interpret the tar as an OCI layout.
// When not provided, the layout of the tar will be interpretted // When not provided, the layout of the tar will be interpreted
// as Docker v1.1 or v1.2. // as Docker v1.1 or v1.2.
if ociLayout.Version != "" { if ociLayout.Version != "" {
if ociLayout.Version != ocispec.ImageLayoutVersion { if ociLayout.Version != ocispec.ImageLayoutVersion {
@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
if !ok { if !ok {
return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
} }
config.MediaType = ocispec.MediaTypeImageConfig config.MediaType = images.MediaTypeDockerSchema2Config
layers, err := resolveLayers(ctx, store, mfst.Layers, blobs) layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
if err != nil { if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
} }
manifest := ocispec.Manifest{ manifest := struct {
Versioned: specs.Versioned{ SchemaVersion int `json:"schemaVersion"`
SchemaVersion: 2, MediaType string `json:"mediaType"`
}, Config ocispec.Descriptor `json:"config"`
Config: config, Layers []ocispec.Descriptor `json:"layers"`
Layers: layers, }{
SchemaVersion: 2,
MediaType: images.MediaTypeDockerSchema2Manifest,
Config: config,
Layers: layers,
} }
desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest) desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest)
@ -211,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size
return dgstr.Digest(), nil return dgstr.Digest(), nil
} }
func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) { func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
var layers []ocispec.Descriptor layers := make([]ocispec.Descriptor, len(layerFiles))
for _, f := range layerFiles { descs := map[digest.Digest]*ocispec.Descriptor{}
filters := []string{}
for i, f := range layerFiles {
desc, ok := blobs[f] desc, ok := blobs[f]
if !ok { if !ok {
return nil, errors.Errorf("layer %q not found", f) return nil, errors.Errorf("layer %q not found", f)
} }
layers[i] = desc
descs[desc.Digest] = &layers[i]
filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String())
}
err := store.Walk(ctx, func(info content.Info) error {
dgst, ok := info.Labels["containerd.io/uncompressed"]
if ok {
desc := descs[digest.Digest(dgst)]
if desc != nil {
desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
desc.Digest = info.Digest
desc.Size = info.Size
}
}
return nil
}, filters...)
if err != nil {
return nil, errors.Wrap(err, "failure checking for compressed blobs")
}
for i, desc := range layers {
if desc.MediaType != "" {
continue
}
// Open blob, resolve media type // Open blob, resolve media type
ra, err := store.ReaderAt(ctx, desc) ra, err := store.ReaderAt(ctx, desc)
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest) return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
} }
s, err := compression.DecompressStream(content.NewReader(ra)) s, err := compression.DecompressStream(content.NewReader(ra))
if err != nil { if err != nil {
return nil, errors.Wrapf(err, "failed to detect compression for %q", f) return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
} }
if s.GetCompression() == compression.Uncompressed { if s.GetCompression() == compression.Uncompressed {
// TODO: Support compressing and writing back to content store if compress {
desc.MediaType = ocispec.MediaTypeImageLayer ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
labels := map[string]string{
"containerd.io/uncompressed": desc.Digest.String(),
}
layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
if err != nil {
s.Close()
return nil, err
}
layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
} else {
layers[i].MediaType = images.MediaTypeDockerSchema2Layer
}
} else { } else {
desc.MediaType = ocispec.MediaTypeImageLayerGzip layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
} }
s.Close() s.Close()
layers = append(layers, desc)
} }
return layers, nil return layers, nil
} }
func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
}
defer func() {
w.Close()
if err != nil {
cs.Abort(ctx, ref)
}
}()
if err := w.Truncate(0); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
}
cw, err := compression.CompressStream(w, compression.Gzip)
if err != nil {
return ocispec.Descriptor{}, err
}
if _, err := io.Copy(cw, r); err != nil {
return ocispec.Descriptor{}, err
}
if err := cw.Close(); err != nil {
return ocispec.Descriptor{}, err
}
cst, err := w.Status()
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
}
desc.Digest = w.Digest()
desc.Size = cst.Offset
if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
if !errdefs.IsAlreadyExists(err) {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
}
}
return desc, nil
}
func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
manifestBytes, err := json.Marshal(manifest) manifestBytes, err := json.Marshal(manifest)
if err != nil { if err != nil {

2
vendor/github.com/containerd/containerd/images/archive/reference.go generated vendored

@ -91,7 +91,7 @@ func familiarizeReference(ref string) (string, error) {
func ociReferenceName(name string) string { func ociReferenceName(name string) string {
// OCI defines the reference name as only a tag excluding the // OCI defines the reference name as only a tag excluding the
// repository. The containerd annotation contains the full image name // repository. The containerd annotation contains the full image name
// since the tag is insufficent for correctly naming and referring to an // since the tag is insufficient for correctly naming and referring to an
// image // image
var ociRef string var ociRef string
if spec, err := reference.Parse(name); err == nil { if spec, err := reference.Parse(name); err == nil {

7
vendor/github.com/containerd/containerd/images/handlers.go generated vendored

@ -117,7 +117,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
// //
// If any handler returns an error, the dispatch session will be canceled. // If any handler returns an error, the dispatch session will be canceled.
func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
eg, ctx := errgroup.WithContext(ctx) eg, ctx2 := errgroup.WithContext(ctx)
for _, desc := range descs { for _, desc := range descs {
desc := desc desc := desc
@ -126,10 +126,11 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
return err return err
} }
} }
eg.Go(func() error { eg.Go(func() error {
desc := desc desc := desc
children, err := handler.Handle(ctx, desc) children, err := handler.Handle(ctx2, desc)
if limiter != nil { if limiter != nil {
limiter.Release(1) limiter.Release(1)
} }
@ -141,7 +142,7 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
} }
if len(children) > 0 { if len(children) > 0 {
return Dispatch(ctx, handler, limiter, children...) return Dispatch(ctx2, handler, limiter, children...)
} }
return nil return nil

60
vendor/github.com/containerd/containerd/images/image.go generated vendored

@ -20,7 +20,6 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"sort" "sort"
"strings"
"time" "time"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
@ -119,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
} }
size += desc.Size size += desc.Size
return nil, nil return nil, nil
}), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target) }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
} }
type platformManifest struct { type platformManifest struct {
@ -142,6 +141,7 @@ type platformManifest struct {
// this direction because this abstraction is not needed.` // this direction because this abstraction is not needed.`
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
var ( var (
limit = 1
m []platformManifest m []platformManifest
wasIndex bool wasIndex bool
) )
@ -210,10 +210,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
} }
} }
sort.SliceStable(descs, func(i, j int) bool {
if descs[i].Platform == nil {
return false
}
if descs[j].Platform == nil {
return true
}
return platform.Less(*descs[i].Platform, *descs[j].Platform)
})
wasIndex = true wasIndex = true
if len(descs) > limit {
return descs[:limit], nil
}
return descs, nil return descs, nil
} }
return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
}), image); err != nil { }), image); err != nil {
@ -227,17 +239,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
} }
return ocispec.Manifest{}, err return ocispec.Manifest{}, err
} }
sort.SliceStable(m, func(i, j int) bool {
if m[i].p == nil {
return false
}
if m[j].p == nil {
return true
}
return platform.Less(*m[i].p, *m[j].p)
})
return *m[0].m, nil return *m[0].m, nil
} }
@ -356,15 +357,11 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
} }
descs = append(descs, index.Manifests...) descs = append(descs, index.Manifests...)
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
// childless data types.
return nil, nil
default: default:
if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
// childless data types.
return nil, nil
}
log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
} }
@ -387,22 +384,3 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D
} }
return config.RootFS.DiffIDs, nil return config.RootFS.DiffIDs, nil
} }
// IsCompressedDiff returns true if mediaType is a known compressed diff media type.
// It returns false if the media type is a diff, but not compressed. If the media type
// is not a known diff type, it returns errdefs.ErrNotImplemented
func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) {
switch mediaType {
case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer:
case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip:
return true, nil
default:
// Still apply all generic media types *.tar[.+]gzip and *.tar
if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") {
return true, nil
} else if !strings.HasSuffix(mediaType, ".tar") {
return false, errdefs.ErrNotImplemented
}
}
return false, nil
}

84
vendor/github.com/containerd/containerd/images/mediatypes.go generated vendored

@ -16,6 +16,15 @@
package images package images
import (
"context"
"sort"
"strings"
"github.com/containerd/containerd/errdefs"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// mediatype definitions for image components handled in containerd. // mediatype definitions for image components handled in containerd.
// //
// oci components are generally referenced directly, although we may centralize // oci components are generally referenced directly, although we may centralize
@ -40,3 +49,78 @@ const (
// Legacy Docker schema1 manifest // Legacy Docker schema1 manifest
MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
) )
// DiffCompression returns the compression as defined by the layer diff media
// type. For Docker media types without compression, "unknown" is returned to
// indicate that the media type may be compressed. If the media type is not
// recognized as a layer diff, then it returns errdefs.ErrNotImplemented
func DiffCompression(ctx context.Context, mediaType string) (string, error) {
base, ext := parseMediaTypes(mediaType)
switch base {
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign:
if len(ext) > 0 {
// Type is wrapped
return "", nil
}
// These media types may have been compressed but failed to
// use the correct media type. The decompression function
// should detect and handle this case.
return "unknown", nil
case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip:
if len(ext) > 0 {
// Type is wrapped
return "", nil
}
return "gzip", nil
case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable:
if len(ext) > 0 {
switch ext[len(ext)-1] {
case "gzip":
return "gzip", nil
}
}
return "", nil
default:
return "", errdefs.ErrNotImplemented
}
}
// parseMediaTypes splits the media type into the base type and
// an array of sorted extensions
func parseMediaTypes(mt string) (string, []string) {
if mt == "" {
return "", []string{}
}
s := strings.Split(mt, "+")
ext := s[1:]
sort.Strings(ext)
return s[0], ext
}
// IsLayerTypes returns true if the media type is a layer
func IsLayerType(mt string) bool {
if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") {
return true
}
// Parse Docker media types, strip off any + suffixes first
base, _ := parseMediaTypes(mt)
switch base {
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
return true
}
return false
}
// IsKnownConfig returns true if the media type is a known config type
func IsKnownConfig(mt string) bool {
switch mt {
case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
return true
}
return false
}
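The removed `IsCompressedDiff` helper is effectively replaced by `DiffCompression`, `IsLayerType`, and `IsKnownConfig`. A small sketch of branching on them; the media types passed in are up to the caller.

```go
package demo

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
)

// describe reports what the new media-type helpers say about a layer media type.
func describe(ctx context.Context, mediaType string) error {
	if !images.IsLayerType(mediaType) {
		return fmt.Errorf("%s is not a layer type", mediaType)
	}
	compression, err := images.DiffCompression(ctx, mediaType)
	if err != nil {
		if errdefs.IsNotImplemented(err) {
			return fmt.Errorf("%s is not a recognized diff type", mediaType)
		}
		return err
	}
	if compression == "" {
		fmt.Println("no compression indicated by the media type")
	} else {
		fmt.Println("compression:", compression) // may be "unknown" for some Docker types
	}
	return nil
}
```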

21
vendor/github.com/containerd/containerd/import.go generated vendored

@ -35,6 +35,7 @@ type importOpts struct {
imageRefT func(string) string imageRefT func(string) string
dgstRefT func(digest.Digest) string dgstRefT func(digest.Digest) string
allPlatforms bool allPlatforms bool
compress bool
} }
// ImportOpt allows the caller to specify import specific options // ImportOpt allows the caller to specify import specific options
@ -74,9 +75,18 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt {
} }
} }
// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {
return func(c *importOpts) error {
c.compress = true
return nil
}
}
// Import imports an image from a Tar stream using reader. // Import imports an image from a Tar stream using reader.
// Caller needs to specify importer. Future version may use oci.v1 as the default. // Caller needs to specify importer. Future version may use oci.v1 as the default.
// Note that unreferrenced blobs may be imported to the content store as well. // Note that unreferenced blobs may be imported to the content store as well.
func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) { func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) {
var iopts importOpts var iopts importOpts
for _, o := range opts { for _, o := range opts {
@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
} }
defer done(ctx) defer done(ctx)
index, err := archive.ImportIndex(ctx, c.ContentStore(), reader) var aio []archive.ImportOpt
if iopts.compress {
aio = append(aio, archive.WithImportCompression())
}
index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -110,7 +125,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt
} }
var platformMatcher = platforms.All var platformMatcher = platforms.All
if !iopts.allPlatforms { if !iopts.allPlatforms {
platformMatcher = platforms.Default() platformMatcher = c.platform
} }
var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
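A sketch of the import path with the new compression option; the socket path, namespace, and tar file name are assumptions. `WithImportCompression` matters for `docker save` style archives whose layers are stored uncompressed.

```go
package main

import (
	"context"
	"log"
	"os"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("example"))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	f, err := os.Open("legacy-image.tar") // e.g. docker save output with uncompressed layers
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Gzip uncompressed layers as they are ingested, so the imported content
	// matches what registries expect for schema2 layers.
	imgs, err := client.Import(context.Background(), f, containerd.WithImportCompression())
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range imgs {
		log.Println("imported", img.Name)
	}
}
```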

3
vendor/github.com/containerd/containerd/install.go generated vendored

@ -27,7 +27,6 @@ import (
"github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/images" "github.com/containerd/containerd/images"
"github.com/containerd/containerd/platforms"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@ -43,7 +42,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts)
} }
var ( var (
cs = image.ContentStore() cs = image.ContentStore()
platform = platforms.Default() platform = c.platform
) )
manifest, err := images.Manifest(ctx, cs, image.Target(), platform) manifest, err := images.Manifest(ctx, cs, image.Target(), platform)
if err != nil { if err != nil {

2
vendor/github.com/containerd/containerd/log/context.go generated vendored

@ -30,7 +30,7 @@ var (
// messages. // messages.
G = GetLogger G = GetLogger
// L is an alias for the the standard logger. // L is an alias for the standard logger.
L = logrus.NewEntry(logrus.StandardLogger()) L = logrus.NewEntry(logrus.StandardLogger())
) )

2
vendor/github.com/containerd/containerd/namespaces/context.go generated vendored

@ -64,7 +64,7 @@ func Namespace(ctx context.Context) (string, bool) {
return namespace, ok return namespace, ok
} }
// NamespaceRequired returns the valid namepace from the context or an error. // NamespaceRequired returns the valid namespace from the context or an error.
func NamespaceRequired(ctx context.Context) (string, error) { func NamespaceRequired(ctx context.Context) (string, error) {
namespace, ok := Namespace(ctx) namespace, ok := Namespace(ctx)
if !ok || namespace == "" { if !ok || namespace == "" {

10
vendor/github.com/containerd/containerd/namespaces/ttrpc.go generated vendored

@ -27,10 +27,20 @@ const (
TTRPCHeader = "containerd-namespace-ttrpc" TTRPCHeader = "containerd-namespace-ttrpc"
) )
func copyMetadata(src ttrpc.MD) ttrpc.MD {
md := ttrpc.MD{}
for k, v := range src {
md[k] = append(md[k], v...)
}
return md
}
func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
md, ok := ttrpc.GetMetadata(ctx) md, ok := ttrpc.GetMetadata(ctx)
if !ok { if !ok {
md = ttrpc.MD{} md = ttrpc.MD{}
} else {
md = copyMetadata(md)
} }
md.Set(TTRPCHeader, namespace) md.Set(TTRPCHeader, namespace)
return ttrpc.WithMetadata(ctx, md) return ttrpc.WithMetadata(ctx, md)

36
vendor/github.com/containerd/containerd/namespaces_opts_linux.go generated vendored

@ -1,36 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"context"
"github.com/containerd/cgroups"
"github.com/containerd/containerd/namespaces"
)
// WithNamespaceCgroupDeletion removes the cgroup directory that was created for the namespace
func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) error {
cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(i.Name))
if err != nil {
if err == cgroups.ErrCgroupDeleted {
return nil
}
return err
}
return cg.Delete()
}

3
vendor/github.com/containerd/containerd/oci/spec.go generated vendored

@@ -78,7 +78,7 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s
return err
}
-// ApplyOpts applys the options to the given spec, injecting data from the
+// ApplyOpts applies the options to the given spec, injecting data from the
// context, client and container instance.
func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *Spec, opts ...SpecOpts) error {
for _, o := range opts {
@@ -141,7 +141,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error {
Path: defaultRootfsPath,
},
Process: &specs.Process{
-Env: defaultUnixEnv,
Cwd: "/",
NoNewPrivileges: true,
User: specs.User{
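
With Env: defaultUnixEnv removed from the generated process, a default spec no longer carries a PATH; callers that relied on it can opt back in through the new WithDefaultPathEnv option from oci/spec_opts.go. A hedged sketch of that usage (the helloSpec helper and the "default" namespace are illustrative assumptions, not part of the vendored API):

import (
	"context"

	"github.com/containerd/containerd/containers"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/oci"
)

// helloSpec builds a spec whose process env explicitly regains the default
// PATH, since the generated spec no longer injects it.
func helloSpec(ctx context.Context) (*oci.Spec, error) {
	// Default spec generation reads the namespace from the context.
	ctx = namespaces.WithNamespace(ctx, "default")
	s := &oci.Spec{}
	err := oci.ApplyOpts(ctx, nil, &containers.Container{}, s,
		oci.WithDefaultSpecForPlatform("linux/amd64"),
		oci.WithDefaultPathEnv, // restores PATH in s.Process.Env
		oci.WithProcessArgs("/bin/echo", "hello"),
	)
	return s, err
}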

vendor/github.com/containerd/containerd/oci/spec_opts.go (generated, vendored)

@@ -17,6 +17,7 @@
package oci
import (
+"bufio"
"context"
"encoding/json"
"fmt"
@@ -76,6 +77,20 @@ func setLinux(s *Spec) {
}
}
// nolint
func setResources(s *Spec) {
if s.Linux != nil {
if s.Linux.Resources == nil {
s.Linux.Resources = &specs.LinuxResources{}
}
}
if s.Windows != nil {
if s.Windows.Resources == nil {
s.Windows.Resources = &specs.WindowsResources{}
}
}
}
// setCapabilities sets Linux Capabilities to empty if unset
func setCapabilities(s *Spec) {
setProcess(s)
@@ -104,7 +119,7 @@ func WithDefaultSpecForPlatform(platform string) SpecOpts {
}
}
-// WithSpecFromBytes loads the the spec from the provided byte slice.
+// WithSpecFromBytes loads the spec from the provided byte slice.
func WithSpecFromBytes(p []byte) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
*s = Spec{} // make sure spec is cleared.
@@ -137,6 +152,13 @@ func WithEnv(environmentVariables []string) SpecOpts {
}
}
// WithDefaultPathEnv sets the $PATH environment variable to the
// default PATH defined in this package.
func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv)
return nil
}
// replaceOrAppendEnvValues returns the defaults with the overrides either
// replaced by env key or appended to the list
func replaceOrAppendEnvValues(defaults, overrides []string) []string {
@@ -312,7 +334,11 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
setProcess(s)
if s.Linux != nil {
-s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
+defaults := config.Env
+if len(defaults) == 0 {
+defaults = defaultUnixEnv
+}
+s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env)
cmd := config.Cmd
if len(args) > 0 {
cmd = args
@@ -334,7 +360,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts {
// even if there is no specified user in the image config
return WithAdditionalGIDs("root")(ctx, client, c, s)
} else if s.Windows != nil {
-s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env)
+s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env)
cmd := config.Cmd
if len(args) > 0 {
cmd = args
@@ -607,7 +633,7 @@ func WithUserID(uid uint32) SpecOpts {
}
// WithUsername sets the correct UID and GID for the container
-// based on the the image's /etc/passwd contents. If /etc/passwd
+// based on the image's /etc/passwd contents. If /etc/passwd
// does not exist, or the username is not found in /etc/passwd,
// it returns error.
func WithUsername(username string) SpecOpts {
@@ -1139,3 +1165,85 @@ func WithAnnotations(annotations map[string]string) SpecOpts {
return nil
}
}
// WithLinuxDevices adds the provided linux devices to the spec
func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
s.Linux.Devices = append(s.Linux.Devices, devices...)
return nil
}
}
var ErrNotADevice = errors.New("not a device node")
// WithLinuxDevice adds the device specified by path to the spec
func WithLinuxDevice(path, permissions string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
setLinux(s)
setResources(s)
dev, err := deviceFromPath(path, permissions)
if err != nil {
return err
}
s.Linux.Devices = append(s.Linux.Devices, *dev)
s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{
Type: dev.Type,
Allow: true,
Major: &dev.Major,
Minor: &dev.Minor,
Access: permissions,
})
return nil
}
}
// WithEnvFile adds environment variables from a file to the container's spec
func WithEnvFile(path string) SpecOpts {
return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error {
var vars []string
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
sc := bufio.NewScanner(f)
for sc.Scan() {
if sc.Err() != nil {
return sc.Err()
}
vars = append(vars, sc.Text())
}
return WithEnv(vars)(nil, nil, nil, s)
}
}
// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config
// and an Opts was trying to set a configuration value on the mount.
var ErrNoShmMount = errors.New("no /dev/shm mount specified")
// WithDevShmSize sets the size of the /dev/shm mount for the container.
//
// The size value is specified in kb, kilobytes.
func WithDevShmSize(kb int64) SpecOpts {
return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error {
for _, m := range s.Mounts {
if m.Source == "shm" && m.Type == "tmpfs" {
for i, o := range m.Options {
if strings.HasPrefix(o, "size=") {
m.Options[i] = fmt.Sprintf("size=%dk", kb)
return nil
}
}
m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb))
return nil
}
}
return ErrNoShmMount
}
}
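
The new options above compose like any other SpecOpts when a container is created through the client. A hedged sketch of wiring them together (the container ID, env-file path and device path are placeholders; WithNewSnapshot, WithNewSpec and WithImageConfig are pre-existing helpers):

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/oci"
)

func createWithDevices(ctx context.Context, client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	return client.NewContainer(ctx, "example",
		containerd.WithNewSnapshot("example-snapshot", image),
		containerd.WithNewSpec(
			oci.WithImageConfig(image),
			oci.WithDefaultPathEnv,                  // PATH no longer comes from the default spec
			oci.WithEnvFile("/etc/example/app.env"), // one VAR=value per line
			oci.WithLinuxDevice("/dev/fuse", "rwm"), // adds the device node and its cgroup rule
			oci.WithDevShmSize(64*1024),             // /dev/shm size is given in kilobytes
		),
	)
}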

vendor/github.com/containerd/containerd/oci/spec_opts_linux.go (new file, generated, vendored)

@ -0,0 +1,64 @@
// +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"os"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
)
func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
var stat unix.Stat_t
if err := unix.Lstat(path, &stat); err != nil {
return nil, err
}
var (
// The type is 32bit on mips.
devNumber = uint64(stat.Rdev) // nolint: unconvert
major = unix.Major(devNumber)
minor = unix.Minor(devNumber)
)
if major == 0 {
return nil, ErrNotADevice
}
var (
devType string
mode = stat.Mode
)
switch {
case mode&unix.S_IFBLK == unix.S_IFBLK:
devType = "b"
case mode&unix.S_IFCHR == unix.S_IFCHR:
devType = "c"
}
fm := os.FileMode(mode)
return &specs.LinuxDevice{
Type: devType,
Path: path,
Major: int64(major),
Minor: int64(minor),
FileMode: &fm,
UID: &stat.Uid,
GID: &stat.Gid,
}, nil
}

vendor/github.com/containerd/containerd/oci/spec_opts_unix.go (new file, generated, vendored)

@ -0,0 +1,63 @@
// +build !linux,!windows
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package oci
import (
"os"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
)
func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
var stat unix.Stat_t
if err := unix.Lstat(path, &stat); err != nil {
return nil, err
}
var (
devNumber = uint64(stat.Rdev)
major = unix.Major(devNumber)
minor = unix.Minor(devNumber)
)
if major == 0 {
return nil, ErrNotADevice
}
var (
devType string
mode = stat.Mode
)
switch {
case mode&unix.S_IFBLK == unix.S_IFBLK:
devType = "b"
case mode&unix.S_IFCHR == unix.S_IFCHR:
devType = "c"
}
fm := os.FileMode(mode)
return &specs.LinuxDevice{
Type: devType,
Path: path,
Major: int64(major),
Minor: int64(minor),
FileMode: &fm,
UID: &stat.Uid,
GID: &stat.Gid,
}, nil
}

vendor/github.com/containerd/containerd/oci/spec_opts_windows.go (generated, vendored)

@@ -23,6 +23,7 @@ import (
"github.com/containerd/containerd/containers"
specs "github.com/opencontainers/runtime-spec/specs-go"
+"github.com/pkg/errors"
)
// WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the
@@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts {
return nil
}
}
+func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) {
+return nil, errors.New("device from path not supported on Windows")
+}

vendor/github.com/containerd/containerd/platforms/database.go (generated, vendored)

@@ -28,7 +28,7 @@ func isLinuxOS(os string) bool {
return os == "linux"
}
-// These function are generated from from https://golang.org/src/go/build/syslist.go.
+// These function are generated from https://golang.org/src/go/build/syslist.go.
//
// We use switch statements because they are slightly faster than map lookups
// and use a little less memory.
@@ -38,7 +38,7 @@ func isLinuxOS(os string) bool {
// The OS value should be normalized before calling this function.
func isKnownOS(os string) bool {
switch os {
-case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
return true
}
return false
@@ -60,7 +60,7 @@ func isArmArch(arch string) bool {
// The arch value should be normalized before being passed to this function.
func isKnownArch(arch string) bool {
switch arch {
-case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64":
+case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm":
return true
}
return false
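
The widened OS and architecture lists mean specifiers such as linux/riscv64 or js/wasm now pass validation instead of being rejected as unknown. A small sketch, assuming the public platforms API at this revision:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// riscv64 (like wasm, aix, hurd and illumos) is now a known value,
	// so parsing succeeds.
	p, err := platforms.Parse("linux/riscv64")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // linux/riscv64
}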

vendor/github.com/containerd/containerd/platforms/platforms.go (generated, vendored)

@@ -130,7 +130,7 @@ type Matcher interface {
// specification. The returned matcher only looks for equality based on os,
// architecture and variant.
//
-// One may implement their own matcher if this doesn't provide the the required
+// One may implement their own matcher if this doesn't provide the required
// functionality.
//
// Applications should opt to use `Match` over directly parsing specifiers.

vendor/github.com/containerd/containerd/plugin/context.go (generated, vendored)

@@ -28,12 +28,13 @@ import (
// InitContext is used for plugin inititalization
type InitContext struct {
Context context.Context
Root string
State string
Config interface{}
Address string
+TTRPCAddress string
Events *exchange.Exchange
Meta *Meta // plugins can fill in metadata at init.
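
The added TTRPCAddress field exposes the daemon's ttrpc socket to plugins alongside the existing gRPC Address. A hedged sketch of a plugin init function reading both (the plugin ID and the empty returned value are placeholders):

import (
	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/plugin"
)

func init() {
	plugin.Register(&plugin.Registration{
		Type: plugin.GRPCPlugin,
		ID:   "example",
		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
			// Both socket addresses are now available at init time.
			log.G(ic.Context).
				WithField("grpc", ic.Address).
				WithField("ttrpc", ic.TTRPCAddress).
				Debug("example plugin initialized")
			return struct{}{}, nil
		},
	})
}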

vendor/github.com/containerd/containerd/process.go (generated, vendored)

@@ -44,7 +44,7 @@ type Process interface {
Wait(context.Context) (<-chan ExitStatus, error)
// CloseIO allows various pipes to be closed on the process
CloseIO(context.Context, ...IOCloserOpts) error
-// Resize changes the width and heigh of the process's terminal
+// Resize changes the width and height of the process's terminal
Resize(ctx context.Context, w, h uint32) error
// IO returns the io set for the process
IO() cio.IO
@@ -61,7 +61,7 @@ func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus {
}
}
-// ExitStatus encapsulates a process' exit status.
+// ExitStatus encapsulates a process's exit status.
// It is used by `Wait()` to return either a process exit code or an error
type ExitStatus struct {
code uint32

vendor/github.com/containerd/containerd/pull.go (generated, vendored)

@ -32,7 +32,7 @@ import (
// Pull downloads the provided content into containerd's content store // Pull downloads the provided content into containerd's content store
// and returns a platform specific image object // and returns a platform specific image object
func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) { func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) {
pullCtx := defaultRemoteContext() pullCtx := defaultRemoteContext()
for _, o := range opts { for _, o := range opts {
if err := o(c, pullCtx); err != nil { if err := o(c, pullCtx); err != nil {
@ -44,7 +44,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
if len(pullCtx.Platforms) > 1 { if len(pullCtx.Platforms) > 1 {
return nil, errors.New("cannot pull multiplatform image locally, try Fetch") return nil, errors.New("cannot pull multiplatform image locally, try Fetch")
} else if len(pullCtx.Platforms) == 0 { } else if len(pullCtx.Platforms) == 0 {
pullCtx.PlatformMatcher = platforms.Default() pullCtx.PlatformMatcher = c.platform
} else { } else {
p, err := platforms.Parse(pullCtx.Platforms[0]) p, err := platforms.Parse(pullCtx.Platforms[0])
if err != nil { if err != nil {
@ -61,6 +61,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
} }
defer done(ctx) defer done(ctx)
var unpacks int32
if pullCtx.Unpack {
// unpacker only supports schema 2 image, for schema 1 this is noop.
u, err := c.newUnpacker(ctx, pullCtx)
if err != nil {
return nil, errors.Wrap(err, "create unpacker")
}
unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks)
defer func() {
if err := eg.Wait(); err != nil {
if retErr == nil {
retErr = errors.Wrap(err, "unpack")
}
}
}()
wrapper := pullCtx.HandlerWrapper
pullCtx.HandlerWrapper = func(h images.Handler) images.Handler {
if wrapper == nil {
return unpackWrapper(h)
}
return wrapper(unpackWrapper(h))
}
}
img, err := c.fetch(ctx, pullCtx, ref, 1) img, err := c.fetch(ctx, pullCtx, ref, 1)
if err != nil { if err != nil {
return nil, err return nil, err
@ -69,8 +93,12 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image
i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher)
if pullCtx.Unpack { if pullCtx.Unpack {
if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil { if unpacks == 0 {
return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) // Try to unpack is none is done previously.
// This is at least required for schema 1 image.
if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil {
return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter)
}
} }
} }
@ -112,9 +140,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
childrenHandler := images.ChildrenHandler(store) childrenHandler := images.ChildrenHandler(store)
// Set any children labels for that content // Set any children labels for that content
childrenHandler = images.SetChildrenLabels(store, childrenHandler) childrenHandler = images.SetChildrenLabels(store, childrenHandler)
// Filter manifests by platforms but allow to handle manifest if rCtx.AllMetadata {
// and configuration for not-target platforms // Filter manifests by platforms but allow to handle manifest
childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) // and configuration for not-target platforms
childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher)
} else {
// Filter children by platforms if specified.
childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher)
}
// Sort and limit manifests if a finite number is needed // Sort and limit manifests if a finite number is needed
if limit > 0 { if limit > 0 {
childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit) childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit)
@ -131,22 +164,18 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim
}, },
) )
appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
if err != nil {
return images.Image{}, err
}
handlers := append(rCtx.BaseHandlers, handlers := append(rCtx.BaseHandlers,
remotes.FetchHandler(store, fetcher), remotes.FetchHandler(store, fetcher),
convertibleHandler, convertibleHandler,
childrenHandler, childrenHandler,
appendDistSrcLabelHandler,
) )
// append distribution source label to blob data
if rCtx.AppendDistributionSourceLabel {
appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref)
if err != nil {
return images.Image{}, err
}
handlers = append(handlers, appendDistSrcLabelHandler)
}
handler = images.Handlers(handlers...) handler = images.Handlers(handlers...)
converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) { converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
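
With the unpacker wired into the handler chain, Pull can unpack layers while they are still being fetched and only falls back to the post-pull Unpack when nothing was unpacked (for example schema 1 images). A hedged usage sketch (socket path, namespace and image reference are placeholders):

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func pullAlpine(ctx context.Context) (containerd.Image, error) {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		return nil, err
	}
	defer client.Close()

	ctx = namespaces.WithNamespace(ctx, "default")
	// WithPullUnpack enables the concurrent unpack path added above.
	return client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithPlatform("linux/amd64"),
	)
}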

vendor/github.com/containerd/containerd/remotes/docker/authorizer.go (generated, vendored)

@ -31,7 +31,6 @@ import (
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log" "github.com/containerd/containerd/log"
"github.com/containerd/containerd/version"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"golang.org/x/net/context/ctxhttp" "golang.org/x/net/context/ctxhttp"
@ -41,7 +40,7 @@ type dockerAuthorizer struct {
credentials func(string) (string, string, error) credentials func(string) (string, string, error)
client *http.Client client *http.Client
ua string header http.Header
mu sync.Mutex mu sync.Mutex
// indexed by host name // indexed by host name
@ -50,15 +49,58 @@ type dockerAuthorizer struct {
// NewAuthorizer creates a Docker authorizer using the provided function to // NewAuthorizer creates a Docker authorizer using the provided function to
// get credentials for the token server or basic auth. // get credentials for the token server or basic auth.
// Deprecated: Use NewDockerAuthorizer
func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer {
if client == nil { return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f))
client = http.DefaultClient }
type authorizerConfig struct {
credentials func(string) (string, string, error)
client *http.Client
header http.Header
}
// AuthorizerOpt configures an authorizer
type AuthorizerOpt func(*authorizerConfig)
// WithAuthClient provides the HTTP client for the authorizer
func WithAuthClient(client *http.Client) AuthorizerOpt {
return func(opt *authorizerConfig) {
opt.client = client
}
}
// WithAuthCreds provides a credential function to the authorizer
func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt {
return func(opt *authorizerConfig) {
opt.credentials = creds
}
}
// WithAuthHeader provides HTTP headers for authorization
func WithAuthHeader(hdr http.Header) AuthorizerOpt {
return func(opt *authorizerConfig) {
opt.header = hdr
}
}
// NewDockerAuthorizer creates an authorizer using Docker's registry
// authentication spec.
// See https://docs.docker.com/registry/spec/auth/
func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
var ao authorizerConfig
for _, opt := range opts {
opt(&ao)
}
if ao.client == nil {
ao.client = http.DefaultClient
} }
return &dockerAuthorizer{ return &dockerAuthorizer{
credentials: f, credentials: ao.credentials,
client: client, client: ao.client,
ua: "containerd/" + version.Version, header: ao.header,
handlers: make(map[string]*authHandler), handlers: make(map[string]*authHandler),
} }
} }
@ -115,7 +157,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
return err return err
} }
a.handlers[host] = newAuthHandler(a.client, a.ua, c.scheme, common) a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
return nil return nil
} else if c.scheme == basicAuth && a.credentials != nil { } else if c.scheme == basicAuth && a.credentials != nil {
username, secret, err := a.credentials(host) username, secret, err := a.credentials(host)
@ -129,7 +171,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
secret: secret, secret: secret,
} }
a.handlers[host] = newAuthHandler(a.client, a.ua, c.scheme, common) a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common)
return nil return nil
} }
} }
@ -179,7 +221,7 @@ type authResult struct {
type authHandler struct { type authHandler struct {
sync.Mutex sync.Mutex
ua string header http.Header
client *http.Client client *http.Client
@ -194,13 +236,9 @@ type authHandler struct {
scopedTokens map[string]*authResult scopedTokens map[string]*authResult
} }
func newAuthHandler(client *http.Client, ua string, scheme authenticationScheme, opts tokenOptions) *authHandler { func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler {
if client == nil {
client = http.DefaultClient
}
return &authHandler{ return &authHandler{
ua: ua, header: hdr,
client: client, client: client,
scheme: scheme, scheme: scheme,
common: opts, common: opts,
@ -227,7 +265,7 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) {
} }
auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
return fmt.Sprintf("%s %s", "Basic", auth), nil return fmt.Sprintf("Basic %s", auth), nil
} }
func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) {
@ -269,7 +307,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) {
token, err = ah.fetchToken(ctx, to) token, err = ah.fetchToken(ctx, to)
err = errors.Wrap(err, "failed to fetch anonymous token") err = errors.Wrap(err, "failed to fetch anonymous token")
} }
token = fmt.Sprintf("%s %s", "Bearer", token) token = fmt.Sprintf("Bearer %s", token)
r.token, r.err = token, err r.token, r.err = token, err
r.Done() r.Done()
@ -313,8 +351,10 @@ func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions)
return "", err return "", err
} }
req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
if ah.ua != "" { if ah.header != nil {
req.Header.Set("User-Agent", ah.ua) for k, v := range ah.header {
req.Header[k] = append(req.Header[k], v...)
}
} }
resp, err := ctxhttp.Do(ctx, ah.client, req) resp, err := ctxhttp.Do(ctx, ah.client, req)
@ -363,8 +403,10 @@ func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string,
return "", err return "", err
} }
if ah.ua != "" { if ah.header != nil {
req.Header.Set("User-Agent", ah.ua) for k, v := range ah.header {
req.Header[k] = append(req.Header[k], v...)
}
} }
reqParams := req.URL.Query() reqParams := req.URL.Query()
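
NewDockerAuthorizer replaces the fixed User-Agent string with arbitrary headers and functional options; NewAuthorizer remains as a deprecated wrapper over it. A sketch of constructing one (the credentials and header values are placeholders):

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

func newAuthorizer() docker.Authorizer {
	return docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthHeader(http.Header{
			"User-Agent": []string{"example-tool/0.1"},
		}),
		docker.WithAuthCreds(func(host string) (string, string, error) {
			// Return a username and secret (or "" plus a token) per host.
			return "someuser", "somesecret", nil
		}),
	)
}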

vendor/github.com/containerd/containerd/remotes/docker/fetcher.go (generated, vendored)

@ -23,7 +23,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"path" "net/url"
"strings" "strings"
"github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/errdefs"
@ -32,7 +32,6 @@ import (
"github.com/docker/distribution/registry/api/errcode" "github.com/docker/distribution/registry/api/errcode"
ocispec "github.com/opencontainers/image-spec/specs-go/v1" ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/sirupsen/logrus"
) )
type dockerFetcher struct { type dockerFetcher struct {
@ -40,26 +39,46 @@ type dockerFetcher struct {
} }
func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields( ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest))
logrus.Fields{
"base": r.base.String(),
"digest": desc.Digest,
},
))
urls, err := r.getV2URLPaths(ctx, desc) hosts := r.filterHosts(HostCapabilityPull)
if err != nil { if len(hosts) == 0 {
return nil, err return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
} }
ctx, err = contextWithRepositoryScope(ctx, r.refspec, false) ctx, err := contextWithRepositoryScope(ctx, r.refspec, false)
if err != nil { if err != nil {
return nil, err return nil, err
} }
return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
for _, u := range urls { // firstly try fetch via external urls
rc, err := r.open(ctx, u, desc.MediaType, offset) for _, us := range desc.URLs {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us))
u, err := url.Parse(us)
if err != nil {
log.G(ctx).WithError(err).Debug("failed to parse")
continue
}
log.G(ctx).Debug("trying alternative url")
// Try this first, parse it
host := RegistryHost{
Client: http.DefaultClient,
Host: u.Host,
Scheme: u.Scheme,
Path: u.Path,
Capabilities: HostCapabilityPull,
}
req := r.request(host, http.MethodGet)
// Strip namespace from base
req.path = u.Path
if u.RawQuery != "" {
req.path = req.path + "?" + u.RawQuery
}
rc, err := r.open(ctx, req, desc.MediaType, offset)
if err != nil { if err != nil {
if errdefs.IsNotFound(err) { if errdefs.IsNotFound(err) {
continue // try one of the other urls. continue // try one of the other urls.
@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
return rc, nil return rc, nil
} }
// Try manifests endpoints for manifests types
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
images.MediaTypeDockerSchema1Manifest,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
for _, host := range r.hosts {
req := r.request(host, http.MethodGet, "manifests", desc.Digest.String())
rc, err := r.open(ctx, req, desc.MediaType, offset)
if err != nil {
if errdefs.IsNotFound(err) {
continue // try another host
}
return nil, err
}
return rc, nil
}
}
// Finally use blobs endpoints
for _, host := range r.hosts {
req := r.request(host, http.MethodGet, "blobs", desc.Digest.String())
rc, err := r.open(ctx, req, desc.MediaType, offset)
if err != nil {
if errdefs.IsNotFound(err) {
continue // try another host
}
return nil, err
}
return rc, nil
}
return nil, errors.Wrapf(errdefs.ErrNotFound, return nil, errors.Wrapf(errdefs.ErrNotFound,
"could not fetch content descriptor %v (%v) from remote", "could not fetch content descriptor %v (%v) from remote",
desc.Digest, desc.MediaType) desc.Digest, desc.MediaType)
@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
}) })
} }
func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) { func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) {
req, err := http.NewRequest(http.MethodGet, u, nil) req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
if err != nil {
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
if offset > 0 { if offset > 0 {
// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
// will return the header without supporting the range. The content // will return the header without supporting the range. The content
// range must always be checked. // range must always be checked.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} }
resp, err := r.doRequestWithRetries(ctx, req, nil) resp, err := req.doWithRetries(ctx, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode == http.StatusNotFound { if resp.StatusCode == http.StatusNotFound {
return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String())
} }
var registryErr errcode.Errors var registryErr errcode.Errors
if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 { if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
} }
return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error()) return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
} }
if offset > 0 { if offset > 0 {
cr := resp.Header.Get("content-range") cr := resp.Header.Get("content-range")
@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int
return resp.Body, nil return resp.Body, nil
} }
// getV2URLPaths generates the candidate urls paths for the object based on the
// set of hints and the provided object id. URLs are returned in the order of
// most to least likely succeed.
func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) {
var urls []string
if len(desc.URLs) > 0 {
// handle fetch via external urls.
for _, u := range desc.URLs {
log.G(ctx).WithField("url", u).Debug("adding alternative url")
urls = append(urls, u)
}
}
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
images.MediaTypeDockerSchema1Manifest,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
urls = append(urls, r.url(path.Join("manifests", desc.Digest.String())))
}
// always fallback to attempting to get the object out of the blobs store.
urls = append(urls, r.url(path.Join("blobs", desc.Digest.String())))
return urls, nil
}
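
Fetching now walks registry hosts instead of a precomputed URL list: external desc.URLs are tried first, then the manifests endpoint for manifest media types, then the blobs endpoint, moving on to the next host on not-found. The Fetcher interface seen by callers is unchanged; a brief hedged sketch:

import (
	"context"
	"io/ioutil"

	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchBlob reads one descriptor's content through a resolver-provided fetcher.
func fetchBlob(ctx context.Context, resolver remotes.Resolver, ref string, desc ocispec.Descriptor) ([]byte, error) {
	fetcher, err := resolver.Fetcher(ctx, ref)
	if err != nil {
		return nil, err
	}
	rc, err := fetcher.Fetch(ctx, desc) // the host fallback happens inside
	if err != nil {
		return nil, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}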

vendor/github.com/containerd/containerd/remotes/docker/pusher.go (generated, vendored)

@ -21,7 +21,7 @@ import (
"io" "io"
"io/ioutil" "io/ioutil"
"net/http" "net/http"
"path" "net/url"
"strings" "strings"
"time" "time"
@ -37,7 +37,7 @@ import (
type dockerPusher struct { type dockerPusher struct {
*dockerBase *dockerBase
tag string object string
// TODO: namespace tracker // TODO: namespace tracker
tracker StatusTracker tracker StatusTracker
@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
return nil, errors.Wrap(err, "failed to get status") return nil, errors.Wrap(err, "failed to get status")
} }
hosts := p.filterHosts(HostCapabilityPush)
if len(hosts) == 0 {
return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts")
}
var ( var (
isManifest bool isManifest bool
existCheck string existCheck []string
host = hosts[0]
) )
switch desc.MediaType { switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
isManifest = true isManifest = true
if p.tag == "" { existCheck = getManifestPath(p.object, desc.Digest)
existCheck = path.Join("manifests", desc.Digest.String())
} else {
existCheck = path.Join("manifests", p.tag)
}
default: default:
existCheck = path.Join("blobs", desc.Digest.String()) existCheck = []string{"blobs", desc.Digest.String()}
} }
req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil) req := p.request(host, http.MethodHead, existCheck...)
if err != nil { req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", "))
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to")
resp, err := p.doRequestWithRetries(ctx, req, nil)
resp, err := req.doWithRetries(ctx, nil)
if err != nil { if err != nil {
if errors.Cause(err) != ErrInvalidAuthorization { if errors.Cause(err) != ErrInvalidAuthorization {
return nil, err return nil, err
@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
} else { } else {
if resp.StatusCode == http.StatusOK { if resp.StatusCode == http.StatusOK {
var exists bool var exists bool
if isManifest && p.tag != "" { if isManifest && existCheck[1] != desc.Digest.String() {
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader == desc.Digest { if dgstHeader == desc.Digest {
exists = true exists = true
@ -117,28 +118,16 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
} }
if isManifest { if isManifest {
var putPath string putPath := getManifestPath(p.object, desc.Digest)
if p.tag != "" { req = p.request(host, http.MethodPut, putPath...)
putPath = path.Join("manifests", p.tag) req.header.Add("Content-Type", desc.MediaType)
} else {
putPath = path.Join("manifests", desc.Digest.String())
}
req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", desc.MediaType)
} else { } else {
// Start upload request // Start upload request
req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil) req = p.request(host, http.MethodPost, "blobs", "uploads/")
if err != nil {
return nil, err
}
var resp *http.Response var resp *http.Response
if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" {
req = requestWithMountFrom(req, desc.Digest.String(), fromRepo) preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo)
pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo)
// NOTE: the fromRepo might be private repo and // NOTE: the fromRepo might be private repo and
@ -147,7 +136,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
// //
// for the private repo, we should remove mount-from // for the private repo, we should remove mount-from
// query and send the request again. // query and send the request again.
resp, err = p.doRequest(pctx, req) resp, err = preq.do(pctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -157,16 +146,11 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
resp.Body.Close() resp.Body.Close()
resp = nil resp = nil
req, err = removeMountFromQuery(req)
if err != nil {
return nil, err
}
} }
} }
if resp == nil { if resp == nil {
resp, err = p.doRequestWithRetries(ctx, req, nil) resp, err = req.doWithRetries(ctx, nil)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -186,31 +170,41 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
return nil, errors.Errorf("unexpected response: %s", resp.Status) return nil, errors.Errorf("unexpected response: %s", resp.Status)
} }
location := resp.Header.Get("Location") var (
location = resp.Header.Get("Location")
lurl *url.URL
lhost = host
)
// Support paths without host in location // Support paths without host in location
if strings.HasPrefix(location, "/") { if strings.HasPrefix(location, "/") {
// Support location string containing path and query lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
qmIndex := strings.Index(location, "?") if err != nil {
if qmIndex > 0 { return nil, errors.Wrapf(err, "unable to parse location %v", location)
u := p.base }
u.Path = location[:qmIndex] } else {
u.RawQuery = location[qmIndex+1:] if !strings.Contains(location, "://") {
location = u.String() location = lhost.Scheme + "://" + location
} else { }
u := p.base lurl, err = url.Parse(location)
u.Path = location if err != nil {
location = u.String() return nil, errors.Wrapf(err, "unable to parse location %v", location)
}
if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
lhost.Scheme = lurl.Scheme
lhost.Host = lurl.Host
log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination")
// Strip authorizer if change to host or scheme
lhost.Authorizer = nil
} }
} }
q := lurl.Query()
req, err = http.NewRequest(http.MethodPut, location, nil)
if err != nil {
return nil, err
}
q := req.URL.Query()
q.Add("digest", desc.Digest.String()) q.Add("digest", desc.Digest.String())
req.URL.RawQuery = q.Encode()
req = p.request(lhost, http.MethodPut)
req.path = lurl.Path + "?" + q.Encode()
} }
p.tracker.SetStatus(ref, Status{ p.tracker.SetStatus(ref, Status{
Status: content.Status{ Status: content.Status{
@ -225,13 +219,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
pr, pw := io.Pipe() pr, pw := io.Pipe()
respC := make(chan *http.Response, 1) respC := make(chan *http.Response, 1)
body := ioutil.NopCloser(pr)
req.Body = ioutil.NopCloser(pr) req.body = func() (io.ReadCloser, error) {
req.ContentLength = desc.Size if body == nil {
return nil, errors.New("cannot reuse body, request must be retried")
}
// Only use the body once since pipe cannot be seeked
ob := body
body = nil
return ob, nil
}
req.size = desc.Size
go func() { go func() {
defer close(respC) defer close(respC)
resp, err = p.doRequest(ctx, req) resp, err = req.do(ctx)
if err != nil { if err != nil {
pr.CloseWithError(err) pr.CloseWithError(err)
return return
@ -257,6 +260,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
}, nil }, nil
} }
func getManifestPath(object string, dgst digest.Digest) []string {
if i := strings.IndexByte(object, '@'); i >= 0 {
if object[i+1:] != dgst.String() {
// use digest, not tag
object = ""
} else {
// strip @<digest> for registry path to make tag
object = object[:i]
}
}
if object == "" {
return []string{"manifests", dgst.String()}
}
return []string{"manifests", object}
}
type pushWriter struct { type pushWriter struct {
base *dockerBase base *dockerBase
ref string ref string
@ -330,7 +352,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
} }
if size > 0 && size != status.Offset { if size > 0 && size != status.Offset {
return errors.Errorf("unxpected size %d, expected %d", status.Offset, size) return errors.Errorf("unexpected size %d, expected %d", status.Offset, size)
} }
if expected == "" { if expected == "" {
@ -355,24 +377,15 @@ func (pw *pushWriter) Truncate(size int64) error {
return errors.New("cannot truncate remote upload") return errors.New("cannot truncate remote upload")
} }
func requestWithMountFrom(req *http.Request, mount, from string) *http.Request { func requestWithMountFrom(req *request, mount, from string) *request {
q := req.URL.Query() creq := *req
q.Set("mount", mount) sep := "?"
q.Set("from", from) if strings.Contains(creq.path, sep) {
req.URL.RawQuery = q.Encode() sep = "&"
return req
}
func removeMountFromQuery(req *http.Request) (*http.Request, error) {
req, err := copyRequest(req)
if err != nil {
return nil, err
} }
q := req.URL.Query() creq.path = creq.path + sep + "mount=" + mount + "&from=" + from
q.Del("mount")
q.Del("from") return &creq
req.URL.RawQuery = q.Encode()
return req, nil
} }
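
The pusher now picks a push-capable host and derives the manifest path from the reference's object: a tag pushes by tag, while a missing or mismatched tag falls back to pushing by digest (see getManifestPath). Client-side usage is unchanged; a hedged sketch (the registry reference is a placeholder):

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/remotes"
)

func pushImage(ctx context.Context, client *containerd.Client, img containerd.Image, resolver remotes.Resolver) error {
	// This reference ends in a tag, so the manifest is published under
	// "manifests/latest"; a ref ending in @<digest> would push by digest.
	return client.Push(ctx, "registry.example.com/example/app:latest", img.Target(),
		containerd.WithResolver(resolver),
	)
}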

vendor/github.com/containerd/containerd/remotes/docker/registry.go (new file, generated, vendored)

@ -0,0 +1,202 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"net/http"
)
// HostCapabilities represent the capabilities of the registry
// host. This also represents the set of operations for which
// the registry host may be trusted to perform.
//
// For example pushing is a capability which should only be
// performed on an upstream source, not a mirror.
// Resolving (the process of converting a name into a digest)
// must be considered a trusted operation and only done by
// a host which is trusted (or more preferably by secure process
// which can prove the provenance of the mapping). A public
// mirror should never be trusted to do a resolve action.
//
// | Registry Type | Pull | Resolve | Push |
// |------------------|------|---------|------|
// | Public Registry | yes | yes | yes |
// | Private Registry | yes | yes | yes |
// | Public Mirror | yes | no | no |
// | Private Mirror | yes | yes | no |
type HostCapabilities uint8
const (
// HostCapabilityPull represents the capability to fetch manifests
// and blobs by digest
HostCapabilityPull HostCapabilities = 1 << iota
// HostCapabilityResolve represents the capability to fetch manifests
// by name
HostCapabilityResolve
// HostCapabilityPush represents the capability to push blobs and
// manifests
HostCapabilityPush
// Reserved for future capabilities (i.e. search, catalog, remove)
)
func (c HostCapabilities) Has(t HostCapabilities) bool {
return c&t == t
}
// RegistryHost represents a complete configuration for a registry
// host, representing the capabilities, authorizations, connection
// configuration, and location.
type RegistryHost struct {
Client *http.Client
Authorizer Authorizer
Host string
Scheme string
Path string
Capabilities HostCapabilities
}
// RegistryHosts fetches the registry hosts for a given namespace,
// provided by the host component of an distribution image reference.
type RegistryHosts func(string) ([]RegistryHost, error)
// Registries joins multiple registry configuration functions, using the same
// order as provided within the arguments. When an empty registry configuration
// is returned with a nil error, the next function will be called.
// NOTE: This function will not join configurations; as soon as a non-empty
// configuration is returned from a configuration function, it will be returned
// to the caller.
func Registries(registries ...RegistryHosts) RegistryHosts {
return func(host string) ([]RegistryHost, error) {
for _, registry := range registries {
config, err := registry(host)
if err != nil {
return config, err
}
if len(config) > 0 {
return config, nil
}
}
return nil, nil
}
}
type registryOpts struct {
authorizer Authorizer
plainHTTP func(string) (bool, error)
host func(string) (string, error)
client *http.Client
}
// RegistryOpt defines a registry default option
type RegistryOpt func(*registryOpts)
// WithPlainHTTP configures registries to use plaintext http scheme
// for the provided host match function.
func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt {
return func(opts *registryOpts) {
opts.plainHTTP = f
}
}
// WithAuthorizer configures the default authorizer for a registry
func WithAuthorizer(a Authorizer) RegistryOpt {
return func(opts *registryOpts) {
opts.authorizer = a
}
}
// WithHostTranslator defines the default translator to use for registry hosts
func WithHostTranslator(h func(string) (string, error)) RegistryOpt {
return func(opts *registryOpts) {
opts.host = h
}
}
// WithClient configures the default http client for a registry
func WithClient(c *http.Client) RegistryOpt {
return func(opts *registryOpts) {
opts.client = c
}
}
// ConfigureDefaultRegistries is used to create a default configuration for
// registries. For more advanced configurations or per-domain setups,
// the RegistryHosts interface should be used directly.
// NOTE: This function will always return a non-empty value or error
func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts {
var opts registryOpts
for _, opt := range ropts {
opt(&opts)
}
return func(host string) ([]RegistryHost, error) {
config := RegistryHost{
Client: opts.client,
Authorizer: opts.authorizer,
Host: host,
Scheme: "https",
Path: "/v2",
Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush,
}
if config.Client == nil {
config.Client = http.DefaultClient
}
if opts.plainHTTP != nil {
match, err := opts.plainHTTP(host)
if err != nil {
return nil, err
}
if match {
config.Scheme = "http"
}
}
if opts.host != nil {
var err error
config.Host, err = opts.host(config.Host)
if err != nil {
return nil, err
}
} else if host == "docker.io" {
config.Host = "registry-1.docker.io"
}
return []RegistryHost{config}, nil
}
}
// MatchAllHosts is a host match function which is always true.
func MatchAllHosts(string) (bool, error) {
return true, nil
}
// MatchLocalhost is a host match function which returns true for
// localhost.
func MatchLocalhost(host string) (bool, error) {
for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} {
if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') {
return true, nil
}
}
return host == "::1", nil
}
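
RegistryHosts is the extension point the resolver below consumes: ConfigureDefaultRegistries builds a single-host configuration with sane defaults, and the capability flags gate what each host may be used for. A hedged sketch that feeds it into ResolverOptions.Hosts (the authorizer could come from the NewDockerAuthorizer sketch earlier):

import (
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
)

func newResolver(auth docker.Authorizer) remotes.Resolver {
	hosts := docker.ConfigureDefaultRegistries(
		docker.WithAuthorizer(auth),
		// Plain HTTP only for localhost registries; everything else stays HTTPS.
		docker.WithPlainHTTP(docker.MatchLocalhost),
	)
	// Example capability check: refuse to push through a pull-only mirror.
	//   cfgs, _ := hosts("docker.io")
	//   canPush := cfgs[0].Capabilities.Has(docker.HostCapabilityPush)
	return docker.NewResolver(docker.ResolverOptions{
		Hosts: hosts,
	})
}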

vendor/github.com/containerd/containerd/remotes/docker/resolver.go (generated, vendored)

@ -18,9 +18,10 @@ package docker
import ( import (
"context" "context"
"fmt"
"io" "io"
"io/ioutil"
"net/http" "net/http"
"net/url"
"path" "path"
"strings" "strings"
@ -46,6 +47,19 @@ var (
// ErrInvalidAuthorization is used when credentials are passed to a server but // ErrInvalidAuthorization is used when credentials are passed to a server but
// those credentials are rejected. // those credentials are rejected.
ErrInvalidAuthorization = errors.New("authorization failed") ErrInvalidAuthorization = errors.New("authorization failed")
// MaxManifestSize represents the largest size accepted from a registry
// during resolution. Larger manifests may be accepted using a
// resolution method other than the registry.
//
// NOTE: The max supported layers by some runtimes is 128 and individual
// layers will not contribute more than 256 bytes, making a
// reasonable limit for a large image manifests of 32K bytes.
// 4M bytes represents a much larger upper bound for images which may
// contain large annotations or be non-images. A proper manifest
// design puts large metadata in subobjects, as is consistent the
// intent of the manifest design.
MaxManifestSize int64 = 4 * 1048 * 1048
) )
// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. // Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
@ -72,31 +86,38 @@ type Authorizer interface {
// ResolverOptions are used to configured a new Docker register resolver // ResolverOptions are used to configured a new Docker register resolver
type ResolverOptions struct { type ResolverOptions struct {
// Authorizer is used to authorize registry requests // Hosts returns registry host configurations for a namespace.
Authorizer Authorizer Hosts RegistryHosts
// Credentials provides username and secret given a host.
// If username is empty but a secret is given, that secret
// is interpreted as a long lived token.
// Deprecated: use Authorizer
Credentials func(string) (string, string, error)
// Host provides the hostname given a namespace.
Host func(string) (string, error)
// Headers are the HTTP request header fields sent by the resolver // Headers are the HTTP request header fields sent by the resolver
Headers http.Header Headers http.Header
// PlainHTTP specifies to use plain http and not https
PlainHTTP bool
// Client is the http client to used when making registry requests
Client *http.Client
// Tracker is used to track uploads to the registry. This is used // Tracker is used to track uploads to the registry. This is used
// since the registry does not have upload tracking and the existing // since the registry does not have upload tracking and the existing
// mechanism for getting blob upload status is expensive. // mechanism for getting blob upload status is expensive.
Tracker StatusTracker Tracker StatusTracker
// Authorizer is used to authorize registry requests
// Deprecated: use Hosts
Authorizer Authorizer
// Credentials provides username and secret given a host.
// If username is empty but a secret is given, that secret
// is interpreted as a long lived token.
// Deprecated: use Hosts
Credentials func(string) (string, string, error)
// Host provides the hostname given a namespace.
// Deprecated: use Hosts
Host func(string) (string, error)
// PlainHTTP specifies to use plain http and not https
// Deprecated: use Hosts
PlainHTTP bool
// Client is the http client to used when making registry requests
// Deprecated: use Hosts
Client *http.Client
} }
// DefaultHost is the default host function. // DefaultHost is the default host function.
@ -108,13 +129,10 @@ func DefaultHost(ns string) (string, error) {
} }
type dockerResolver struct { type dockerResolver struct {
auth Authorizer hosts RegistryHosts
host func(string) (string, error) header http.Header
headers http.Header resolveHeader http.Header
uagent string tracker StatusTracker
plainHTTP bool
client *http.Client
tracker StatusTracker
} }
// NewResolver returns a new resolver to a Docker registry // NewResolver returns a new resolver to a Docker registry
@ -122,39 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver {
if options.Tracker == nil { if options.Tracker == nil {
options.Tracker = NewInMemoryTracker() options.Tracker = NewInMemoryTracker()
} }
if options.Host == nil {
options.Host = DefaultHost
}
if options.Headers == nil { if options.Headers == nil {
options.Headers = make(http.Header) options.Headers = make(http.Header)
} }
if _, ok := options.Headers["User-Agent"]; !ok {
options.Headers.Set("User-Agent", "containerd/"+version.Version)
}
resolveHeader := http.Header{}
if _, ok := options.Headers["Accept"]; !ok { if _, ok := options.Headers["Accept"]; !ok {
// set headers for all the types we support for resolution. // set headers for all the types we support for resolution.
options.Headers.Set("Accept", strings.Join([]string{ resolveHeader.Set("Accept", strings.Join([]string{
images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2Manifest,
images.MediaTypeDockerSchema2ManifestList, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageManifest,
ocispec.MediaTypeImageIndex, "*"}, ", ")) ocispec.MediaTypeImageIndex, "*/*"}, ", "))
}
ua := options.Headers.Get("User-Agent")
if ua != "" {
options.Headers.Del("User-Agent")
} else { } else {
ua = "containerd/" + version.Version resolveHeader["Accept"] = options.Headers["Accept"]
delete(options.Headers, "Accept")
} }
if options.Authorizer == nil { if options.Hosts == nil {
options.Authorizer = NewAuthorizer(options.Client, options.Credentials) opts := []RegistryOpt{}
options.Authorizer.(*dockerAuthorizer).ua = ua if options.Host != nil {
opts = append(opts, WithHostTranslator(options.Host))
}
if options.Authorizer == nil {
options.Authorizer = NewDockerAuthorizer(
WithAuthClient(options.Client),
WithAuthHeader(options.Headers),
WithAuthCreds(options.Credentials))
}
opts = append(opts, WithAuthorizer(options.Authorizer))
if options.Client != nil {
opts = append(opts, WithClient(options.Client))
}
if options.PlainHTTP {
opts = append(opts, WithPlainHTTP(MatchAllHosts))
} else {
opts = append(opts, WithPlainHTTP(MatchLocalhost))
}
options.Hosts = ConfigureDefaultRegistries(opts...)
} }
return &dockerResolver{ return &dockerResolver{
auth: options.Authorizer, hosts: options.Hosts,
host: options.Host, header: options.Headers,
headers: options.Headers, resolveHeader: resolveHeader,
uagent: ua, tracker: options.Tracker,
plainHTTP: options.PlainHTTP,
client: options.Client,
tracker: options.Tracker,
} }
} }
@ -201,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
return "", ocispec.Descriptor{}, err return "", ocispec.Descriptor{}, err
} }
fetcher := dockerFetcher{
dockerBase: base,
}
var ( var (
urls []string lastErr error
dgst = refspec.Digest() paths [][]string
dgst = refspec.Digest()
caps = HostCapabilityPull
) )
if dgst != "" { if dgst != "" {
@@ -218,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
 		}
 
 		// turns out, we have a valid digest, make a url.
-		urls = append(urls, fetcher.url("manifests", dgst.String()))
+		paths = append(paths, []string{"manifests", dgst.String()})
 
 		// fallback to blobs on not found.
-		urls = append(urls, fetcher.url("blobs", dgst.String()))
+		paths = append(paths, []string{"blobs", dgst.String()})
 	} else {
-		urls = append(urls, fetcher.url("manifests", refspec.Object))
+		// Add
+		paths = append(paths, []string{"manifests", refspec.Object})
+		caps |= HostCapabilityResolve
+	}
+
+	hosts := base.filterHosts(caps)
+	if len(hosts) == 0 {
+		return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts")
 	}
 
 	ctx, err = contextWithRepositoryScope(ctx, refspec, false)
 	if err != nil {
 		return "", ocispec.Descriptor{}, err
 	}
-	for _, u := range urls {
-		req, err := http.NewRequest(http.MethodHead, u, nil)
-		if err != nil {
-			return "", ocispec.Descriptor{}, err
-		}
-		req.Header = r.headers
-
-		log.G(ctx).Debug("resolving")
-		resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
-		if err != nil {
-			if errors.Cause(err) == ErrInvalidAuthorization {
-				err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
-			}
-			return "", ocispec.Descriptor{}, err
-		}
-		resp.Body.Close() // don't care about body contents.
-
-		if resp.StatusCode > 299 {
-			if resp.StatusCode == http.StatusNotFound {
-				continue
-			}
-			return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
-		}
-		size := resp.ContentLength
-
-		// this is the only point at which we trust the registry. we use the
-		// content headers to assemble a descriptor for the name. when this becomes
-		// more robust, we mostly get this information from a secure trust store.
-		dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
-		contentType := getManifestMediaType(resp)
-
-		if dgstHeader != "" && size != -1 {
-			if err := dgstHeader.Validate(); err != nil {
-				return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
-			}
-			dgst = dgstHeader
-		} else {
-			log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
-
-			req, err := http.NewRequest(http.MethodGet, u, nil)
-			if err != nil {
-				return "", ocispec.Descriptor{}, err
-			}
-			req.Header = r.headers
-
-			resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
-			if err != nil {
-				return "", ocispec.Descriptor{}, err
-			}
-			defer resp.Body.Close()
-
-			bodyReader := countingReader{reader: resp.Body}
-
-			contentType = getManifestMediaType(resp)
-			if contentType == images.MediaTypeDockerSchema1Manifest {
-				b, err := schema1.ReadStripSignature(&bodyReader)
-				if err != nil {
-					return "", ocispec.Descriptor{}, err
-				}
-
-				dgst = digest.FromBytes(b)
-			} else {
-				dgst, err = digest.FromReader(&bodyReader)
-				if err != nil {
-					return "", ocispec.Descriptor{}, err
-				}
-			}
-			size = bodyReader.bytesRead
-		}
-
-		desc := ocispec.Descriptor{
-			Digest:    dgst,
-			MediaType: contentType,
-			Size:      size,
-		}
-
-		log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
-		return ref, desc, nil
-	}
-
-	return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref)
+
+	for _, u := range paths {
+		for _, host := range hosts {
+			ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
+
+			req := base.request(host, http.MethodHead, u...)
+			for key, value := range r.resolveHeader {
+				req.header[key] = append(req.header[key], value...)
+			}
+
+			log.G(ctx).Debug("resolving")
+			resp, err := req.doWithRetries(ctx, nil)
+			if err != nil {
+				if errors.Cause(err) == ErrInvalidAuthorization {
+					err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
+				}
+				return "", ocispec.Descriptor{}, err
+			}
+			resp.Body.Close() // don't care about body contents.
+
+			if resp.StatusCode > 299 {
+				if resp.StatusCode == http.StatusNotFound {
+					continue
+				}
+				return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+			}
+			size := resp.ContentLength
+			contentType := getManifestMediaType(resp)
+
+			// if no digest was provided, then only a resolve
+			// trusted registry was contacted, in this case use
+			// the digest header (or content from GET)
+			if dgst == "" {
+				// this is the only point at which we trust the registry. we use the
+				// content headers to assemble a descriptor for the name. when this becomes
+				// more robust, we mostly get this information from a secure trust store.
+				dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+
+				if dgstHeader != "" && size != -1 {
+					if err := dgstHeader.Validate(); err != nil {
+						return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
+					}
+					dgst = dgstHeader
+				}
+			}
+			if dgst == "" || size == -1 {
+				log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead")
+
+				req = base.request(host, http.MethodGet, u...)
+				for key, value := range r.resolveHeader {
+					req.header[key] = append(req.header[key], value...)
+				}
+
+				resp, err := req.doWithRetries(ctx, nil)
+				if err != nil {
+					return "", ocispec.Descriptor{}, err
+				}
+				defer resp.Body.Close()
+
+				bodyReader := countingReader{reader: resp.Body}
+
+				contentType = getManifestMediaType(resp)
+				if dgst == "" {
+					if contentType == images.MediaTypeDockerSchema1Manifest {
+						b, err := schema1.ReadStripSignature(&bodyReader)
+						if err != nil {
+							return "", ocispec.Descriptor{}, err
+						}
+
+						dgst = digest.FromBytes(b)
+					} else {
+						dgst, err = digest.FromReader(&bodyReader)
+						if err != nil {
+							return "", ocispec.Descriptor{}, err
+						}
+					}
+				} else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil {
+					return "", ocispec.Descriptor{}, err
+				}
+				size = bodyReader.bytesRead
+			}
+			// Prevent resolving to excessively large manifests
+			if size > MaxManifestSize {
+				if lastErr == nil {
+					lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref)
+				}
+				continue
+			}
+
+			desc := ocispec.Descriptor{
+				Digest:    dgst,
+				MediaType: contentType,
+				Size:      size,
+			}
+
+			log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
+			return ref, desc, nil
+		}
+	}
+
+	if lastErr == nil {
+		lastErr = errors.Wrap(errdefs.ErrNotFound, ref)
+	}
+
+	return "", ocispec.Descriptor{}, lastErr
 }
 
 func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
@@ -336,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
 		return nil, err
 	}
 
-	// Manifests can be pushed by digest like any other object, but the passed in
-	// reference cannot take a digest without the associated content. A tag is allowed
-	// and will be used to tag pushed manifests.
-	if refspec.Object != "" && strings.Contains(refspec.Object, "@") {
-		return nil, errors.New("cannot use digest reference for push locator")
-	}
-
 	base, err := r.base(refspec)
 	if err != nil {
 		return nil, err
@@ -350,62 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
 	return dockerPusher{
 		dockerBase: base,
-		tag:        refspec.Object,
+		object:     refspec.Object,
 		tracker:    r.tracker,
 	}, nil
 }
 
 type dockerBase struct {
-	refspec reference.Spec
-	base    url.URL
-	uagent  string
-
-	client *http.Client
-	auth   Authorizer
+	refspec   reference.Spec
+	namespace string
+	hosts     []RegistryHost
+	header    http.Header
 }
 
 func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
-	var (
-		err  error
-		base url.URL
-	)
-
 	host := refspec.Hostname()
-	base.Host = host
-	if r.host != nil {
-		base.Host, err = r.host(host)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	base.Scheme = "https"
-	if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") {
-		base.Scheme = "http"
+	hosts, err := r.hosts(host)
+	if err != nil {
+		return nil, err
 	}
-
-	prefix := strings.TrimPrefix(refspec.Locator, host+"/")
-	base.Path = path.Join("/v2", prefix)
-
 	return &dockerBase{
-		refspec: refspec,
-		base:    base,
-		uagent:  r.uagent,
-		client:  r.client,
-		auth:    r.auth,
+		refspec:   refspec,
+		namespace: strings.TrimPrefix(refspec.Locator, host+"/"),
+		hosts:     hosts,
+		header:    r.header,
 	}, nil
 }
 
-func (r *dockerBase) url(ps ...string) string {
-	url := r.base
-	url.Path = path.Join(url.Path, path.Join(ps...))
-	return url.String()
+func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) {
+	for _, host := range r.hosts {
+		if host.Capabilities.Has(caps) {
+			hosts = append(hosts, host)
+		}
+	}
+	return
 }
 
-func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error {
+func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request {
+	header := http.Header{}
+	for key, value := range r.header {
+		header[key] = append(header[key], value...)
+	}
+	parts := append([]string{"/", host.Path, r.namespace}, ps...)
+	p := path.Join(parts...)
+	// Join strips trailing slash, re-add ending "/" if included
+	if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") {
+		p = p + "/"
+	}
+	return &request{
+		method: method,
+		path:   p,
+		header: header,
+		host:   host,
+	}
+}
+
+func (r *request) authorize(ctx context.Context, req *http.Request) error {
 	// Check if has header for host
-	if r.auth != nil {
-		if err := r.auth.Authorize(ctx, req); err != nil {
+	if r.host.Authorizer != nil {
+		if err := r.host.Authorizer.Authorize(ctx, req); err != nil {
 			return err
 		}
 	}
@@ -413,83 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error {
 	return nil
 }
 
-func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
-	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
-	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request")
-	req.Header.Set("User-Agent", r.uagent)
+type request struct {
+	method string
+	path   string
+	header http.Header
+	host   RegistryHost
+	body   func() (io.ReadCloser, error)
+	size   int64
+}
+
+func (r *request) do(ctx context.Context) (*http.Response, error) {
+	u := r.host.Scheme + "://" + r.host.Host + r.path
+	req, err := http.NewRequest(r.method, u, nil)
+	if err != nil {
+		return nil, err
+	}
+	req.Header = r.header
+	if r.body != nil {
+		body, err := r.body()
+		if err != nil {
+			return nil, err
+		}
+		req.Body = body
+		req.GetBody = r.body
+		if r.size > 0 {
+			req.ContentLength = r.size
+		}
+	}
+
+	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
+	log.G(ctx).WithFields(requestFields(req)).Debug("do request")
 	if err := r.authorize(ctx, req); err != nil {
 		return nil, errors.Wrap(err, "failed to authorize")
 	}
-	resp, err := ctxhttp.Do(ctx, r.client, req)
+	resp, err := ctxhttp.Do(ctx, r.host.Client, req)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to do request")
 	}
-	log.G(ctx).WithFields(logrus.Fields{
-		"status":           resp.Status,
-		"response.headers": resp.Header,
-	}).Debug("fetch response received")
+	log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
 	return resp, nil
 }
 
-func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
-	resp, err := r.doRequest(ctx, req)
+func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) {
+	resp, err := r.do(ctx)
 	if err != nil {
 		return nil, err
 	}
 
 	responses = append(responses, resp)
-	req, err = r.retryRequest(ctx, req, responses)
+	retry, err := r.retryRequest(ctx, responses)
 	if err != nil {
 		resp.Body.Close()
 		return nil, err
 	}
-	if req != nil {
+	if retry {
 		resp.Body.Close()
-		return r.doRequestWithRetries(ctx, req, responses)
+		return r.doWithRetries(ctx, responses)
 	}
 	return resp, err
 }
 
-func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
+func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) {
 	if len(responses) > 5 {
-		return nil, nil
+		return false, nil
 	}
 	last := responses[len(responses)-1]
 	switch last.StatusCode {
 	case http.StatusUnauthorized:
 		log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
-		if r.auth != nil {
-			if err := r.auth.AddResponses(ctx, responses); err == nil {
-				return copyRequest(req)
+		if r.host.Authorizer != nil {
+			if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil {
+				return true, nil
 			} else if !errdefs.IsNotImplemented(err) {
-				return nil, err
+				return false, err
 			}
 		}
-		return nil, nil
+
+		return false, nil
 	case http.StatusMethodNotAllowed:
 		// Support registries which have not properly implemented the HEAD method for
 		// manifests endpoint
-		if req.Method == http.MethodHead && strings.Contains(req.URL.Path, "/manifests/") {
-			// TODO: copy request?
-			req.Method = http.MethodGet
-			return copyRequest(req)
+		if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") {
+			r.method = http.MethodGet
+			return true, nil
 		}
 	case http.StatusRequestTimeout, http.StatusTooManyRequests:
-		return copyRequest(req)
+		return true, nil
 	}
 
 	// TODO: Handle 50x errors accounting for attempt history
-	return nil, nil
+	return false, nil
 }
 
-func copyRequest(req *http.Request) (*http.Request, error) {
-	ireq := *req
-	if ireq.GetBody != nil {
-		var err error
-		ireq.Body, err = ireq.GetBody()
-		if err != nil {
-			return nil, err
-		}
-	}
-	return &ireq, nil
+func (r *request) String() string {
+	return r.host.Scheme + "://" + r.host.Host + r.path
+}
+
+func requestFields(req *http.Request) logrus.Fields {
+	fields := map[string]interface{}{
+		"request.method": req.Method,
+	}
+	for k, vals := range req.Header {
+		k = strings.ToLower(k)
+		if k == "authorization" {
+			continue
+		}
+		for i, v := range vals {
+			field := "request.header." + k
+			if i > 0 {
+				field = fmt.Sprintf("%s.%d", field, i)
+			}
+			fields[field] = v
+		}
+	}
+
+	return logrus.Fields(fields)
+}
+
+func responseFields(resp *http.Response) logrus.Fields {
+	fields := map[string]interface{}{
+		"response.status": resp.Status,
+	}
+	for k, vals := range resp.Header {
+		k = strings.ToLower(k)
+		for i, v := range vals {
+			field := "response.header." + k
+			if i > 0 {
+				field = fmt.Sprintf("%s.%d", field, i)
+			}
+			fields[field] = v
+		}
+	}
+
+	return logrus.Fields(fields)
 }
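
Editor's note (not part of this commit): the hunks above replace the old single-URL resolver with the RegistryHost/RegistryHosts model. A minimal sketch of how a caller might assemble the new options follows; NewDockerAuthorizer, WithAuthClient, WithAuthCreds, ConfigureDefaultRegistries, WithAuthorizer, WithClient, WithPlainHTTP, MatchLocalhost, NewResolver and ResolverOptions.Hosts all appear in the vendored code above, while the helper name buildResolver, the use of http.DefaultClient, and the credential values are illustrative assumptions.

package example

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

// buildResolver wires the hosts-based resolver options together.
func buildResolver(username, secret string) {
	authorizer := docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthCreds(func(host string) (string, string, error) {
			// Return a username/secret pair for the given registry host.
			return username, secret, nil
		}),
	)

	hosts := docker.ConfigureDefaultRegistries(
		docker.WithAuthorizer(authorizer),
		docker.WithClient(http.DefaultClient),
		// Allow plain HTTP only for localhost registries, mirroring the default above.
		docker.WithPlainHTTP(docker.MatchLocalhost),
	)

	resolver := docker.NewResolver(docker.ResolverOptions{
		Hosts: hosts,
	})
	_ = resolver
}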

22  vendor/github.com/containerd/containerd/remotes/docker/scope.go (generated, vendored)

@@ -51,19 +51,25 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
 	if err != nil {
 		return nil, err
 	}
-	return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil
+	return WithScope(ctx, s), nil
+}
+
+// WithScope appends a custom registry auth scope to the context.
+func WithScope(ctx context.Context, scope string) context.Context {
+	var scopes []string
+	if v := ctx.Value(tokenScopesKey{}); v != nil {
+		scopes = v.([]string)
+		scopes = append(scopes, scope)
+	} else {
+		scopes = []string{scope}
+	}
+	return context.WithValue(ctx, tokenScopesKey{}, scopes)
 }
 
 // contextWithAppendPullRepositoryScope is used to append repository pull
 // scope into existing scopes indexed by the tokenScopesKey{}.
 func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context {
-	var scopes []string
-	if v := ctx.Value(tokenScopesKey{}); v != nil {
-		scopes = append(scopes, v.([]string)...)
-	}
-	scopes = append(scopes, fmt.Sprintf("repository:%s:pull", repo))
-	return context.WithValue(ctx, tokenScopesKey{}, scopes)
+	return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo))
 }
 
 // getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes.
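
For orientation (not part of the commit): WithScope, added above, lets callers pre-seed token scopes before resolving. A minimal sketch, where the wrapper name and repository string are illustrative and the scope format follows the "repository:<name>:pull" pattern used above.

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/remotes/docker"
)

// withExtraPullScope appends an additional pull scope for repo to the context.
func withExtraPullScope(ctx context.Context, repo string) context.Context {
	return docker.WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo))
}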

43  vendor/github.com/containerd/containerd/remotes/handlers.go (generated, vendored)

@@ -33,27 +33,46 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
+type refKeyPrefix struct{}
+
+// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing
+// data in the content store from the FetchHandler.
+//
+// Used in `MakeRefKey` to determine what the key prefix should be.
+func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context {
+	var values map[string]string
+	if v := ctx.Value(refKeyPrefix{}); v != nil {
+		values = v.(map[string]string)
+	} else {
+		values = make(map[string]string)
+	}
+
+	values[mediaType] = prefix
+	return context.WithValue(ctx, refKeyPrefix{}, values)
+}
+
 // MakeRefKey returns a unique reference for the descriptor. This reference can be
 // used to lookup ongoing processes related to the descriptor. This function
 // may look to the context to namespace the reference appropriately.
 func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
-	// TODO(stevvooe): Need better remote key selection here. Should be a
-	// product of the context, which may include information about the ongoing
-	// fetch process.
-	switch desc.MediaType {
-	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+	if v := ctx.Value(refKeyPrefix{}); v != nil {
+		values := v.(map[string]string)
+		if prefix := values[desc.MediaType]; prefix != "" {
+			return prefix + "-" + desc.Digest.String()
+		}
+	}
+
+	switch mt := desc.MediaType; {
+	case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest:
 		return "manifest-" + desc.Digest.String()
-	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+	case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex:
 		return "index-" + desc.Digest.String()
-	case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
-		images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip,
-		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
-		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip:
+	case images.IsLayerType(mt):
 		return "layer-" + desc.Digest.String()
-	case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+	case images.IsKnownConfig(mt):
 		return "config-" + desc.Digest.String()
 	default:
-		log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
+		log.G(ctx).Warnf("reference for unknown type: %s", mt)
 		return "unknown-" + desc.Digest.String()
 	}
 }
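
For orientation (not part of the commit): a sketch of how the new WithMediaTypeKeyPrefix hook changes MakeRefKey output. The wrapper name and the "custom-layer" prefix are arbitrary examples; with this context, refs for OCI layer blobs become "custom-layer-<digest>" instead of "layer-<digest>".

package example

import (
	"context"

	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// withCustomLayerRefs installs a custom ingest-ref prefix for OCI layer blobs.
func withCustomLayerRefs(ctx context.Context) context.Context {
	return remotes.WithMediaTypeKeyPrefix(ctx, ocispec.MediaTypeImageLayer, "custom-layer")
}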

26  vendor/github.com/containerd/containerd/rootfs/apply.go (generated, vendored)

@@ -48,6 +48,14 @@ type Layer struct {
 // Layers are applied in order they are given, making the first layer the
 // bottom-most layer in the layer chain.
 func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {
+	return ApplyLayersWithOpts(ctx, layers, sn, a, nil)
+}
+
+// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts.
+// The returned result is a chain id digest representing all the applied layers.
+// Layers are applied in order they are given, making the first layer the
+// bottom-most layer in the layer chain.
+func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) {
 	chain := make([]digest.Digest, len(layers))
 	for i, layer := range layers {
 		chain[i] = layer.Diff.Digest
@@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {
 			return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID)
 		}
 
-		if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) {
+		if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) {
 			return "", err
 		}
 	}
@@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) {
 // using the provided snapshotter and applier. If the layer was unpacked true
 // is returned, if the layer already exists false is returned.
 func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) {
+	return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil)
+}
+
+// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain,
+// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true
+// is returned, if the layer already exists false is returned.
+func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) {
 	var (
 		chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String()
 		applied bool
@@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) {
 		return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID)
 	}
 
-	if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil {
+	if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
 			return false, err
 		}
@@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) {
 		}
 	}
 	return applied, nil
+
 }
 
-func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error {
+func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error {
 	var (
 		parent  = identity.ChainID(chain[:len(chain)-1])
 		chainID = identity.ChainID(chain)
@@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error {
 		mounts, err = sn.Prepare(ctx, key, parent.String(), opts...)
 		if err != nil {
 			if errdefs.IsNotFound(err) && len(layers) > 1 {
-				if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil {
+				if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil {
 					if !errdefs.IsAlreadyExists(err) {
 						return err
 					}
@@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error {
 		}
 	}()
 
-	diff, err = a.Apply(ctx, layer.Blob, mounts)
+	diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...)
 	if err != nil {
 		err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
 		return err
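
For orientation (not part of the commit): the new *WithOpts variants thread a []diff.ApplyOpt slice through to the applier. A hedged sketch of a caller; the function name applyChain is illustrative, and real layer, snapshotter, and applier values are assumed to come from elsewhere.

package example

import (
	"context"

	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/rootfs"
	"github.com/containerd/containerd/snapshots"
)

// applyChain unpacks a layer chain while forwarding extra apply options to the applier.
func applyChain(ctx context.Context, layers []rootfs.Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (string, error) {
	chainID, err := rootfs.ApplyLayersWithOpts(ctx, layers, sn, a, applyOpts)
	if err != nil {
		return "", err
	}
	return chainID.String(), nil
}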

12  vendor/github.com/containerd/containerd/rootfs/diff.go (generated, vendored)

@@ -22,6 +22,7 @@ import (
 
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/namespaces"
 	"github.com/containerd/containerd/snapshots"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -31,6 +32,13 @@ import (
 // the content creation and the provided snapshotter and mount differ are used
 // for calculating the diff. The descriptor for the layer diff is returned.
 func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) {
+	// dctx is used to handle cleanup things just in case the param ctx
+	// has been canceled, which causes that the defer cleanup fails.
+	dctx := context.Background()
+	if ns, ok := namespaces.Namespace(ctx); ok {
+		dctx = namespaces.WithNamespace(dctx, ns)
+	}
+
 	info, err := sn.Stat(ctx, snapshotID)
 	if err != nil {
 		return ocispec.Descriptor{}, err
@@ -41,7 +49,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) {
 	if err != nil {
 		return ocispec.Descriptor{}, err
 	}
-	defer sn.Remove(ctx, lowerKey)
+	defer sn.Remove(dctx, lowerKey)
 
 	var upper []mount.Mount
 	if info.Kind == snapshots.KindActive {
@@ -55,7 +63,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) {
 		if err != nil {
 			return ocispec.Descriptor{}, err
 		}
-		defer sn.Remove(dctx, upperKey)
+		defer sn.Remove(dctx, upperKey)
 	}
 
 	return d.Compare(ctx, lower, upper, opts...)
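
For orientation (not part of the commit): the dctx change above detaches deferred cleanup from the request context while keeping its namespace, so snapshot removal still succeeds when the caller's context is canceled. The same pattern in isolation (the helper name is illustrative):

package example

import (
	"context"

	"github.com/containerd/containerd/namespaces"
)

// cleanupContext returns a background context that carries the namespace of ctx,
// suitable for deferred cleanup that must outlive a canceled request context.
func cleanupContext(ctx context.Context) context.Context {
	dctx := context.Background()
	if ns, ok := namespaces.Namespace(ctx); ok {
		dctx = namespaces.WithNamespace(dctx, ns)
	}
	return dctx
}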

243
vendor/github.com/containerd/containerd/unpacker.go сгенерированный поставляемый Normal file
Просмотреть файл

@@ -0,0 +1,243 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"context"
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/rootfs"
"github.com/opencontainers/go-digest"
"github.com/opencontainers/image-spec/identity"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
type layerState struct {
layer rootfs.Layer
downloaded bool
unpacked bool
}
type unpacker struct {
updateCh chan ocispec.Descriptor
snapshotter string
config UnpackConfig
c *Client
}
func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) {
snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter)
if err != nil {
return nil, err
}
var config UnpackConfig
for _, o := range rCtx.UnpackOpts {
if err := o(ctx, &config); err != nil {
return nil, err
}
}
return &unpacker{
updateCh: make(chan ocispec.Descriptor, 128),
snapshotter: snapshotter,
config: config,
c: c,
}, nil
}
func (u *unpacker) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor) error {
p, err := content.ReadBlob(ctx, u.c.ContentStore(), config)
if err != nil {
return err
}
var i ocispec.Image
if err := json.Unmarshal(p, &i); err != nil {
return errors.Wrap(err, "unmarshal image config")
}
diffIDs := i.RootFS.DiffIDs
if len(layers) != len(diffIDs) {
return errors.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs))
}
var (
sn = u.c.SnapshotService(u.snapshotter)
a = u.c.DiffService()
cs = u.c.ContentStore()
states []layerState
chain []digest.Digest
)
for i, desc := range layers {
states = append(states, layerState{
layer: rootfs.Layer{
Blob: desc,
Diff: ocispec.Descriptor{
MediaType: ocispec.MediaTypeImageLayer,
Digest: diffIDs[i],
},
},
})
}
for {
var layer ocispec.Descriptor
select {
case layer = <-u.updateCh:
case <-ctx.Done():
return ctx.Err()
}
log.G(ctx).WithField("desc", layer).Debug("layer downloaded")
for i := range states {
if states[i].layer.Blob.Digest != layer.Digest {
continue
}
// Different layers may have the same digest. When that
// happens, we should continue marking the next layer
// as downloaded.
if states[i].downloaded {
continue
}
states[i].downloaded = true
break
}
for i := range states {
if !states[i].downloaded {
break
}
if states[i].unpacked {
continue
}
log.G(ctx).WithFields(logrus.Fields{
"desc": states[i].layer.Blob,
"diff": states[i].layer.Diff,
}).Debug("unpack layer")
unpacked, err := rootfs.ApplyLayerWithOpts(ctx, states[i].layer, chain, sn, a,
u.config.SnapshotOpts, u.config.ApplyOpts)
if err != nil {
return err
}
if unpacked {
// Set the uncompressed label after the uncompressed
// digest has been verified through apply.
cinfo := content.Info{
Digest: states[i].layer.Blob.Digest,
Labels: map[string]string{
"containerd.io/uncompressed": states[i].layer.Diff.Digest.String(),
},
}
if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil {
return err
}
}
chain = append(chain, states[i].layer.Diff.Digest)
states[i].unpacked = true
log.G(ctx).WithFields(logrus.Fields{
"desc": states[i].layer.Blob,
"diff": states[i].layer.Diff,
}).Debug("layer unpacked")
}
// Check whether all layers are unpacked.
if states[len(states)-1].unpacked {
break
}
}
chainID := identity.ChainID(chain).String()
cinfo := content.Info{
Digest: config.Digest,
Labels: map[string]string{
fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID,
},
}
_, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter))
if err != nil {
return err
}
log.G(ctx).WithFields(logrus.Fields{
"config": config.Digest,
"chainID": chainID,
}).Debug("image unpacked")
return nil
}
func (u *unpacker) handlerWrapper(uctx context.Context, unpacks *int32) (func(images.Handler) images.Handler, *errgroup.Group) {
eg, uctx := errgroup.WithContext(uctx)
return func(f images.Handler) images.Handler {
var (
lock sync.Mutex
layers []ocispec.Descriptor
schema1 bool
)
return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
children, err := f.Handle(ctx, desc)
if err != nil {
return children, err
}
// `Pull` only supports one platform, so there is only
// one manifest to handle, and manifest list can be
// safely skipped.
// TODO: support multi-platform unpack.
switch mt := desc.MediaType; {
case mt == images.MediaTypeDockerSchema1Manifest:
lock.Lock()
schema1 = true
lock.Unlock()
case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest:
lock.Lock()
for _, child := range children {
if child.MediaType == images.MediaTypeDockerSchema2Config ||
child.MediaType == ocispec.MediaTypeImageConfig {
continue
}
layers = append(layers, child)
}
lock.Unlock()
case mt == images.MediaTypeDockerSchema2Config || mt == ocispec.MediaTypeImageConfig:
lock.Lock()
l := append([]ocispec.Descriptor{}, layers...)
lock.Unlock()
if len(l) > 0 {
atomic.AddInt32(unpacks, 1)
eg.Go(func() error {
return u.unpack(uctx, desc, l)
})
}
case images.IsLayerType(mt):
lock.Lock()
update := !schema1
lock.Unlock()
if update {
u.updateCh <- desc
}
}
return children, nil
})
}, eg
}
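
For orientation (not part of the commit): the unpacker above coordinates downloads and unpacking with a buffered channel plus an errgroup, unpacking each layer as soon as it and all layers below it have arrived. A stripped-down sketch of that coordination pattern, using plain strings instead of descriptors (all names here are illustrative):

package example

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// pipeline runs one consumer goroutine that drains a channel while producers
// send into it; errgroup propagates the first error and cancels the shared context.
func pipeline(ctx context.Context, items []string) error {
	eg, ctx := errgroup.WithContext(ctx)
	updates := make(chan string, 16)

	// Consumer: processes exactly one update per item, like unpack draining updateCh.
	eg.Go(func() error {
		for range items {
			select {
			case it := <-updates:
				fmt.Println("processed", it)
			case <-ctx.Done():
				return ctx.Err()
			}
		}
		return nil
	})

	// Producers: announce each item as it becomes available, like the handler wrapper.
	for _, it := range items {
		it := it
		eg.Go(func() error {
			select {
			case updates <- it:
				return nil
			case <-ctx.Done():
				return ctx.Err()
			}
		})
	}
	return eg.Wait()
}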

2  vendor/github.com/containerd/containerd/version/version.go (generated, vendored)

@@ -21,7 +21,7 @@ var (
 	Package = "github.com/containerd/containerd"
 
 	// Version holds the complete version number. Filled in at linking time.
-	Version = "1.2.0+unknown"
+	Version = "1.3.0+unknown"
 
 	// Revision is filled with the VCS (e.g. git) revision being used to build
 	// the program at linking time.

Some files were not shown because too many files have changed in this diff.