diff --git a/Gopkg.lock b/Gopkg.lock
index 78a14963..6e2710f7 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -80,14 +80,6 @@
   pruneopts = "NUT"
   revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
 
-[[projects]]
-  branch = "master"
-  digest = "1:fdd135f63857b858859a5a2e361a117622824826a78ce9d8c058144a0f89c7ba"
-  name = "github.com/containerd/cgroups"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "d596c78861b15c27a9ac82975aca4413390b9b49"
-
 [[projects]]
   branch = "master"
   digest = "1:da4daad2ec1737eec4ebeeed7afedb631711f96bbac0c361a17a4d0369d00c6d"
@@ -97,8 +89,7 @@
   revision = "0650fd9eeb50bab4fc99dceb9f2e14cf58f36e7f"
 
 [[projects]]
-  branch = "master"
-  digest = "1:4bda9967609021b43808181e8606bf60e15fd19500f9a0b2bbf74e4c6e4114c5"
+  digest = "1:a40540af5e9f6f578a0473156f6ddb554aba4e96de5527648cfcc4d074f3120a"
   name = "github.com/containerd/containerd"
   packages = [
     ".",
@@ -155,7 +146,8 @@
     "version",
   ]
   pruneopts = "NUT"
-  revision = "f2b6c31d0fa7f5541f6a37d525ed461b294d785f"
+  revision = "36cf5b690dcc00ff0f34ff7799209050c3d0c59a"
+  version = "v1.3.0"
 
 [[projects]]
   branch = "master"
@@ -202,14 +194,6 @@
   revision = "d57e8b8d97adfc4a6c224fe116714bf1a1f3beb9"
   version = "v3.3.12"
 
-[[projects]]
-  digest = "1:84c51760e27ff758724218a0b234a8264aa0992f49af4da50bedd682e196f996"
-  name = "github.com/coreos/go-systemd"
-  packages = ["dbus"]
-  pruneopts = "NUT"
-  revision = "e64a0ec8b42a61e2a9801dc1d0abe539dea79197"
-  version = "v20"
-
 [[projects]]
   digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
   name = "github.com/davecgh/go-spew"
@@ -219,7 +203,7 @@
   version = "v1.1.1"
 
 [[projects]]
-  digest = "1:c0a6ed57acd48a4a3ef5cfe1089fd2278827600415ab05af13444cd710881d6e"
+  digest = "1:e7719c3f4db444af314e43231f9bff4c4c9bcbb327faffe186bf2e026473abd9"
   name = "github.com/deislabs/cnab-go"
   packages = [
     "action",
@@ -232,8 +216,8 @@
     "utils/crud",
   ]
   pruneopts = "NUT"
-  revision = "0ce7659aab8dbd37cc912ea64cb78f403534de42"
-  version = "v0.3.2-beta1"
+  revision = "a1f17dcdaaff3e06c95ed85c3a34c21c35833831"
+  version = "v0.7.1-beta1"
 
 [[projects]]
   digest = "1:fdf76ff539694e8aba8e9c60e7cc1dead88d21c9dc09e5a330104adad0887440"
@@ -494,14 +478,6 @@
   pruneopts = "NUT"
   revision = "eeefdecb41b842af6dc652aaea4026e8403e62df"
 
-[[projects]]
-  digest = "1:08a552769d7194c0268ba31010f5da91878212603d11b4b551583a7586a76632"
-  name = "github.com/godbus/dbus"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "2ff6f7ffd60f0f2410b3105864bdd12c7894f844"
-  version = "v5.0.1"
-
 [[projects]]
   digest = "1:2dcc32af5bf38c37286c96f0077efa98007f67946b690fce98a3e07b9659bdf1"
   name = "github.com/gofrs/flock"
diff --git a/Gopkg.toml b/Gopkg.toml
index 01106779..5585b836 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -46,7 +46,7 @@ required = ["github.com/wadey/gocovmerge"]
 
 [[override]]
   name = "github.com/containerd/containerd"
-  branch = "master"
+  version = "v1.3.0"
 
 [[override]]
   name = "github.com/docker/cli"
@@ -54,7 +54,7 @@ required = ["github.com/wadey/gocovmerge"]
 
 [[override]]
   name = "github.com/deislabs/cnab-go"
-  version = "v0.3.2-beta1"
+  version = "v0.7.1-beta1"
 
 [[constraint]]
   name = "github.com/sirupsen/logrus"
@@ -82,7 +82,7 @@ required = ["github.com/wadey/gocovmerge"]
 
 [[override]]
   name = "github.com/docker/distribution"
-  revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580"
+  revision = "0d3efadf0154c2b8a4e7b6621fff9809655cc580" # version needed by containerd v1.3.0
 
 [[override]]
   name = "github.com/docker/swarmkit"
diff --git a/e2e/cnab_test.go b/e2e/cnab_test.go
index 3bcf9476..ab5e35cb 100644
--- a/e2e/cnab_test.go
+++ b/e2e/cnab_test.go
@@ -81,12 +81,10 @@ func TestCnabParameters(t *testing.T) {
 	cmd.Command = dockerCli.Command("app", "run", "--cnab-bundle-json", path.Join(testDir, "bundle.json"), "--name", "cnab-parameters",
 		"--set", "boolParam=true",
 		"--set", "stringParam=value",
-		"--set", "intParam=42",
-		"--set", "floatParam=3.14")
+		"--set", "intParam=42")
 	result := icmd.RunCmd(cmd).Assert(t, icmd.Success)
 	expectedOutput := `boolParam=true
 stringParam=value
-intParam=42
-floatParam=3.14`
+intParam=42`
 	assert.Assert(t, is.Contains(result.Combined(), expectedOutput))
 }
diff --git a/e2e/testdata/cnab-parameters/bundle.json b/e2e/testdata/cnab-parameters/bundle.json
index eebc783c..0930e393 100644
--- a/e2e/testdata/cnab-parameters/bundle.json
+++ b/e2e/testdata/cnab-parameters/bundle.json
@@ -40,12 +40,6 @@
       "destination": {
         "env": "INT_PARAM"
       }
-    },
-    "floatParam": {
-      "definition": "floatParam_type",
-      "destination": {
-        "env": "FLOAT_PARAM"
-      }
     }
   }
 }
\ No newline at end of file
diff --git a/e2e/testdata/cnab-parameters/cnab/app/run b/e2e/testdata/cnab-parameters/cnab/app/run
index fb2ddc4f..1fbc74e3 100755
--- a/e2e/testdata/cnab-parameters/cnab/app/run
+++ b/e2e/testdata/cnab-parameters/cnab/app/run
@@ -9,7 +9,6 @@ case $action in
         echo "boolParam=$BOOL_PARAM"
         echo "stringParam=$STRING_PARAM"
         echo "intParam=$INT_PARAM"
-        echo "floatParam=$FLOAT_PARAM"
         ;;
     uninstall)
         echo "uninstall action"
diff --git a/internal/cnab/driver.go b/internal/cnab/driver.go
index 6090c156..e78ff560 100644
--- a/internal/cnab/driver.go
+++ b/internal/cnab/driver.go
@@ -110,7 +110,9 @@ func prepareDriver(dockerCli command.Cli, bindMount BindMount, stdout io.Writer)
 	// Load any driver-specific config out of the environment.
 	driverCfg := map[string]string{}
 	for env := range d.Config() {
-		driverCfg[env] = os.Getenv(env)
+		if value, ok := os.LookupEnv(env); ok {
+			driverCfg[env] = value
+		}
 	}
 	d.SetConfig(driverCfg)
 
diff --git a/internal/commands/image/render.go b/internal/commands/image/render.go
index 0e27c555..ff107003 100644
--- a/internal/commands/image/render.go
+++ b/internal/commands/image/render.go
@@ -6,6 +6,8 @@ import (
 	"io"
 	"os"
 
+	"github.com/deislabs/cnab-go/driver"
+
 	"github.com/deislabs/cnab-go/action"
 	"github.com/docker/app/internal"
 	bdl "github.com/docker/app/internal/bundle"
@@ -59,13 +61,18 @@ func runRender(dockerCli command.Cli, appname string, opts renderOptions) error
 		w = f
 	}
 
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = w
+		return nil
+	}
+
 	action, installation, errBuf, err := prepareCustomAction(internal.ActionRenderName, dockerCli, appname, w, opts)
 	if err != nil {
 		return err
 	}
 	installation.Parameters[internal.ParameterRenderFormatName] = opts.formatDriver
 
-	if err := action.Run(&installation.Claim, nil, w); err != nil {
+	if err := action.Run(&installation.Claim, nil, cfgFunc); err != nil {
 		return fmt.Errorf("render failed: %s\n%s", err, errBuf)
 	}
 	return nil
diff --git a/internal/commands/remove.go b/internal/commands/remove.go
index 7df1d080..f995e910 100644
--- a/internal/commands/remove.go
+++ b/internal/commands/remove.go
@@ -6,6 +6,7 @@ import (
 
 	"github.com/docker/app/internal/cnab"
 
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/docker/app/internal/cliopts"
 
 	"github.com/deislabs/cnab-go/action"
@@ -79,7 +80,11 @@ func runRemove(dockerCli command.Cli, installationName string, opts removeOption
 	uninst := &action.Uninstall{
 		Driver: driverImpl,
 	}
-	if err := uninst.Run(&installation.Claim, creds, os.Stdout); err != nil {
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = dockerCli.Out()
+		return nil
+	}
+	if err := uninst.Run(&installation.Claim, creds, cfgFunc); err != nil {
 		if err2 := installationStore.Store(installation); err2 != nil {
 			return fmt.Errorf("%s while %s", err2, errBuf)
 		}
 	}
diff --git a/internal/commands/run.go b/internal/commands/run.go
index bfcfc63a..9a10ebae 100644
--- a/internal/commands/run.go
+++ b/internal/commands/run.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/deislabs/cnab-go/driver"
 	"github.com/docker/app/internal/cliopts"
 
 	"github.com/deislabs/cnab-go/action"
@@ -152,7 +153,11 @@ func runBundle(dockerCli command.Cli, bndl *bundle.Bundle, opts runOptions, ref
 	}
 	{
 		defer muteDockerCli(dockerCli)()
-		err = inst.Run(&installation.Claim, creds, os.Stdout)
+		cfgFunc := func(op *driver.Operation) error {
+			op.Out = dockerCli.Out()
+			return nil
+		}
+		err = inst.Run(&installation.Claim, creds, cfgFunc)
 	}
 	// Even if the installation failed, the installation is persisted with its failure status,
 	// so any installation needs a clean uninstallation.
diff --git a/internal/commands/update.go b/internal/commands/update.go
index c2ea3deb..12ba3f81 100644
--- a/internal/commands/update.go
+++ b/internal/commands/update.go
@@ -4,6 +4,8 @@ import (
 	"fmt"
 	"os"
 
+	"github.com/deislabs/cnab-go/driver"
+
 	"github.com/deislabs/cnab-go/action"
 	"github.com/deislabs/cnab-go/credentials"
 	"github.com/docker/app/internal/bundle"
@@ -85,7 +87,11 @@ func runUpdate(dockerCli command.Cli, installationName string, opts updateOption
 	u := &action.Upgrade{
 		Driver: driverImpl,
 	}
-	err = u.Run(&installation.Claim, creds, os.Stdout)
+	cfgFunc := func(op *driver.Operation) error {
+		op.Out = dockerCli.Out()
+		return nil
+	}
+	err = u.Run(&installation.Claim, creds, cfgFunc)
 	err2 := installationStore.Store(installation)
 	if err != nil {
 		return fmt.Errorf("Update failed: %s\n%s", err, errBuf)
diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE
deleted file mode 100644
index 261eeb9e..00000000
--- a/vendor/github.com/containerd/cgroups/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go deleted file mode 100644 index 875fb554..00000000 --- a/vendor/github.com/containerd/cgroups/blkio.go +++ /dev/null @@ -1,337 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewBlkio(root string) *blkioController { - return &blkioController{ - root: filepath.Join(root, string(Blkio)), - } -} - -type blkioController struct { - root string -} - -func (b *blkioController) Name() Name { - return Blkio -} - -func (b *blkioController) Path(path string) string { - return filepath.Join(b.root, path) -} - -func (b *blkioController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.BlockIO == nil { - return nil - } - for _, t := range createBlkioSettings(resources.BlockIO) { - if t.value != nil { - if err := ioutil.WriteFile( - filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)), - t.format(t.value), - defaultFilePerm, - ); err != nil { - return err - } - } - } - return nil -} - -func (b *blkioController) Update(path string, resources *specs.LinuxResources) error { - return b.Create(path, resources) -} - -func (b *blkioController) Stat(path string, stats *Metrics) error { - stats.Blkio = &BlkIOStat{} - settings := []blkioStatSettings{ - { - name: "throttle.io_serviced", - entry: &stats.Blkio.IoServicedRecursive, - }, - { - name: "throttle.io_service_bytes", - entry: &stats.Blkio.IoServiceBytesRecursive, - }, - } - // Try to read CFQ stats available on all CFQ enabled kernels first - if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil { - settings = append(settings, - blkioStatSettings{ - name: "sectors_recursive", - entry: &stats.Blkio.SectorsRecursive, - }, - blkioStatSettings{ - name: "io_service_bytes_recursive", - entry: &stats.Blkio.IoServiceBytesRecursive, - }, - blkioStatSettings{ - name: "io_serviced_recursive", - entry: &stats.Blkio.IoServicedRecursive, - }, - blkioStatSettings{ - name: "io_queued_recursive", - entry: &stats.Blkio.IoQueuedRecursive, - }, - blkioStatSettings{ - name: "io_service_time_recursive", - entry: &stats.Blkio.IoServiceTimeRecursive, - }, - blkioStatSettings{ - name: "io_wait_time_recursive", - entry: &stats.Blkio.IoWaitTimeRecursive, - }, - blkioStatSettings{ - name: "io_merged_recursive", - entry: &stats.Blkio.IoMergedRecursive, - }, - blkioStatSettings{ - name: "time_recursive", - entry: &stats.Blkio.IoTimeRecursive, - }, - ) - } - f, err := os.Open("/proc/diskstats") - if err != nil { - return err - } - defer f.Close() - - devices, err := getDevices(f) - if err != nil { - return err - } - - for _, t := range settings { - if err := b.readEntry(devices, path, t.name, t.entry); err != nil { - return err - } - } - return nil -} - -func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*BlkIOEntry) error { - f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name))) - if err != nil { - return err - } - defer f.Close() - sc := bufio.NewScanner(f) - for sc.Scan() { - if err := sc.Err(); err != nil { - return err - } - // format: dev type amount - fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine) - if len(fields) < 3 { - if len(fields) == 2 && fields[0] == "Total" { - // skip total line - continue - } else { - return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text()) - } - } - major, err := strconv.ParseUint(fields[0], 10, 64) - if err != nil { - return err - } - minor, err := 
strconv.ParseUint(fields[1], 10, 64) - if err != nil { - return err - } - op := "" - valueField := 2 - if len(fields) == 4 { - op = fields[2] - valueField = 3 - } - v, err := strconv.ParseUint(fields[valueField], 10, 64) - if err != nil { - return err - } - *entry = append(*entry, &BlkIOEntry{ - Device: devices[deviceKey{major, minor}], - Major: major, - Minor: minor, - Op: op, - Value: v, - }) - } - return nil -} - -func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings { - settings := []blkioSettings{} - - if blkio.Weight != nil { - settings = append(settings, - blkioSettings{ - name: "weight", - value: blkio.Weight, - format: uintf, - }) - } - if blkio.LeafWeight != nil { - settings = append(settings, - blkioSettings{ - name: "leaf_weight", - value: blkio.LeafWeight, - format: uintf, - }) - } - for _, wd := range blkio.WeightDevice { - if wd.Weight != nil { - settings = append(settings, - blkioSettings{ - name: "weight_device", - value: wd, - format: weightdev, - }) - } - if wd.LeafWeight != nil { - settings = append(settings, - blkioSettings{ - name: "leaf_weight_device", - value: wd, - format: weightleafdev, - }) - } - } - for _, t := range []struct { - name string - list []specs.LinuxThrottleDevice - }{ - { - name: "throttle.read_bps_device", - list: blkio.ThrottleReadBpsDevice, - }, - { - name: "throttle.read_iops_device", - list: blkio.ThrottleReadIOPSDevice, - }, - { - name: "throttle.write_bps_device", - list: blkio.ThrottleWriteBpsDevice, - }, - { - name: "throttle.write_iops_device", - list: blkio.ThrottleWriteIOPSDevice, - }, - } { - for _, td := range t.list { - settings = append(settings, blkioSettings{ - name: t.name, - value: td, - format: throttleddev, - }) - } - } - return settings -} - -type blkioSettings struct { - name string - value interface{} - format func(v interface{}) []byte -} - -type blkioStatSettings struct { - name string - entry *[]*BlkIOEntry -} - -func uintf(v interface{}) []byte { - return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10)) -} - -func weightdev(v interface{}) []byte { - wd := v.(specs.LinuxWeightDevice) - return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.Weight)) -} - -func weightleafdev(v interface{}) []byte { - wd := v.(specs.LinuxWeightDevice) - return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, *wd.LeafWeight)) -} - -func throttleddev(v interface{}) []byte { - td := v.(specs.LinuxThrottleDevice) - return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate)) -} - -func splitBlkIOStatLine(r rune) bool { - return r == ' ' || r == ':' -} - -type deviceKey struct { - major, minor uint64 -} - -// getDevices makes a best effort attempt to read all the devices into a map -// keyed by major and minor number. Since devices may be mapped multiple times, -// we err on taking the first occurrence. 
-func getDevices(r io.Reader) (map[deviceKey]string, error) { - - var ( - s = bufio.NewScanner(r) - devices = make(map[deviceKey]string) - ) - for s.Scan() { - fields := strings.Fields(s.Text()) - major, err := strconv.Atoi(fields[0]) - if err != nil { - return nil, err - } - minor, err := strconv.Atoi(fields[1]) - if err != nil { - return nil, err - } - key := deviceKey{ - major: uint64(major), - minor: uint64(minor), - } - if _, ok := devices[key]; ok { - continue - } - devices[key] = filepath.Join("/dev", fields[2]) - } - return devices, s.Err() -} - -func major(devNumber uint64) uint64 { - return (devNumber >> 8) & 0xfff -} - -func minor(devNumber uint64) uint64 { - return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00) -} diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go deleted file mode 100644 index 53866685..00000000 --- a/vendor/github.com/containerd/cgroups/cgroup.go +++ /dev/null @@ -1,532 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - - specs "github.com/opencontainers/runtime-spec/specs-go" - "github.com/pkg/errors" -) - -// New returns a new control via the cgroup cgroups interface -func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources, opts ...InitOpts) (Cgroup, error) { - config := newInitConfig() - for _, o := range opts { - if err := o(config); err != nil { - return nil, err - } - } - subsystems, err := hierarchy() - if err != nil { - return nil, err - } - var active []Subsystem - for _, s := range subsystems { - // check if subsystem exists - if err := initializeSubsystem(s, path, resources); err != nil { - if err == ErrControllerNotActive { - if config.InitCheck != nil { - if skerr := config.InitCheck(s, path, err); skerr != nil { - if skerr != ErrIgnoreSubsystem { - return nil, skerr - } - } - } - continue - } - return nil, err - } - active = append(active, s) - } - return &cgroup{ - path: path, - subsystems: active, - }, nil -} - -// Load will load an existing cgroup and allow it to be controlled -func Load(hierarchy Hierarchy, path Path, opts ...InitOpts) (Cgroup, error) { - config := newInitConfig() - for _, o := range opts { - if err := o(config); err != nil { - return nil, err - } - } - var activeSubsystems []Subsystem - subsystems, err := hierarchy() - if err != nil { - return nil, err - } - // check that the subsystems still exist, and keep only those that actually exist - for _, s := range pathers(subsystems) { - p, err := path(s.Name()) - if err != nil { - if os.IsNotExist(errors.Cause(err)) { - return nil, ErrCgroupDeleted - } - if err == ErrControllerNotActive { - if config.InitCheck != nil { - if skerr := config.InitCheck(s, path, err); skerr != nil { - if skerr != ErrIgnoreSubsystem { - return nil, skerr - } - } - } - continue - } - return nil, err - } - if _, err := os.Lstat(s.Path(p)); err != nil { - if os.IsNotExist(err) { - 
continue - } - return nil, err - } - activeSubsystems = append(activeSubsystems, s) - } - // if we do not have any active systems then the cgroup is deleted - if len(activeSubsystems) == 0 { - return nil, ErrCgroupDeleted - } - return &cgroup{ - path: path, - subsystems: activeSubsystems, - }, nil -} - -type cgroup struct { - path Path - - subsystems []Subsystem - mu sync.Mutex - err error -} - -// New returns a new sub cgroup -func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - path := subPath(c.path, name) - for _, s := range c.subsystems { - if err := initializeSubsystem(s, path, resources); err != nil { - return nil, err - } - } - return &cgroup{ - path: path, - subsystems: c.subsystems, - }, nil -} - -// Subsystems returns all the subsystems that are currently being -// consumed by the group -func (c *cgroup) Subsystems() []Subsystem { - return c.subsystems -} - -// Add moves the provided process into the new cgroup -func (c *cgroup) Add(process Process) error { - if process.Pid <= 0 { - return ErrInvalidPid - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - return c.add(process) -} - -func (c *cgroup) add(process Process) error { - for _, s := range pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return err - } - if err := ioutil.WriteFile( - filepath.Join(s.Path(p), cgroupProcs), - []byte(strconv.Itoa(process.Pid)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -// AddTask moves the provided tasks (threads) into the new cgroup -func (c *cgroup) AddTask(process Process) error { - if process.Pid <= 0 { - return ErrInvalidPid - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - return c.addTask(process) -} - -func (c *cgroup) addTask(process Process) error { - for _, s := range pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return err - } - if err := ioutil.WriteFile( - filepath.Join(s.Path(p), cgroupTasks), - []byte(strconv.Itoa(process.Pid)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -// Delete will remove the control group from each of the subsystems registered -func (c *cgroup) Delete() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - var errors []string - for _, s := range c.subsystems { - if d, ok := s.(deleter); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - if err := d.Delete(sp); err != nil { - errors = append(errors, string(s.Name())) - } - continue - } - if p, ok := s.(pather); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - path := p.Path(sp) - if err := remove(path); err != nil { - errors = append(errors, path) - } - } - } - if len(errors) > 0 { - return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", ")) - } - c.err = ErrCgroupDeleted - return nil -} - -// Stat returns the current metrics for the cgroup -func (c *cgroup) Stat(handlers ...ErrorHandler) (*Metrics, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - if len(handlers) == 0 { - handlers = append(handlers, errPassthrough) - } - var ( - stats = &Metrics{ - CPU: &CPUStat{ - Throttling: &Throttle{}, - Usage: &CPUUsage{}, - }, - } - wg = &sync.WaitGroup{} - errs = make(chan error, len(c.subsystems)) - ) - for _, s := range c.subsystems { - if ss, ok := s.(stater); ok { - sp, err := 
c.path(s.Name()) - if err != nil { - return nil, err - } - wg.Add(1) - go func() { - defer wg.Done() - if err := ss.Stat(sp, stats); err != nil { - for _, eh := range handlers { - if herr := eh(err); herr != nil { - errs <- herr - } - } - } - }() - } - } - wg.Wait() - close(errs) - for err := range errs { - return nil, err - } - return stats, nil -} - -// Update updates the cgroup with the new resource values provided -// -// Be prepared to handle EBUSY when trying to update a cgroup with -// live processes and other operations like Stats being performed at the -// same time -func (c *cgroup) Update(resources *specs.LinuxResources) error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - for _, s := range c.subsystems { - if u, ok := s.(updater); ok { - sp, err := c.path(s.Name()) - if err != nil { - return err - } - if err := u.Update(sp, resources); err != nil { - return err - } - } - } - return nil -} - -// Processes returns the processes running inside the cgroup along -// with the subsystem used, pid, and path -func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - return c.processes(subsystem, recursive) -} - -func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) { - s := c.getSubsystem(subsystem) - sp, err := c.path(subsystem) - if err != nil { - return nil, err - } - path := s.(pather).Path(sp) - var processes []Process - err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !recursive && info.IsDir() { - if p == path { - return nil - } - return filepath.SkipDir - } - dir, name := filepath.Split(p) - if name != cgroupProcs { - return nil - } - procs, err := readPids(dir, subsystem) - if err != nil { - return err - } - processes = append(processes, procs...) - return nil - }) - return processes, err -} - -// Tasks returns the tasks running inside the cgroup along -// with the subsystem used, pid, and path -func (c *cgroup) Tasks(subsystem Name, recursive bool) ([]Task, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return nil, c.err - } - return c.tasks(subsystem, recursive) -} - -func (c *cgroup) tasks(subsystem Name, recursive bool) ([]Task, error) { - s := c.getSubsystem(subsystem) - sp, err := c.path(subsystem) - if err != nil { - return nil, err - } - path := s.(pather).Path(sp) - var tasks []Task - err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !recursive && info.IsDir() { - if p == path { - return nil - } - return filepath.SkipDir - } - dir, name := filepath.Split(p) - if name != cgroupTasks { - return nil - } - procs, err := readTasksPids(dir, subsystem) - if err != nil { - return err - } - tasks = append(tasks, procs...) 
- return nil - }) - return tasks, err -} - -// Freeze freezes the entire cgroup and all the processes inside it -func (c *cgroup) Freeze() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - s := c.getSubsystem(Freezer) - if s == nil { - return ErrFreezerNotSupported - } - sp, err := c.path(Freezer) - if err != nil { - return err - } - return s.(*freezerController).Freeze(sp) -} - -// Thaw thaws out the cgroup and all the processes inside it -func (c *cgroup) Thaw() error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - s := c.getSubsystem(Freezer) - if s == nil { - return ErrFreezerNotSupported - } - sp, err := c.path(Freezer) - if err != nil { - return err - } - return s.(*freezerController).Thaw(sp) -} - -// OOMEventFD returns the memory cgroup's out of memory event fd that triggers -// when processes inside the cgroup receive an oom event. Returns -// ErrMemoryNotSupported if memory cgroups is not supported. -func (c *cgroup) OOMEventFD() (uintptr, error) { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return 0, c.err - } - s := c.getSubsystem(Memory) - if s == nil { - return 0, ErrMemoryNotSupported - } - sp, err := c.path(Memory) - if err != nil { - return 0, err - } - return s.(*memoryController).OOMEventFD(sp) -} - -// State returns the state of the cgroup and its processes -func (c *cgroup) State() State { - c.mu.Lock() - defer c.mu.Unlock() - c.checkExists() - if c.err != nil && c.err == ErrCgroupDeleted { - return Deleted - } - s := c.getSubsystem(Freezer) - if s == nil { - return Thawed - } - sp, err := c.path(Freezer) - if err != nil { - return Unknown - } - state, err := s.(*freezerController).state(sp) - if err != nil { - return Unknown - } - return state -} - -// MoveTo does a recursive move subsystem by subsystem of all the processes -// inside the group -func (c *cgroup) MoveTo(destination Cgroup) error { - c.mu.Lock() - defer c.mu.Unlock() - if c.err != nil { - return c.err - } - for _, s := range c.subsystems { - processes, err := c.processes(s.Name(), true) - if err != nil { - return err - } - for _, p := range processes { - if err := destination.Add(p); err != nil { - if strings.Contains(err.Error(), "no such process") { - continue - } - return err - } - } - } - return nil -} - -func (c *cgroup) getSubsystem(n Name) Subsystem { - for _, s := range c.subsystems { - if s.Name() == n { - return s - } - } - return nil -} - -func (c *cgroup) checkExists() { - for _, s := range pathers(c.subsystems) { - p, err := c.path(s.Name()) - if err != nil { - return - } - if _, err := os.Lstat(s.Path(p)); err != nil { - if os.IsNotExist(err) { - c.err = ErrCgroupDeleted - return - } - } - } -} diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go deleted file mode 100644 index 1f62c54f..00000000 --- a/vendor/github.com/containerd/cgroups/control.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "os" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - cgroupProcs = "cgroup.procs" - cgroupTasks = "tasks" - defaultDirPerm = 0755 -) - -// defaultFilePerm is a var so that the test framework can change the filemode -// of all files created when the tests are running. The difference between the -// tests and real world use is that files like "cgroup.procs" will exist when writing -// to a read cgroup filesystem and do not exist prior when running in the tests. -// this is set to a non 0 value in the test code -var defaultFilePerm = os.FileMode(0) - -type Process struct { - // Subsystem is the name of the subsystem that the process is in - Subsystem Name - // Pid is the process id of the process - Pid int - // Path is the full path of the subsystem and location that the process is in - Path string -} - -type Task struct { - // Subsystem is the name of the subsystem that the task is in - Subsystem Name - // Pid is the process id of the task - Pid int - // Path is the full path of the subsystem and location that the task is in - Path string -} - -// Cgroup handles interactions with the individual groups to perform -// actions on them as them main interface to this cgroup package -type Cgroup interface { - // New creates a new cgroup under the calling cgroup - New(string, *specs.LinuxResources) (Cgroup, error) - // Add adds a process to the cgroup (cgroup.procs) - Add(Process) error - // AddTask adds a process to the cgroup (tasks) - AddTask(Process) error - // Delete removes the cgroup as a whole - Delete() error - // MoveTo moves all the processes under the calling cgroup to the provided one - // subsystems are moved one at a time - MoveTo(Cgroup) error - // Stat returns the stats for all subsystems in the cgroup - Stat(...ErrorHandler) (*Metrics, error) - // Update updates all the subsystems with the provided resource changes - Update(resources *specs.LinuxResources) error - // Processes returns all the processes in a select subsystem for the cgroup - Processes(Name, bool) ([]Process, error) - // Tasks returns all the tasks in a select subsystem for the cgroup - Tasks(Name, bool) ([]Task, error) - // Freeze freezes or pauses all processes inside the cgroup - Freeze() error - // Thaw thaw or resumes all processes inside the cgroup - Thaw() error - // OOMEventFD returns the memory subsystem's event fd for OOM events - OOMEventFD() (uintptr, error) - // State returns the cgroups current state - State() State - // Subsystems returns all the subsystems in the cgroup - Subsystems() []Subsystem -} diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go deleted file mode 100644 index 431cd3e5..00000000 --- a/vendor/github.com/containerd/cgroups/cpu.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewCpu(root string) *cpuController { - return &cpuController{ - root: filepath.Join(root, string(Cpu)), - } -} - -type cpuController struct { - root string -} - -func (c *cpuController) Name() Name { - return Cpu -} - -func (c *cpuController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpuController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { - return err - } - if cpu := resources.CPU; cpu != nil { - for _, t := range []struct { - name string - ivalue *int64 - uvalue *uint64 - }{ - { - name: "rt_period_us", - uvalue: cpu.RealtimePeriod, - }, - { - name: "rt_runtime_us", - ivalue: cpu.RealtimeRuntime, - }, - { - name: "shares", - uvalue: cpu.Shares, - }, - { - name: "cfs_period_us", - uvalue: cpu.Period, - }, - { - name: "cfs_quota_us", - ivalue: cpu.Quota, - }, - } { - var value []byte - if t.uvalue != nil { - value = []byte(strconv.FormatUint(*t.uvalue, 10)) - } else if t.ivalue != nil { - value = []byte(strconv.FormatInt(*t.ivalue, 10)) - } - if value != nil { - if err := ioutil.WriteFile( - filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)), - value, - defaultFilePerm, - ); err != nil { - return err - } - } - } - } - return nil -} - -func (c *cpuController) Update(path string, resources *specs.LinuxResources) error { - return c.Create(path, resources) -} - -func (c *cpuController) Stat(path string, stats *Metrics) error { - f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat")) - if err != nil { - return err - } - defer f.Close() - // get or create the cpu field because cpuacct can also set values on this struct - sc := bufio.NewScanner(f) - for sc.Scan() { - if err := sc.Err(); err != nil { - return err - } - key, v, err := parseKV(sc.Text()) - if err != nil { - return err - } - switch key { - case "nr_periods": - stats.CPU.Throttling.Periods = v - case "nr_throttled": - stats.CPU.Throttling.ThrottledPeriods = v - case "throttled_time": - stats.CPU.Throttling.ThrottledTime = v - } - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go deleted file mode 100644 index 42a490a8..00000000 --- a/vendor/github.com/containerd/cgroups/cpuacct.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "io/ioutil" - "path/filepath" - "strconv" - "strings" -) - -const nanosecondsInSecond = 1000000000 - -var clockTicks = getClockTicks() - -func NewCpuacct(root string) *cpuacctController { - return &cpuacctController{ - root: filepath.Join(root, string(Cpuacct)), - } -} - -type cpuacctController struct { - root string -} - -func (c *cpuacctController) Name() Name { - return Cpuacct -} - -func (c *cpuacctController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpuacctController) Stat(path string, stats *Metrics) error { - user, kernel, err := c.getUsage(path) - if err != nil { - return err - } - total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage")) - if err != nil { - return err - } - percpu, err := c.percpuUsage(path) - if err != nil { - return err - } - stats.CPU.Usage.Total = total - stats.CPU.Usage.User = user - stats.CPU.Usage.Kernel = kernel - stats.CPU.Usage.PerCPU = percpu - return nil -} - -func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) { - var usage []uint64 - data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu")) - if err != nil { - return nil, err - } - for _, v := range strings.Fields(string(data)) { - u, err := strconv.ParseUint(v, 10, 64) - if err != nil { - return nil, err - } - usage = append(usage, u) - } - return usage, nil -} - -func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) { - statPath := filepath.Join(c.Path(path), "cpuacct.stat") - data, err := ioutil.ReadFile(statPath) - if err != nil { - return 0, 0, err - } - fields := strings.Fields(string(data)) - if len(fields) != 4 { - return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath) - } - for _, t := range []struct { - index int - name string - value *uint64 - }{ - { - index: 0, - name: "user", - value: &user, - }, - { - index: 2, - name: "system", - value: &kernel, - }, - } { - if fields[t.index] != t.name { - return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath) - } - v, err := strconv.ParseUint(fields[t.index+1], 10, 64) - if err != nil { - return 0, 0, err - } - *t.value = v - } - return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil -} diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go deleted file mode 100644 index 30208515..00000000 --- a/vendor/github.com/containerd/cgroups/cpuset.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewCputset(root string) *cpusetController { - return &cpusetController{ - root: filepath.Join(root, string(Cpuset)), - } -} - -type cpusetController struct { - root string -} - -func (c *cpusetController) Name() Name { - return Cpuset -} - -func (c *cpusetController) Path(path string) string { - return filepath.Join(c.root, path) -} - -func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error { - if err := c.ensureParent(c.Path(path), c.root); err != nil { - return err - } - if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil { - return err - } - if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil { - return err - } - if resources.CPU != nil { - for _, t := range []struct { - name string - value string - }{ - { - name: "cpus", - value: resources.CPU.Cpus, - }, - { - name: "mems", - value: resources.CPU.Mems, - }, - } { - if t.value != "" { - if err := ioutil.WriteFile( - filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)), - []byte(t.value), - defaultFilePerm, - ); err != nil { - return err - } - } - } - } - return nil -} - -func (c *cpusetController) Update(path string, resources *specs.LinuxResources) error { - return c.Create(path, resources) -} - -func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) { - if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) { - return - } - if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) { - return - } - return cpus, mems, nil -} - -// ensureParent makes sure that the parent directory of current is created -// and populated with the proper cpus and mems files copied from -// it's parent. -func (c *cpusetController) ensureParent(current, root string) error { - parent := filepath.Dir(current) - if _, err := filepath.Rel(root, parent); err != nil { - return nil - } - // Avoid infinite recursion. 
- if parent == current { - return fmt.Errorf("cpuset: cgroup parent path outside cgroup root") - } - if cleanPath(parent) != root { - if err := c.ensureParent(parent, root); err != nil { - return err - } - } - if err := os.MkdirAll(current, defaultDirPerm); err != nil { - return err - } - return c.copyIfNeeded(current, parent) -} - -// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent -// directory to the current directory if the file's contents are 0 -func (c *cpusetController) copyIfNeeded(current, parent string) error { - var ( - err error - currentCpus, currentMems []byte - parentCpus, parentMems []byte - ) - if currentCpus, currentMems, err = c.getValues(current); err != nil { - return err - } - if parentCpus, parentMems, err = c.getValues(parent); err != nil { - return err - } - if isEmpty(currentCpus) { - if err := ioutil.WriteFile( - filepath.Join(current, "cpuset.cpus"), - parentCpus, - defaultFilePerm, - ); err != nil { - return err - } - } - if isEmpty(currentMems) { - if err := ioutil.WriteFile( - filepath.Join(current, "cpuset.mems"), - parentMems, - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func isEmpty(b []byte) bool { - return len(bytes.Trim(b, "\n")) == 0 -} diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go deleted file mode 100644 index f6a3b194..00000000 --- a/vendor/github.com/containerd/cgroups/devices.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - allowDeviceFile = "devices.allow" - denyDeviceFile = "devices.deny" - wildcard = -1 -) - -func NewDevices(root string) *devicesController { - return &devicesController{ - root: filepath.Join(root, string(Devices)), - } -} - -type devicesController struct { - root string -} - -func (d *devicesController) Name() Name { - return Devices -} - -func (d *devicesController) Path(path string) string { - return filepath.Join(d.root, path) -} - -func (d *devicesController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil { - return err - } - for _, device := range resources.Devices { - file := denyDeviceFile - if device.Allow { - file = allowDeviceFile - } - if device.Type == "" { - device.Type = "a" - } - if err := ioutil.WriteFile( - filepath.Join(d.Path(path), file), - []byte(deviceString(device)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func (d *devicesController) Update(path string, resources *specs.LinuxResources) error { - return d.Create(path, resources) -} - -func deviceString(device specs.LinuxDeviceCgroup) string { - return fmt.Sprintf("%s %s:%s %s", - device.Type, - deviceNumber(device.Major), - deviceNumber(device.Minor), - device.Access, - ) -} - -func deviceNumber(number *int64) string { - if number == nil || *number == wildcard { - return "*" - } - return fmt.Sprint(*number) -} diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go deleted file mode 100644 index f1ad8315..00000000 --- a/vendor/github.com/containerd/cgroups/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "errors" - "os" -) - -var ( - ErrInvalidPid = errors.New("cgroups: pid must be greater than 0") - ErrMountPointNotExist = errors.New("cgroups: cgroup mountpoint does not exist") - ErrInvalidFormat = errors.New("cgroups: parsing file with invalid format failed") - ErrFreezerNotSupported = errors.New("cgroups: freezer cgroup not supported on this system") - ErrMemoryNotSupported = errors.New("cgroups: memory cgroup not supported on this system") - ErrCgroupDeleted = errors.New("cgroups: cgroup deleted") - ErrNoCgroupMountDestination = errors.New("cgroups: cannot find cgroup mount destination") -) - -// ErrorHandler is a function that handles and acts on errors -type ErrorHandler func(err error) error - -// IgnoreNotExist ignores any errors that are for not existing files -func IgnoreNotExist(err error) error { - if os.IsNotExist(err) { - return nil - } - return err -} - -func errPassthrough(err error) error { - return err -} diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go deleted file mode 100644 index 5e668408..00000000 --- a/vendor/github.com/containerd/cgroups/freezer.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "path/filepath" - "strings" - "time" -) - -func NewFreezer(root string) *freezerController { - return &freezerController{ - root: filepath.Join(root, string(Freezer)), - } -} - -type freezerController struct { - root string -} - -func (f *freezerController) Name() Name { - return Freezer -} - -func (f *freezerController) Path(path string) string { - return filepath.Join(f.root, path) -} - -func (f *freezerController) Freeze(path string) error { - return f.waitState(path, Frozen) -} - -func (f *freezerController) Thaw(path string) error { - return f.waitState(path, Thawed) -} - -func (f *freezerController) changeState(path string, state State) error { - return ioutil.WriteFile( - filepath.Join(f.root, path, "freezer.state"), - []byte(strings.ToUpper(string(state))), - defaultFilePerm, - ) -} - -func (f *freezerController) state(path string) (State, error) { - current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state")) - if err != nil { - return "", err - } - return State(strings.ToLower(strings.TrimSpace(string(current)))), nil -} - -func (f *freezerController) waitState(path string, state State) error { - for { - if err := f.changeState(path, state); err != nil { - return err - } - current, err := f.state(path) - if err != nil { - return err - } - if current == state { - return nil - } - time.Sleep(1 * time.Millisecond) - } -} diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go deleted file mode 100644 index 9221bf3f..00000000 --- a/vendor/github.com/containerd/cgroups/hierarchy.go +++ /dev/null @@ -1,20 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -// Hierarchy enableds both unified and split hierarchy for cgroups -type Hierarchy func() ([]Subsystem, error) diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go deleted file mode 100644 index 3718706d..00000000 --- a/vendor/github.com/containerd/cgroups/hugetlb.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewHugetlb(root string) (*hugetlbController, error) { - sizes, err := hugePageSizes() - if err != nil { - return nil, err - } - - return &hugetlbController{ - root: filepath.Join(root, string(Hugetlb)), - sizes: sizes, - }, nil -} - -type hugetlbController struct { - root string - sizes []string -} - -func (h *hugetlbController) Name() Name { - return Hugetlb -} - -func (h *hugetlbController) Path(path string) string { - return filepath.Join(h.root, path) -} - -func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil { - return err - } - for _, limit := range resources.HugepageLimits { - if err := ioutil.WriteFile( - filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")), - []byte(strconv.FormatUint(limit.Limit, 10)), - defaultFilePerm, - ); err != nil { - return err - } - } - return nil -} - -func (h *hugetlbController) Stat(path string, stats *Metrics) error { - for _, size := range h.sizes { - s, err := h.readSizeStat(path, size) - if err != nil { - return err - } - stats.Hugetlb = append(stats.Hugetlb, s) - } - return nil -} - -func (h *hugetlbController) readSizeStat(path, size string) (*HugetlbStat, error) { - s := HugetlbStat{ - Pagesize: size, - } - for _, t := range []struct { - name string - value *uint64 - }{ - { - name: "usage_in_bytes", - value: &s.Usage, - }, - { - name: "max_usage_in_bytes", - value: &s.Max, - }, - { - name: "failcnt", - value: &s.Failcnt, - }, - } { - v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, "."))) - if err != nil { - return nil, err - } - *t.value = v - } - return &s, nil -} diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go deleted file mode 100644 index 
b8b5fe7e..00000000 --- a/vendor/github.com/containerd/cgroups/memory.go +++ /dev/null @@ -1,329 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - - "golang.org/x/sys/unix" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewMemory(root string) *memoryController { - return &memoryController{ - root: filepath.Join(root, string(Memory)), - } -} - -type memoryController struct { - root string -} - -func (m *memoryController) Name() Name { - return Memory -} - -func (m *memoryController) Path(path string) string { - return filepath.Join(m.root, path) -} - -func (m *memoryController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Memory == nil { - return nil - } - if resources.Memory.Kernel != nil { - // Check if kernel memory is enabled - // We have to limit the kernel memory here as it won't be accounted at all - // until a limit is set on the cgroup and limit cannot be set once the - // cgroup has children, or if there are already tasks in the cgroup. - for _, i := range []int64{1, -1} { - if err := ioutil.WriteFile( - filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"), - []byte(strconv.FormatInt(i, 10)), - defaultFilePerm, - ); err != nil { - return checkEBUSY(err) - } - } - } - return m.set(path, getMemorySettings(resources)) -} - -func (m *memoryController) Update(path string, resources *specs.LinuxResources) error { - if resources.Memory == nil { - return nil - } - g := func(v *int64) bool { - return v != nil && *v > 0 - } - settings := getMemorySettings(resources) - if g(resources.Memory.Limit) && g(resources.Memory.Swap) { - // if the updated swap value is larger than the current memory limit set the swap changes first - // then set the memory limit as swap must always be larger than the current limit - current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes")) - if err != nil { - return err - } - if current < uint64(*resources.Memory.Swap) { - settings[0], settings[1] = settings[1], settings[0] - } - } - return m.set(path, settings) -} - -func (m *memoryController) Stat(path string, stats *Metrics) error { - f, err := os.Open(filepath.Join(m.Path(path), "memory.stat")) - if err != nil { - return err - } - defer f.Close() - stats.Memory = &MemoryStat{ - Usage: &MemoryEntry{}, - Swap: &MemoryEntry{}, - Kernel: &MemoryEntry{}, - KernelTCP: &MemoryEntry{}, - } - if err := m.parseStats(f, stats.Memory); err != nil { - return err - } - for _, t := range []struct { - module string - entry *MemoryEntry - }{ - { - module: "", - entry: stats.Memory.Usage, - }, - { - module: "memsw", - entry: stats.Memory.Swap, - }, - { - module: "kmem", - entry: stats.Memory.Kernel, - }, - { - module: "kmem.tcp", - entry: stats.Memory.KernelTCP, - }, - } { - for _, tt := range []struct { 
- name string - value *uint64 - }{ - { - name: "usage_in_bytes", - value: &t.entry.Usage, - }, - { - name: "max_usage_in_bytes", - value: &t.entry.Max, - }, - { - name: "failcnt", - value: &t.entry.Failcnt, - }, - { - name: "limit_in_bytes", - value: &t.entry.Limit, - }, - } { - parts := []string{"memory"} - if t.module != "" { - parts = append(parts, t.module) - } - parts = append(parts, tt.name) - v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, "."))) - if err != nil { - return err - } - *tt.value = v - } - } - return nil -} - -func (m *memoryController) OOMEventFD(path string) (uintptr, error) { - root := m.Path(path) - f, err := os.Open(filepath.Join(root, "memory.oom_control")) - if err != nil { - return 0, err - } - defer f.Close() - fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0) - if serr != 0 { - return 0, serr - } - if err := writeEventFD(root, f.Fd(), fd); err != nil { - unix.Close(int(fd)) - return 0, err - } - return fd, nil -} - -func writeEventFD(root string, cfd, efd uintptr) error { - f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0) - if err != nil { - return err - } - _, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd)) - f.Close() - return err -} - -func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error { - var ( - raw = make(map[string]uint64) - sc = bufio.NewScanner(r) - line int - ) - for sc.Scan() { - if err := sc.Err(); err != nil { - return err - } - key, v, err := parseKV(sc.Text()) - if err != nil { - return fmt.Errorf("%d: %v", line, err) - } - raw[key] = v - line++ - } - stat.Cache = raw["cache"] - stat.RSS = raw["rss"] - stat.RSSHuge = raw["rss_huge"] - stat.MappedFile = raw["mapped_file"] - stat.Dirty = raw["dirty"] - stat.Writeback = raw["writeback"] - stat.PgPgIn = raw["pgpgin"] - stat.PgPgOut = raw["pgpgout"] - stat.PgFault = raw["pgfault"] - stat.PgMajFault = raw["pgmajfault"] - stat.InactiveAnon = raw["inactive_anon"] - stat.ActiveAnon = raw["active_anon"] - stat.InactiveFile = raw["inactive_file"] - stat.ActiveFile = raw["active_file"] - stat.Unevictable = raw["unevictable"] - stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"] - stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"] - stat.TotalCache = raw["total_cache"] - stat.TotalRSS = raw["total_rss"] - stat.TotalRSSHuge = raw["total_rss_huge"] - stat.TotalMappedFile = raw["total_mapped_file"] - stat.TotalDirty = raw["total_dirty"] - stat.TotalWriteback = raw["total_writeback"] - stat.TotalPgPgIn = raw["total_pgpgin"] - stat.TotalPgPgOut = raw["total_pgpgout"] - stat.TotalPgFault = raw["total_pgfault"] - stat.TotalPgMajFault = raw["total_pgmajfault"] - stat.TotalInactiveAnon = raw["total_inactive_anon"] - stat.TotalActiveAnon = raw["total_active_anon"] - stat.TotalInactiveFile = raw["total_inactive_file"] - stat.TotalActiveFile = raw["total_active_file"] - stat.TotalUnevictable = raw["total_unevictable"] - return nil -} - -func (m *memoryController) set(path string, settings []memorySettings) error { - for _, t := range settings { - if t.value != nil { - if err := ioutil.WriteFile( - filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)), - []byte(strconv.FormatInt(*t.value, 10)), - defaultFilePerm, - ); err != nil { - return err - } - } - } - return nil -} - -type memorySettings struct { - name string - value *int64 -} - -func getMemorySettings(resources *specs.LinuxResources) []memorySettings { - mem := resources.Memory - var swappiness *int64 - if mem.Swappiness != nil { 
- v := int64(*mem.Swappiness) - swappiness = &v - } - return []memorySettings{ - { - name: "limit_in_bytes", - value: mem.Limit, - }, - { - name: "soft_limit_in_bytes", - value: mem.Reservation, - }, - { - name: "memsw.limit_in_bytes", - value: mem.Swap, - }, - { - name: "kmem.limit_in_bytes", - value: mem.Kernel, - }, - { - name: "kmem.tcp.limit_in_bytes", - value: mem.KernelTCP, - }, - { - name: "oom_control", - value: getOomControlValue(mem), - }, - { - name: "swappiness", - value: swappiness, - }, - } -} - -func checkEBUSY(err error) error { - if pathErr, ok := err.(*os.PathError); ok { - if errNo, ok := pathErr.Err.(syscall.Errno); ok { - if errNo == unix.EBUSY { - return fmt.Errorf( - "failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children") - } - } - } - return err -} - -func getOomControlValue(mem *specs.LinuxMemory) *int64 { - if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller { - i := int64(1) - return &i - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go deleted file mode 100644 index 7dd7f6f3..00000000 --- a/vendor/github.com/containerd/cgroups/metrics.pb.go +++ /dev/null @@ -1,4696 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/cgroups/metrics.proto - -/* - Package cgroups is a generated protocol buffer package. - - It is generated from these files: - github.com/containerd/cgroups/metrics.proto - - It has these top-level messages: - Metrics - HugetlbStat - PidsStat - CPUStat - CPUUsage - Throttle - MemoryStat - MemoryEntry - BlkIOStat - BlkIOEntry - RdmaStat - RdmaEntry - NetworkStat -*/ -package cgroups - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/gogo/protobuf/gogoproto" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type Metrics struct { - Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb" json:"hugetlb,omitempty"` - Pids *PidsStat `protobuf:"bytes,2,opt,name=pids" json:"pids,omitempty"` - CPU *CPUStat `protobuf:"bytes,3,opt,name=cpu" json:"cpu,omitempty"` - Memory *MemoryStat `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"` - Blkio *BlkIOStat `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"` - Rdma *RdmaStat `protobuf:"bytes,6,opt,name=rdma" json:"rdma,omitempty"` - Network []*NetworkStat `protobuf:"bytes,7,rep,name=network" json:"network,omitempty"` -} - -func (m *Metrics) Reset() { *m = Metrics{} } -func (*Metrics) ProtoMessage() {} -func (*Metrics) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} } - -type HugetlbStat struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"` - Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"` -} - -func (m *HugetlbStat) Reset() { *m = HugetlbStat{} } -func (*HugetlbStat) ProtoMessage() {} -func (*HugetlbStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{1} } - -type PidsStat struct { - Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"` -} - -func (m *PidsStat) Reset() { *m = PidsStat{} } -func (*PidsStat) ProtoMessage() {} -func (*PidsStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{2} } - -type CPUStat struct { - Usage *CPUUsage `protobuf:"bytes,1,opt,name=usage" json:"usage,omitempty"` - Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling" json:"throttling,omitempty"` -} - -func (m *CPUStat) Reset() { *m = CPUStat{} } -func (*CPUStat) ProtoMessage() {} -func (*CPUStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{3} } - -type CPUUsage struct { - // values in nanoseconds - Total uint64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"` - Kernel uint64 `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"` - User uint64 `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"` - PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu" json:"per_cpu,omitempty"` -} - -func (m *CPUUsage) Reset() { *m = CPUUsage{} } -func (*CPUUsage) ProtoMessage() {} -func (*CPUUsage) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{4} } - -type Throttle struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"` -} - -func (m *Throttle) Reset() { *m = Throttle{} } -func (*Throttle) ProtoMessage() {} -func (*Throttle) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{5} } - -type MemoryStat struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"` - RSS uint64 `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"` - RSSHuge uint64 `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" 
json:"rss_huge,omitempty"` - MappedFile uint64 `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"` - Dirty uint64 `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"` - Writeback uint64 `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"` - PgPgIn uint64 `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"` - PgPgOut uint64 `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"` - PgFault uint64 `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"` - PgMajFault uint64 `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"` - InactiveAnon uint64 `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"` - ActiveAnon uint64 `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"` - InactiveFile uint64 `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"` - ActiveFile uint64 `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"` - Unevictable uint64 `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"` - HierarchicalMemoryLimit uint64 `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"` - HierarchicalSwapLimit uint64 `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"` - TotalCache uint64 `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"` - TotalRSS uint64 `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"` - TotalRSSHuge uint64 `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"` - TotalMappedFile uint64 `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"` - TotalDirty uint64 `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"` - TotalWriteback uint64 `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"` - TotalPgPgIn uint64 `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"` - TotalPgPgOut uint64 `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"` - TotalPgFault uint64 `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"` - TotalPgMajFault uint64 `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"` - TotalInactiveAnon uint64 `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"` - TotalActiveAnon uint64 `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"` - TotalInactiveFile uint64 `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"` - TotalActiveFile uint64 `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"` - TotalUnevictable uint64 
`protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"` - Usage *MemoryEntry `protobuf:"bytes,33,opt,name=usage" json:"usage,omitempty"` - Swap *MemoryEntry `protobuf:"bytes,34,opt,name=swap" json:"swap,omitempty"` - Kernel *MemoryEntry `protobuf:"bytes,35,opt,name=kernel" json:"kernel,omitempty"` - KernelTCP *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp" json:"kernel_tcp,omitempty"` -} - -func (m *MemoryStat) Reset() { *m = MemoryStat{} } -func (*MemoryStat) ProtoMessage() {} -func (*MemoryStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{6} } - -type MemoryEntry struct { - Limit uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` - Usage uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"` - Max uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"` - Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"` -} - -func (m *MemoryEntry) Reset() { *m = MemoryEntry{} } -func (*MemoryEntry) ProtoMessage() {} -func (*MemoryEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{7} } - -type BlkIOStat struct { - IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` -} - -func (m *BlkIOStat) Reset() { *m = BlkIOStat{} } -func (*BlkIOStat) ProtoMessage() {} -func (*BlkIOStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{8} } - -type BlkIOEntry struct { - Op string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"` - Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"` - Major uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"` - Value uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"` -} - -func (m *BlkIOEntry) Reset() { *m = BlkIOEntry{} } -func (*BlkIOEntry) ProtoMessage() {} -func (*BlkIOEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{9} } - -type RdmaStat struct { - Current []*RdmaEntry `protobuf:"bytes,1,rep,name=current" json:"current,omitempty"` - Limit []*RdmaEntry `protobuf:"bytes,2,rep,name=limit" json:"limit,omitempty"` -} - -func (m *RdmaStat) Reset() { *m = RdmaStat{} } -func (*RdmaStat) ProtoMessage() {} -func (*RdmaStat) Descriptor() ([]byte, 
[]int) { return fileDescriptorMetrics, []int{10} } - -type RdmaEntry struct { - Device string `protobuf:"bytes,1,opt,name=device,proto3" json:"device,omitempty"` - HcaHandles uint32 `protobuf:"varint,2,opt,name=hca_handles,json=hcaHandles,proto3" json:"hca_handles,omitempty"` - HcaObjects uint32 `protobuf:"varint,3,opt,name=hca_objects,json=hcaObjects,proto3" json:"hca_objects,omitempty"` -} - -func (m *RdmaEntry) Reset() { *m = RdmaEntry{} } -func (*RdmaEntry) ProtoMessage() {} -func (*RdmaEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{11} } - -type NetworkStat struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"` - RxPackets uint64 `protobuf:"varint,3,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"` - TxErrors uint64 `protobuf:"varint,8,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"` -} - -func (m *NetworkStat) Reset() { *m = NetworkStat{} } -func (*NetworkStat) ProtoMessage() {} -func (*NetworkStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{12} } - -func init() { - proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics") - proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat") - proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat") - proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat") - proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage") - proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle") - proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat") - proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry") - proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat") - proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry") - proto.RegisterType((*RdmaStat)(nil), "io.containerd.cgroups.v1.RdmaStat") - proto.RegisterType((*RdmaEntry)(nil), "io.containerd.cgroups.v1.RdmaEntry") - proto.RegisterType((*NetworkStat)(nil), "io.containerd.cgroups.v1.NetworkStat") -} -func (m *Metrics) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Metrics) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Hugetlb) > 0 { - for _, msg := range m.Hugetlb { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Pids != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Pids.Size())) - n1, err := m.Pids.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.CPU != nil { - dAtA[i] = 0x1a - i++ - i = 
encodeVarintMetrics(dAtA, i, uint64(m.CPU.Size())) - n2, err := m.CPU.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.Memory != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Memory.Size())) - n3, err := m.Memory.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Blkio != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Blkio.Size())) - n4, err := m.Blkio.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.Rdma != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Rdma.Size())) - n5, err := m.Rdma.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if len(m.Network) > 0 { - for _, msg := range m.Network { - dAtA[i] = 0x3a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HugetlbStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Usage != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - } - if m.Max != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - } - if m.Failcnt != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - } - if len(m.Pagesize) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize))) - i += copy(dAtA[i:], m.Pagesize) - } - return i, nil -} - -func (m *PidsStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Current != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Current)) - } - if m.Limit != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - } - return i, nil -} - -func (m *CPUStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Usage != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n6, err := m.Usage.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.Throttling != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size())) - n7, err := m.Throttling.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *CPUUsage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Total != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Total)) - } - if m.Kernel != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel)) - } - if m.User != 0 { - dAtA[i] = 0x18 - i++ - i = 
encodeVarintMetrics(dAtA, i, uint64(m.User)) - } - if len(m.PerCPU) > 0 { - dAtA9 := make([]byte, len(m.PerCPU)*10) - var j8 int - for _, num := range m.PerCPU { - for num >= 1<<7 { - dAtA9[j8] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j8++ - } - dAtA9[j8] = uint8(num) - j8++ - } - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(j8)) - i += copy(dAtA[i:], dAtA9[:j8]) - } - return i, nil -} - -func (m *Throttle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Throttle) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Periods != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods)) - } - if m.ThrottledTime != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime)) - } - return i, nil -} - -func (m *MemoryStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Cache != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Cache)) - } - if m.RSS != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RSS)) - } - if m.RSSHuge != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge)) - } - if m.MappedFile != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile)) - } - if m.Dirty != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty)) - } - if m.Writeback != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - dAtA[i] = 0x50 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - dAtA[i] = 0x58 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { - dAtA[i] = 0x60 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - dAtA[i] = 0x68 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - dAtA[i] = 0x70 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - dAtA[i] = 0x78 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable)) - } - if m.HierarchicalMemoryLimit != 0 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit)) - } - if m.HierarchicalSwapLimit != 0 { - dAtA[i] = 0x88 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit)) - } - if m.TotalCache != 0 { - dAtA[i] = 0x90 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache)) - } - if m.TotalRSS != 0 { - dAtA[i] = 0x98 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS)) - } - if m.TotalRSSHuge != 0 { 
- dAtA[i] = 0xa0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge)) - } - if m.TotalMappedFile != 0 { - dAtA[i] = 0xa8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile)) - } - if m.TotalDirty != 0 { - dAtA[i] = 0xb0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty)) - } - if m.TotalWriteback != 0 { - dAtA[i] = 0xb8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback)) - } - if m.TotalPgPgIn != 0 { - dAtA[i] = 0xc0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn)) - } - if m.TotalPgPgOut != 0 { - dAtA[i] = 0xc8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut)) - } - if m.TotalPgFault != 0 { - dAtA[i] = 0xd0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault)) - } - if m.TotalPgMajFault != 0 { - dAtA[i] = 0xd8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault)) - } - if m.TotalInactiveAnon != 0 { - dAtA[i] = 0xe0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon)) - } - if m.TotalActiveAnon != 0 { - dAtA[i] = 0xe8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon)) - } - if m.TotalInactiveFile != 0 { - dAtA[i] = 0xf0 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile)) - } - if m.TotalActiveFile != 0 { - dAtA[i] = 0xf8 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile)) - } - if m.TotalUnevictable != 0 { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable)) - } - if m.Usage != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size())) - n10, err := m.Usage.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.Swap != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size())) - n11, err := m.Swap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.Kernel != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size())) - n12, err := m.Kernel.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - if m.KernelTCP != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x2 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size())) - n13, err := m.KernelTCP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - return i, nil -} - -func (m *MemoryEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Limit != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Limit)) - } - if m.Usage != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Usage)) - } - if m.Max != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Max)) - } - if m.Failcnt != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt)) - } - return i, nil -} - -func (m *BlkIOStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, msg := range m.IoServiceBytesRecursive { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoServicedRecursive) > 0 { - for _, msg := range m.IoServicedRecursive { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoQueuedRecursive) > 0 { - for _, msg := range m.IoQueuedRecursive { - dAtA[i] = 0x1a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for _, msg := range m.IoServiceTimeRecursive { - dAtA[i] = 0x22 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for _, msg := range m.IoWaitTimeRecursive { - dAtA[i] = 0x2a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoMergedRecursive) > 0 { - for _, msg := range m.IoMergedRecursive { - dAtA[i] = 0x32 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.IoTimeRecursive) > 0 { - for _, msg := range m.IoTimeRecursive { - dAtA[i] = 0x3a - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.SectorsRecursive) > 0 { - for _, msg := range m.SectorsRecursive { - dAtA[i] = 0x42 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Op) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op))) - i += copy(dAtA[i:], m.Op) - } - if len(m.Device) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i += copy(dAtA[i:], m.Device) - } - if m.Major != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Major)) - } - if m.Minor != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Minor)) - } - if m.Value != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.Value)) - } - return i, nil -} - -func (m *RdmaStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Current) > 0 { - for _, msg := range m.Current { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Limit) > 0 { - for _, msg := range m.Limit { - dAtA[i] = 0x12 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RdmaEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RdmaEntry) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Device) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device))) - i += copy(dAtA[i:], m.Device) - } - if m.HcaHandles != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.HcaObjects)) - } - return i, nil -} - -func (m *NetworkStat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkStat) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Name) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintMetrics(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - } - if m.RxBytes != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxBytes)) - } - if m.RxPackets != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxPackets)) - } - if m.RxErrors != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxErrors)) - } - if m.RxDropped != 0 { - dAtA[i] = 0x28 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.RxDropped)) - } - if m.TxBytes != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxBytes)) - } - if m.TxPackets != 0 { - dAtA[i] = 0x38 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxPackets)) - } - if m.TxErrors != 0 { - dAtA[i] = 0x40 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxErrors)) - } - if m.TxDropped != 0 { - dAtA[i] = 0x48 - i++ - i = encodeVarintMetrics(dAtA, i, uint64(m.TxDropped)) - } - return i, nil -} - -func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Metrics) Size() (n int) { - var l int - _ = l - if len(m.Hugetlb) > 0 { - for _, e := range m.Hugetlb { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if m.Pids != nil { - l = m.Pids.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Blkio != nil { - l = m.Blkio.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Rdma != nil { - l = m.Rdma.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if len(m.Network) > 0 { - for _, e := range m.Network { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *HugetlbStat) Size() (n int) { - var l int - _ = l - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - l = len(m.Pagesize) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - 
} - return n -} - -func (m *PidsStat) Size() (n int) { - var l int - _ = l - if m.Current != 0 { - n += 1 + sovMetrics(uint64(m.Current)) - } - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - return n -} - -func (m *CPUStat) Size() (n int) { - var l int - _ = l - if m.Usage != nil { - l = m.Usage.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Throttling != nil { - l = m.Throttling.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - return n -} - -func (m *CPUUsage) Size() (n int) { - var l int - _ = l - if m.Total != 0 { - n += 1 + sovMetrics(uint64(m.Total)) - } - if m.Kernel != 0 { - n += 1 + sovMetrics(uint64(m.Kernel)) - } - if m.User != 0 { - n += 1 + sovMetrics(uint64(m.User)) - } - if len(m.PerCPU) > 0 { - l = 0 - for _, e := range m.PerCPU { - l += sovMetrics(uint64(e)) - } - n += 1 + sovMetrics(uint64(l)) + l - } - return n -} - -func (m *Throttle) Size() (n int) { - var l int - _ = l - if m.Periods != 0 { - n += 1 + sovMetrics(uint64(m.Periods)) - } - if m.ThrottledPeriods != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledPeriods)) - } - if m.ThrottledTime != 0 { - n += 1 + sovMetrics(uint64(m.ThrottledTime)) - } - return n -} - -func (m *MemoryStat) Size() (n int) { - var l int - _ = l - if m.Cache != 0 { - n += 1 + sovMetrics(uint64(m.Cache)) - } - if m.RSS != 0 { - n += 1 + sovMetrics(uint64(m.RSS)) - } - if m.RSSHuge != 0 { - n += 1 + sovMetrics(uint64(m.RSSHuge)) - } - if m.MappedFile != 0 { - n += 1 + sovMetrics(uint64(m.MappedFile)) - } - if m.Dirty != 0 { - n += 1 + sovMetrics(uint64(m.Dirty)) - } - if m.Writeback != 0 { - n += 1 + sovMetrics(uint64(m.Writeback)) - } - if m.PgPgIn != 0 { - n += 1 + sovMetrics(uint64(m.PgPgIn)) - } - if m.PgPgOut != 0 { - n += 1 + sovMetrics(uint64(m.PgPgOut)) - } - if m.PgFault != 0 { - n += 1 + sovMetrics(uint64(m.PgFault)) - } - if m.PgMajFault != 0 { - n += 1 + sovMetrics(uint64(m.PgMajFault)) - } - if m.InactiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.InactiveAnon)) - } - if m.ActiveAnon != 0 { - n += 1 + sovMetrics(uint64(m.ActiveAnon)) - } - if m.InactiveFile != 0 { - n += 1 + sovMetrics(uint64(m.InactiveFile)) - } - if m.ActiveFile != 0 { - n += 1 + sovMetrics(uint64(m.ActiveFile)) - } - if m.Unevictable != 0 { - n += 1 + sovMetrics(uint64(m.Unevictable)) - } - if m.HierarchicalMemoryLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit)) - } - if m.HierarchicalSwapLimit != 0 { - n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit)) - } - if m.TotalCache != 0 { - n += 2 + sovMetrics(uint64(m.TotalCache)) - } - if m.TotalRSS != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSS)) - } - if m.TotalRSSHuge != 0 { - n += 2 + sovMetrics(uint64(m.TotalRSSHuge)) - } - if m.TotalMappedFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalMappedFile)) - } - if m.TotalDirty != 0 { - n += 2 + sovMetrics(uint64(m.TotalDirty)) - } - if m.TotalWriteback != 0 { - n += 2 + sovMetrics(uint64(m.TotalWriteback)) - } - if m.TotalPgPgIn != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgIn)) - } - if m.TotalPgPgOut != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgPgOut)) - } - if m.TotalPgFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgFault)) - } - if m.TotalPgMajFault != 0 { - n += 2 + sovMetrics(uint64(m.TotalPgMajFault)) - } - if m.TotalInactiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveAnon)) - } - if m.TotalActiveAnon != 0 { - n += 2 + sovMetrics(uint64(m.TotalActiveAnon)) - } - if m.TotalInactiveFile != 0 { - n += 2 + sovMetrics(uint64(m.TotalInactiveFile)) - } - if m.TotalActiveFile != 0 { - n += 2 + 
sovMetrics(uint64(m.TotalActiveFile)) - } - if m.TotalUnevictable != 0 { - n += 2 + sovMetrics(uint64(m.TotalUnevictable)) - } - if m.Usage != nil { - l = m.Usage.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Swap != nil { - l = m.Swap.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.Kernel != nil { - l = m.Kernel.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - if m.KernelTCP != nil { - l = m.KernelTCP.Size() - n += 2 + l + sovMetrics(uint64(l)) - } - return n -} - -func (m *MemoryEntry) Size() (n int) { - var l int - _ = l - if m.Limit != 0 { - n += 1 + sovMetrics(uint64(m.Limit)) - } - if m.Usage != 0 { - n += 1 + sovMetrics(uint64(m.Usage)) - } - if m.Max != 0 { - n += 1 + sovMetrics(uint64(m.Max)) - } - if m.Failcnt != 0 { - n += 1 + sovMetrics(uint64(m.Failcnt)) - } - return n -} - -func (m *BlkIOStat) Size() (n int) { - var l int - _ = l - if len(m.IoServiceBytesRecursive) > 0 { - for _, e := range m.IoServiceBytesRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServicedRecursive) > 0 { - for _, e := range m.IoServicedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoQueuedRecursive) > 0 { - for _, e := range m.IoQueuedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoServiceTimeRecursive) > 0 { - for _, e := range m.IoServiceTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoWaitTimeRecursive) > 0 { - for _, e := range m.IoWaitTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoMergedRecursive) > 0 { - for _, e := range m.IoMergedRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.IoTimeRecursive) > 0 { - for _, e := range m.IoTimeRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.SectorsRecursive) > 0 { - for _, e := range m.SectorsRecursive { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *BlkIOEntry) Size() (n int) { - var l int - _ = l - l = len(m.Op) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.Major != 0 { - n += 1 + sovMetrics(uint64(m.Major)) - } - if m.Minor != 0 { - n += 1 + sovMetrics(uint64(m.Minor)) - } - if m.Value != 0 { - n += 1 + sovMetrics(uint64(m.Value)) - } - return n -} - -func (m *RdmaStat) Size() (n int) { - var l int - _ = l - if len(m.Current) > 0 { - for _, e := range m.Current { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - if len(m.Limit) > 0 { - for _, e := range m.Limit { - l = e.Size() - n += 1 + l + sovMetrics(uint64(l)) - } - } - return n -} - -func (m *RdmaEntry) Size() (n int) { - var l int - _ = l - l = len(m.Device) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.HcaHandles != 0 { - n += 1 + sovMetrics(uint64(m.HcaHandles)) - } - if m.HcaObjects != 0 { - n += 1 + sovMetrics(uint64(m.HcaObjects)) - } - return n -} - -func (m *NetworkStat) Size() (n int) { - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovMetrics(uint64(l)) - } - if m.RxBytes != 0 { - n += 1 + sovMetrics(uint64(m.RxBytes)) - } - if m.RxPackets != 0 { - n += 1 + sovMetrics(uint64(m.RxPackets)) - } - if m.RxErrors != 0 { - n += 1 + sovMetrics(uint64(m.RxErrors)) - } - if m.RxDropped != 0 { - n += 1 + sovMetrics(uint64(m.RxDropped)) - } - if m.TxBytes != 0 { - n += 1 + sovMetrics(uint64(m.TxBytes)) - } - if m.TxPackets != 0 { - n += 1 + 
sovMetrics(uint64(m.TxPackets)) - } - if m.TxErrors != 0 { - n += 1 + sovMetrics(uint64(m.TxErrors)) - } - if m.TxDropped != 0 { - n += 1 + sovMetrics(uint64(m.TxDropped)) - } - return n -} - -func sovMetrics(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozMetrics(x uint64) (n int) { - return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Metrics) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Metrics{`, - `Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugetlbStat", "HugetlbStat", 1) + `,`, - `Pids:` + strings.Replace(fmt.Sprintf("%v", this.Pids), "PidsStat", "PidsStat", 1) + `,`, - `CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "CPUStat", "CPUStat", 1) + `,`, - `Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`, - `Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`, - `Rdma:` + strings.Replace(fmt.Sprintf("%v", this.Rdma), "RdmaStat", "RdmaStat", 1) + `,`, - `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "NetworkStat", "NetworkStat", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HugetlbStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HugetlbStat{`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`, - `}`, - }, "") - return s -} -func (this *PidsStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PidsStat{`, - `Current:` + fmt.Sprintf("%v", this.Current) + `,`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `}`, - }, "") - return s -} -func (this *CPUStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUStat{`, - `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "CPUUsage", "CPUUsage", 1) + `,`, - `Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "Throttle", "Throttle", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CPUUsage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CPUUsage{`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`, - `}`, - }, "") - return s -} -func (this *Throttle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Throttle{`, - `Periods:` + fmt.Sprintf("%v", this.Periods) + `,`, - `ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`, - `ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryStat{`, - `Cache:` + fmt.Sprintf("%v", this.Cache) + `,`, - `RSS:` + fmt.Sprintf("%v", this.RSS) + `,`, - `RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`, - `MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`, - `Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`, - `Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`, - `PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`, - `PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`, - `PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`, - `PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + 
`,`, - `InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`, - `ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`, - `InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`, - `ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`, - `Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`, - `HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`, - `HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`, - `TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`, - `TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`, - `TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`, - `TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`, - `TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`, - `TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`, - `TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`, - `TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`, - `TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`, - `TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`, - `TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`, - `TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`, - `TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`, - `TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`, - `TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`, - `Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Swap:` + strings.Replace(fmt.Sprintf("%v", this.Swap), "MemoryEntry", "MemoryEntry", 1) + `,`, - `Kernel:` + strings.Replace(fmt.Sprintf("%v", this.Kernel), "MemoryEntry", "MemoryEntry", 1) + `,`, - `KernelTCP:` + strings.Replace(fmt.Sprintf("%v", this.KernelTCP), "MemoryEntry", "MemoryEntry", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MemoryEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MemoryEntry{`, - `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, - `Usage:` + fmt.Sprintf("%v", this.Usage) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BlkIOStat{`, - `IoServiceBytesRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceBytesRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoServicedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServicedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoQueuedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoQueuedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoServiceTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoWaitTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoWaitTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoMergedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoMergedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `IoTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `SectorsRecursive:` + strings.Replace(fmt.Sprintf("%v", this.SectorsRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`, - `}`, - }, "") - return s -} -func (this *BlkIOEntry) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&BlkIOEntry{`, - `Op:` + fmt.Sprintf("%v", this.Op) + `,`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `Major:` + fmt.Sprintf("%v", this.Major) + `,`, - `Minor:` + fmt.Sprintf("%v", this.Minor) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaStat{`, - `Current:` + strings.Replace(fmt.Sprintf("%v", this.Current), "RdmaEntry", "RdmaEntry", 1) + `,`, - `Limit:` + strings.Replace(fmt.Sprintf("%v", this.Limit), "RdmaEntry", "RdmaEntry", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RdmaEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RdmaEntry{`, - `Device:` + fmt.Sprintf("%v", this.Device) + `,`, - `HcaHandles:` + fmt.Sprintf("%v", this.HcaHandles) + `,`, - `HcaObjects:` + fmt.Sprintf("%v", this.HcaObjects) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkStat) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkStat{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `RxBytes:` + fmt.Sprintf("%v", this.RxBytes) + `,`, - `RxPackets:` + fmt.Sprintf("%v", this.RxPackets) + `,`, - `RxErrors:` + fmt.Sprintf("%v", this.RxErrors) + `,`, - `RxDropped:` + fmt.Sprintf("%v", this.RxDropped) + `,`, - `TxBytes:` + fmt.Sprintf("%v", this.TxBytes) + `,`, - `TxPackets:` + fmt.Sprintf("%v", this.TxPackets) + `,`, - `TxErrors:` + fmt.Sprintf("%v", this.TxErrors) + `,`, - `TxDropped:` + fmt.Sprintf("%v", this.TxDropped) + `,`, - `}`, - }, "") - return s -} -func valueToStringMetrics(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Metrics) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Metrics: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hugetlb = append(m.Hugetlb, &HugetlbStat{}) - if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pids == nil { - m.Pids = &PidsStat{} - } - if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPUStat{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &MemoryStat{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Blkio == nil { - m.Blkio = &BlkIOStat{} - } - if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rdma", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Rdma == nil { - m.Rdma = &RdmaStat{} - } - if err := m.Rdma.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Network = append(m.Network, &NetworkStat{}) - if err := 
m.Network[len(m.Network)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HugetlbStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pagesize = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PidsStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } 
- fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PidsStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &CPUUsage{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttling == nil { - m.Throttling = &Throttle{} - } - if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) 
> l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CPUUsage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - m.Kernel = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kernel |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - m.User = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.User |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType == 0 { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + packedLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - for iNdEx < postIndex { - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PerCPU = append(m.PerCPU, v) - } - } else { - return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType) - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Throttle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Throttle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType) - } - m.Periods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Periods |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType) - } - m.ThrottledPeriods = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledPeriods |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType) - } - m.ThrottledTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ThrottledTime |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - m.Cache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cache |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType) - } - m.RSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSS |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType) - } - m.RSSHuge = 0 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RSSHuge |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType) - } - m.MappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MappedFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType) - } - m.Dirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Dirty |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType) - } - m.Writeback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Writeback |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType) - } - m.PgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgIn |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType) - } - m.PgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgPgOut |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType) - } - m.PgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgFault |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType) - } - m.PgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PgMajFault |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType) - } - m.InactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveAnon |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType) - } - m.ActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveAnon |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType) - } - m.InactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InactiveFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType) - } - m.ActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ActiveFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType) - } - m.Unevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Unevictable |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType) - } - m.HierarchicalMemoryLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalMemoryLimit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType) - } - m.HierarchicalSwapLimit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HierarchicalSwapLimit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType) - } - m.TotalCache = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalCache |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 19: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType) - } - m.TotalRSS = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSS |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 20: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType) - } - m.TotalRSSHuge = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalRSSHuge |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 21: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType) - } - m.TotalMappedFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalMappedFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 22: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType) - } - m.TotalDirty = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalDirty |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 23: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType) - } - m.TotalWriteback = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalWriteback |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 24: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType) - } - m.TotalPgPgIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgIn |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 25: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType) - } - m.TotalPgPgOut = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgPgOut |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 26: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType) - } - m.TotalPgFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgFault |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 27: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType) - } - m.TotalPgMajFault = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalPgMajFault |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 28: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType) - } - m.TotalInactiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveAnon |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 29: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType) - } - m.TotalActiveAnon = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveAnon |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType) - } - m.TotalInactiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalInactiveFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 31: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType) - } - m.TotalActiveFile = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalActiveFile |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 32: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType) - } - m.TotalUnevictable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TotalUnevictable |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Usage == nil { - m.Usage = &MemoryEntry{} - } - if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Swap == nil { - m.Swap = &MemoryEntry{} - } - if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 35: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Kernel == nil { - m.Kernel = &MemoryEntry{} - } - if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 36: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.KernelTCP == nil { - m.KernelTCP = 
&MemoryEntry{} - } - if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MemoryEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) - } - m.Usage = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Usage |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType) - } - m.Failcnt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failcnt |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{}) - if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{}) - if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{}) - if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{}) - if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{}) - if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{}) - if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{}) - if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{}) - if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BlkIOEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Op = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType) - } - m.Major = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Major |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType) - } - m.Minor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Minor |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Current = append(m.Current, &RdmaEntry{}) - if err := m.Current[len(m.Current)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limit = append(m.Limit, &RdmaEntry{}) - if err := m.Limit[len(m.Limit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RdmaEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RdmaEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RdmaEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Device = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaHandles", wireType) - } - m.HcaHandles = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaHandles |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HcaObjects", wireType) - } - m.HcaObjects = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HcaObjects |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkStat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkStat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkStat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthMetrics - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxBytes", wireType) - } - m.RxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxBytes |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxPackets", wireType) - } - m.RxPackets = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxPackets |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxErrors", wireType) - } - m.RxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxErrors |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RxDropped", wireType) - } - m.RxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.RxDropped |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxBytes", wireType) - } - m.TxBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxBytes |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxPackets", wireType) - } - m.TxPackets = 0 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxPackets |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxErrors", wireType) - } - m.TxErrors = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxErrors |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TxDropped", wireType) - } - m.TxDropped = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMetrics - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TxDropped |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipMetrics(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthMetrics - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMetrics(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthMetrics - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMetrics - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipMetrics(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) } - -var fileDescriptorMetrics = []byte{ - // 1549 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7, - 0x16, 0x8d, 0x2c, 0xd9, 
0xd2, 0x5c, 0xd9, 0x8e, 0x4d, 0x27, 0xce, 0xd8, 0x49, 0x2c, 0x47, 0xb6, - 0xdf, 0xf3, 0x7b, 0x06, 0x64, 0xbc, 0x3c, 0x20, 0x68, 0xd2, 0x04, 0x45, 0xe4, 0x24, 0x48, 0xd0, - 0xba, 0x51, 0x46, 0x36, 0xd2, 0xae, 0x06, 0xd4, 0x88, 0x19, 0xd1, 0x96, 0x86, 0x13, 0x0e, 0xc7, - 0x96, 0xbb, 0xea, 0xa2, 0x40, 0x57, 0xfd, 0x33, 0xfd, 0x15, 0x59, 0x76, 0x53, 0xa0, 0xdd, 0x18, - 0x8d, 0x7e, 0x49, 0x41, 0x72, 0x3e, 0xa8, 0x24, 0x8e, 0xab, 0xdd, 0x90, 0x3c, 0xe7, 0xdc, 0xcb, - 0x3b, 0x87, 0xc3, 0x3b, 0xb0, 0xe3, 0x53, 0xd1, 0x8b, 0x3b, 0x0d, 0x8f, 0x0d, 0x76, 0x3d, 0x16, - 0x08, 0x4c, 0x03, 0xc2, 0xbb, 0xbb, 0x9e, 0xcf, 0x59, 0x1c, 0x46, 0xbb, 0x03, 0x22, 0x38, 0xf5, - 0xa2, 0x46, 0xc8, 0x99, 0x60, 0xc8, 0xa6, 0xac, 0x91, 0x83, 0x1a, 0x09, 0xa8, 0x71, 0xf2, 0xbf, - 0xd5, 0x6b, 0x3e, 0xf3, 0x99, 0x02, 0xed, 0xca, 0x27, 0x8d, 0xaf, 0xff, 0x5a, 0x84, 0xf2, 0xbe, - 0x56, 0x40, 0x5f, 0x41, 0xb9, 0x17, 0xfb, 0x44, 0xf4, 0x3b, 0x76, 0x61, 0xbd, 0xb8, 0x5d, 0xbd, - 0xbb, 0xd5, 0xb8, 0x48, 0xad, 0xf1, 0x5c, 0x03, 0xdb, 0x02, 0x0b, 0x27, 0x65, 0xa1, 0x7b, 0x50, - 0x0a, 0x69, 0x37, 0xb2, 0xa7, 0xd6, 0x0b, 0xdb, 0xd5, 0xbb, 0xf5, 0x8b, 0xd9, 0x2d, 0xda, 0x8d, - 0x14, 0x55, 0xe1, 0xd1, 0x43, 0x28, 0x7a, 0x61, 0x6c, 0x17, 0x15, 0xed, 0xce, 0xc5, 0xb4, 0xbd, - 0xd6, 0xa1, 0x64, 0x35, 0xcb, 0xa3, 0xf3, 0x5a, 0x71, 0xaf, 0x75, 0xe8, 0x48, 0x1a, 0x7a, 0x08, - 0x33, 0x03, 0x32, 0x60, 0xfc, 0xcc, 0x2e, 0x29, 0x81, 0xcd, 0x8b, 0x05, 0xf6, 0x15, 0x4e, 0x45, - 0x4e, 0x38, 0xe8, 0x3e, 0x4c, 0x77, 0xfa, 0xc7, 0x94, 0xd9, 0xd3, 0x8a, 0xbc, 0x71, 0x31, 0xb9, - 0xd9, 0x3f, 0x7e, 0xf1, 0x52, 0x71, 0x35, 0x43, 0x6e, 0x97, 0x77, 0x07, 0xd8, 0x9e, 0xb9, 0x6c, - 0xbb, 0x4e, 0x77, 0x80, 0xf5, 0x76, 0x25, 0x5e, 0xd6, 0x39, 0x20, 0xe2, 0x94, 0xf1, 0x63, 0xbb, - 0x7c, 0x59, 0x9d, 0xbf, 0xd5, 0x40, 0x5d, 0xe7, 0x84, 0x55, 0x3f, 0x86, 0xaa, 0x51, 0x7f, 0x74, - 0x0d, 0xa6, 0xe3, 0x08, 0xfb, 0xc4, 0x2e, 0xac, 0x17, 0xb6, 0x4b, 0x8e, 0x1e, 0xa0, 0x05, 0x28, - 0x0e, 0xf0, 0x50, 0xbd, 0x8b, 0x92, 0x23, 0x1f, 0x91, 0x0d, 0xe5, 0x37, 0x98, 0xf6, 0xbd, 0x40, - 0xa8, 0x52, 0x97, 0x9c, 0x74, 0x88, 0x56, 0xa1, 0x12, 0x62, 0x9f, 0x44, 0xf4, 0x07, 0xa2, 0x8a, - 0x68, 0x39, 0xd9, 0xb8, 0xfe, 0x00, 0x2a, 0xe9, 0xeb, 0x92, 0x0a, 0x5e, 0xcc, 0x39, 0x09, 0x44, - 0x12, 0x2b, 0x1d, 0xca, 0x1c, 0xfa, 0x74, 0x40, 0x45, 0x12, 0x4f, 0x0f, 0xea, 0x3f, 0x17, 0xa0, - 0x9c, 0xbc, 0x34, 0xf4, 0x85, 0x99, 0xe5, 0x67, 0xcb, 0xb5, 0xd7, 0x3a, 0x3c, 0x94, 0xc8, 0x74, - 0x27, 0x4d, 0x00, 0xd1, 0xe3, 0x4c, 0x88, 0x3e, 0x0d, 0xfc, 0xcb, 0xcd, 0x75, 0xa0, 0xb1, 0xc4, - 0x31, 0x58, 0xf5, 0xb7, 0x50, 0x49, 0x65, 0x65, 0xae, 0x82, 0x09, 0xdc, 0x4f, 0xeb, 0xa5, 0x06, - 0x68, 0x19, 0x66, 0x8e, 0x09, 0x0f, 0x48, 0x3f, 0xd9, 0x42, 0x32, 0x42, 0x08, 0x4a, 0x71, 0x44, - 0x78, 0x52, 0x32, 0xf5, 0x8c, 0x36, 0xa0, 0x1c, 0x12, 0xee, 0x4a, 0xd3, 0x96, 0xd6, 0x8b, 0xdb, - 0xa5, 0x26, 0x8c, 0xce, 0x6b, 0x33, 0x2d, 0xc2, 0xa5, 0x29, 0x67, 0x42, 0xc2, 0xf7, 0xc2, 0xb8, - 0x3e, 0x84, 0x4a, 0x9a, 0x8a, 0x2c, 0x5c, 0x48, 0x38, 0x65, 0xdd, 0x28, 0x2d, 0x5c, 0x32, 0x44, - 0x3b, 0xb0, 0x98, 0xa4, 0x49, 0xba, 0x6e, 0x8a, 0xd1, 0x19, 0x2c, 0x64, 0x0b, 0xad, 0x04, 0xbc, - 0x05, 0xf3, 0x39, 0x58, 0xd0, 0x01, 0x49, 0xb2, 0x9a, 0xcb, 0x66, 0x0f, 0xe8, 0x80, 0xd4, 0xff, - 0xac, 0x02, 0xe4, 0x56, 0x97, 0xfb, 0xf5, 0xb0, 0xd7, 0xcb, 0xfc, 0xa1, 0x06, 0x68, 0x05, 0x8a, - 0x3c, 0x4a, 0x42, 0xe9, 0x13, 0xe5, 0xb4, 0xdb, 0x8e, 0x9c, 0x43, 0xff, 0x82, 0x0a, 0x8f, 0x22, - 0x57, 0x1e, 0x6b, 0x1d, 0xa0, 0x59, 0x1d, 0x9d, 0xd7, 0xca, 0x4e, 0xbb, 0x2d, 0x6d, 0xe7, 0x94, - 0x79, 0x14, 0xc9, 0x07, 0x54, 0x83, 0xea, 0x00, 
0x87, 0x21, 0xe9, 0xba, 0x6f, 0x68, 0x5f, 0x3b, - 0xa7, 0xe4, 0x80, 0x9e, 0x7a, 0x46, 0xfb, 0xaa, 0xd2, 0x5d, 0xca, 0xc5, 0x99, 0x3a, 0x5c, 0x25, - 0x47, 0x0f, 0xd0, 0x2d, 0xb0, 0x4e, 0x39, 0x15, 0xa4, 0x83, 0xbd, 0x63, 0x75, 0x78, 0x4a, 0x4e, - 0x3e, 0x81, 0x6c, 0xa8, 0x84, 0xbe, 0x1b, 0xfa, 0x2e, 0x0d, 0xec, 0xb2, 0x7e, 0x13, 0xa1, 0xdf, - 0xf2, 0x5f, 0x04, 0x68, 0x15, 0x2c, 0xbd, 0xc2, 0x62, 0x61, 0x57, 0x92, 0x32, 0xfa, 0x2d, 0xff, - 0x65, 0x2c, 0xd0, 0x8a, 0x62, 0xbd, 0xc1, 0x71, 0x5f, 0xd8, 0x56, 0xba, 0xf4, 0x4c, 0x0e, 0xd1, - 0x3a, 0xcc, 0x86, 0xbe, 0x3b, 0xc0, 0x47, 0xc9, 0x32, 0xe8, 0x34, 0x43, 0x7f, 0x1f, 0x1f, 0x69, - 0xc4, 0x06, 0xcc, 0xd1, 0x00, 0x7b, 0x82, 0x9e, 0x10, 0x17, 0x07, 0x2c, 0xb0, 0xab, 0x0a, 0x32, - 0x9b, 0x4e, 0x3e, 0x0e, 0x58, 0x20, 0x37, 0x6b, 0x42, 0x66, 0xb5, 0x8a, 0x01, 0x30, 0x55, 0x54, - 0x3d, 0xe6, 0xc6, 0x55, 0x54, 0x45, 0x72, 0x15, 0x05, 0x99, 0x37, 0x55, 0x14, 0x60, 0x1d, 0xaa, - 0x71, 0x40, 0x4e, 0xa8, 0x27, 0x70, 0xa7, 0x4f, 0xec, 0xab, 0x0a, 0x60, 0x4e, 0xa1, 0x07, 0xb0, - 0xd2, 0xa3, 0x84, 0x63, 0xee, 0xf5, 0xa8, 0x87, 0xfb, 0xae, 0xfe, 0x90, 0xb9, 0xfa, 0xf8, 0x2d, - 0x28, 0xfc, 0x0d, 0x13, 0xa0, 0x9d, 0xf0, 0x8d, 0x5c, 0x46, 0xf7, 0x60, 0x6c, 0xc9, 0x8d, 0x4e, - 0x71, 0x98, 0x30, 0x17, 0x15, 0xf3, 0xba, 0xb9, 0xdc, 0x3e, 0xc5, 0xa1, 0xe6, 0xd5, 0xa0, 0xaa, - 0x4e, 0x89, 0xab, 0x8d, 0x84, 0x74, 0xda, 0x6a, 0x6a, 0x4f, 0xb9, 0xe9, 0x3f, 0x60, 0x69, 0x80, - 0xf4, 0xd4, 0x92, 0xf2, 0xcc, 0xec, 0xe8, 0xbc, 0x56, 0x39, 0x90, 0x93, 0xd2, 0x58, 0x15, 0xb5, - 0xec, 0x44, 0x11, 0xba, 0x07, 0xf3, 0x19, 0x54, 0x7b, 0xec, 0x9a, 0xc2, 0x2f, 0x8c, 0xce, 0x6b, - 0xb3, 0x29, 0x5e, 0x19, 0x6d, 0x36, 0xe5, 0x28, 0xb7, 0xfd, 0x17, 0x16, 0x35, 0xcf, 0xf4, 0xdc, - 0x75, 0x95, 0xc9, 0x55, 0xb5, 0xb0, 0x9f, 0x1b, 0x2f, 0xcb, 0x57, 0xdb, 0x6f, 0xd9, 0xc8, 0xf7, - 0x89, 0xf2, 0xe0, 0xbf, 0x41, 0x73, 0xdc, 0xdc, 0x89, 0x37, 0x14, 0x48, 0xe7, 0xf6, 0x3a, 0xb3, - 0xe3, 0x46, 0x9a, 0x6d, 0x66, 0x4a, 0x5b, 0xbf, 0x12, 0x35, 0xdb, 0xd2, 0xce, 0xdc, 0x4a, 0xd5, - 0x72, 0x7f, 0xae, 0xe8, 0x97, 0x9f, 0xa1, 0xa4, 0x49, 0x37, 0x0d, 0x2d, 0xed, 0xc5, 0xd5, 0x31, - 0x94, 0x76, 0xe3, 0x0e, 0xa0, 0x0c, 0x95, 0xbb, 0xf6, 0xa6, 0xb1, 0xd1, 0x56, 0x6e, 0xdd, 0x06, - 0x2c, 0x69, 0xf0, 0xb8, 0x81, 0x6f, 0x29, 0xb4, 0xae, 0xd7, 0x0b, 0xd3, 0xc5, 0x59, 0x11, 0x4d, - 0xf4, 0x6d, 0x43, 0xfb, 0x71, 0x8e, 0xfd, 0x58, 0x5b, 0x95, 0x7c, 0xed, 0x13, 0xda, 0xaa, 0xe8, - 0x1f, 0x6a, 0x2b, 0x74, 0xed, 0x23, 0x6d, 0x85, 0xdd, 0x49, 0xb1, 0xa6, 0xd9, 0xd7, 0x93, 0xcf, - 0x9e, 0x5c, 0x38, 0x34, 0x1c, 0xff, 0x65, 0x7a, 0x75, 0xdc, 0x51, 0xdf, 0xfe, 0xad, 0xcb, 0x2e, - 0xf8, 0xa7, 0x81, 0xe0, 0x67, 0xe9, 0xed, 0x71, 0x1f, 0x4a, 0xd2, 0xe5, 0x76, 0x7d, 0x12, 0xae, - 0xa2, 0xa0, 0x47, 0xd9, 0x95, 0xb0, 0x31, 0x09, 0x39, 0xbd, 0x39, 0xda, 0x00, 0xfa, 0xc9, 0x15, - 0x5e, 0x68, 0x6f, 0x4e, 0x20, 0xd1, 0x9c, 0x1b, 0x9d, 0xd7, 0xac, 0xaf, 0x15, 0xf9, 0x60, 0xaf, - 0xe5, 0x58, 0x5a, 0xe7, 0xc0, 0x0b, 0xeb, 0x04, 0xaa, 0x06, 0x30, 0xbf, 0x77, 0x0b, 0xc6, 0xbd, - 0x9b, 0x77, 0x04, 0x53, 0x9f, 0xe8, 0x08, 0x8a, 0x9f, 0xec, 0x08, 0x4a, 0x63, 0x1d, 0x41, 0xfd, - 0xf7, 0x69, 0xb0, 0xb2, 0x86, 0x07, 0x61, 0x58, 0xa5, 0xcc, 0x8d, 0x08, 0x3f, 0xa1, 0x1e, 0x71, - 0x3b, 0x67, 0x82, 0x44, 0x2e, 0x27, 0x5e, 0xcc, 0x23, 0x7a, 0x42, 0x92, 0x66, 0x71, 0xf3, 0x92, - 0xce, 0x49, 0xd7, 0xe6, 0x06, 0x65, 0x6d, 0x2d, 0xd3, 0x94, 0x2a, 0x4e, 0x2a, 0x82, 0xbe, 0x83, - 0xeb, 0x79, 0x88, 0xae, 0xa1, 0x3e, 0x35, 0x81, 0xfa, 0x52, 0xa6, 0xde, 0xcd, 0x95, 0x0f, 0x60, - 0x89, 0x32, 0xf7, 0x6d, 0x4c, 0xe2, 0x31, 0xdd, 0xe2, 0x04, 0xba, 0x8b, 
0x94, 0xbd, 0x52, 0xfc, - 0x5c, 0xd5, 0x85, 0x15, 0xa3, 0x24, 0xf2, 0x2e, 0x36, 0xb4, 0x4b, 0x13, 0x68, 0x2f, 0x67, 0x39, - 0xcb, 0xbb, 0x3b, 0x0f, 0xf0, 0x3d, 0x2c, 0x53, 0xe6, 0x9e, 0x62, 0x2a, 0x3e, 0x54, 0x9f, 0x9e, - 0xac, 0x22, 0xaf, 0x31, 0x15, 0xe3, 0xd2, 0xba, 0x22, 0x03, 0xc2, 0xfd, 0xb1, 0x8a, 0xcc, 0x4c, - 0x56, 0x91, 0x7d, 0xc5, 0xcf, 0x55, 0x5b, 0xb0, 0x48, 0xd9, 0x87, 0xb9, 0x96, 0x27, 0xd0, 0xbc, - 0x4a, 0xd9, 0x78, 0x9e, 0xaf, 0x60, 0x31, 0x22, 0x9e, 0x60, 0xdc, 0x74, 0x5b, 0x65, 0x02, 0xc5, - 0x85, 0x84, 0x9e, 0x49, 0xd6, 0x4f, 0x00, 0xf2, 0x75, 0x34, 0x0f, 0x53, 0x2c, 0x54, 0x47, 0xc7, - 0x72, 0xa6, 0x58, 0x28, 0x7b, 0xc0, 0xae, 0xfc, 0xec, 0xe8, 0x83, 0x63, 0x39, 0xc9, 0x48, 0x9e, - 0xa7, 0x01, 0x3e, 0x62, 0x69, 0x13, 0xa8, 0x07, 0x6a, 0x96, 0x06, 0x8c, 0x27, 0x67, 0x47, 0x0f, - 0xe4, 0xec, 0x09, 0xee, 0xc7, 0x24, 0xed, 0x79, 0xd4, 0xa0, 0xfe, 0x53, 0x01, 0x2a, 0xe9, 0x6f, - 0x00, 0x7a, 0x64, 0xb6, 0xd1, 0xc5, 0xcf, 0xff, 0x75, 0x48, 0x92, 0xde, 0x4c, 0xd6, 0x6b, 0xdf, - 0xcf, 0x7b, 0xed, 0x7f, 0x4c, 0x4e, 0x1a, 0x72, 0x02, 0x56, 0x36, 0x67, 0xec, 0xb6, 0x30, 0xb6, - 0xdb, 0x1a, 0x54, 0x7b, 0x1e, 0x76, 0x7b, 0x38, 0xe8, 0xf6, 0x89, 0xee, 0x10, 0xe7, 0x1c, 0xe8, - 0x79, 0xf8, 0xb9, 0x9e, 0x49, 0x01, 0xac, 0x73, 0x44, 0x3c, 0x11, 0xa9, 0xa2, 0x68, 0xc0, 0x4b, - 0x3d, 0x53, 0xff, 0x65, 0x0a, 0xaa, 0xc6, 0x9f, 0x8b, 0xec, 0xa1, 0x03, 0x3c, 0x48, 0xe3, 0xa8, - 0x67, 0xd9, 0xb1, 0xf1, 0xa1, 0xfe, 0x96, 0x24, 0x9f, 0xa9, 0x32, 0x1f, 0xaa, 0x8f, 0x02, 0xba, - 0x0d, 0xc0, 0x87, 0x6e, 0x88, 0xbd, 0x63, 0x92, 0xc8, 0x97, 0x1c, 0x8b, 0x0f, 0x5b, 0x7a, 0x02, - 0xdd, 0x04, 0x8b, 0x0f, 0x5d, 0xc2, 0x39, 0xe3, 0x51, 0x52, 0xfb, 0x0a, 0x1f, 0x3e, 0x55, 0xe3, - 0x84, 0xdb, 0xe5, 0x4c, 0xf6, 0x02, 0xc9, 0x3b, 0xb0, 0xf8, 0xf0, 0x89, 0x9e, 0x90, 0x51, 0x45, - 0x1a, 0x55, 0xb7, 0x9e, 0x65, 0x91, 0x47, 0x15, 0x79, 0x54, 0xdd, 0x7a, 0x5a, 0xc2, 0x8c, 0x2a, - 0xb2, 0xa8, 0xba, 0xfb, 0xac, 0x08, 0x23, 0xaa, 0xc8, 0xa3, 0x5a, 0x29, 0x37, 0x89, 0xda, 0xb4, - 0xdf, 0xbd, 0x5f, 0xbb, 0xf2, 0xc7, 0xfb, 0xb5, 0x2b, 0x3f, 0x8e, 0xd6, 0x0a, 0xef, 0x46, 0x6b, - 0x85, 0xdf, 0x46, 0x6b, 0x85, 0xbf, 0x46, 0x6b, 0x85, 0xce, 0x8c, 0xfa, 0x0d, 0xff, 0xff, 0xdf, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x19, 0x9d, 0xe2, 0xd3, 0xe5, 0x0f, 0x00, 0x00, -} diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go deleted file mode 100644 index 06b16c3b..00000000 --- a/vendor/github.com/containerd/cgroups/named.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import "path/filepath" - -func NewNamed(root string, name Name) *namedController { - return &namedController{ - root: root, - name: name, - } -} - -type namedController struct { - root string - name Name -} - -func (n *namedController) Name() Name { - return n.name -} - -func (n *namedController) Path(path string) string { - return filepath.Join(n.root, string(n.name), path) -} diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go deleted file mode 100644 index 8f1a2651..00000000 --- a/vendor/github.com/containerd/cgroups/net_cls.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "os" - "path/filepath" - "strconv" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewNetCls(root string) *netclsController { - return &netclsController{ - root: filepath.Join(root, string(NetCLS)), - } -} - -type netclsController struct { - root string -} - -func (n *netclsController) Name() Name { - return NetCLS -} - -func (n *netclsController) Path(path string) string { - return filepath.Join(n.root, path) -} - -func (n *netclsController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 { - return ioutil.WriteFile( - filepath.Join(n.Path(path), "net_cls.classid"), - []byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)), - defaultFilePerm, - ) - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go deleted file mode 100644 index 612e1bcd..00000000 --- a/vendor/github.com/containerd/cgroups/net_prio.go +++ /dev/null @@ -1,66 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewNetPrio(root string) *netprioController { - return &netprioController{ - root: filepath.Join(root, string(NetPrio)), - } -} - -type netprioController struct { - root string -} - -func (n *netprioController) Name() Name { - return NetPrio -} - -func (n *netprioController) Path(path string) string { - return filepath.Join(n.root, path) -} - -func (n *netprioController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Network != nil { - for _, prio := range resources.Network.Priorities { - if err := ioutil.WriteFile( - filepath.Join(n.Path(path), "net_prio.ifpriomap"), - formatPrio(prio.Name, prio.Priority), - defaultFilePerm, - ); err != nil { - return err - } - } - } - return nil -} - -func formatPrio(name string, prio uint32) []byte { - return []byte(fmt.Sprintf("%s %d", name, prio)) -} diff --git a/vendor/github.com/containerd/cgroups/opts.go b/vendor/github.com/containerd/cgroups/opts.go deleted file mode 100644 index 7c5d9fb9..00000000 --- a/vendor/github.com/containerd/cgroups/opts.go +++ /dev/null @@ -1,61 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "github.com/pkg/errors" -) - -var ( - // ErrIgnoreSubsystem allows the specific subsystem to be skipped - ErrIgnoreSubsystem = errors.New("skip subsystem") - // ErrDevicesRequired is returned when the devices subsystem is required but - // does not exist or is not active - ErrDevicesRequired = errors.New("devices subsystem is required") -) - -// InitOpts allows configuration for the creation or loading of a cgroup -type InitOpts func(*InitConfig) error - -// InitConfig provides configuration options for the creation -// or loading of a cgroup and its subsystems -type InitConfig struct { - // InitCheck can be used to check initialization errors from the subsystem - InitCheck InitCheck -} - -func newInitConfig() *InitConfig { - return &InitConfig{ - InitCheck: RequireDevices, - } -} - -// InitCheck allows subsystems errors to be checked when initialized or loaded -type InitCheck func(Subsystem, Path, error) error - -// AllowAny allows any subsystem errors to be skipped -func AllowAny(s Subsystem, p Path, err error) error { - return ErrIgnoreSubsystem -} - -// RequireDevices requires the device subsystem but no others -func RequireDevices(s Subsystem, p Path, err error) error { - if s.Name() == Devices { - return ErrDevicesRequired - } - return ErrIgnoreSubsystem -} diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go deleted file mode 100644 index f45fd425..00000000 --- a/vendor/github.com/containerd/cgroups/paths.go +++ /dev/null @@ -1,107 +0,0 @@ -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "fmt" - "path/filepath" - - "github.com/pkg/errors" -) - -type Path func(subsystem Name) (string, error) - -func RootPath(subsysem Name) (string, error) { - return "/", nil -} - -// StaticPath returns a static path to use for all cgroups -func StaticPath(path string) Path { - return func(_ Name) (string, error) { - return path, nil - } -} - -// NestedPath will nest the cgroups based on the calling processes cgroup -// placing its child processes inside its own path -func NestedPath(suffix string) Path { - paths, err := parseCgroupFile("/proc/self/cgroup") - if err != nil { - return errorPath(err) - } - return existingPath(paths, suffix) -} - -// PidPath will return the correct cgroup paths for an existing process running inside a cgroup -// This is commonly used for the Load function to restore an existing container -func PidPath(pid int) Path { - p := fmt.Sprintf("/proc/%d/cgroup", pid) - paths, err := parseCgroupFile(p) - if err != nil { - return errorPath(errors.Wrapf(err, "parse cgroup file %s", p)) - } - return existingPath(paths, "") -} - -// ErrControllerNotActive is returned when a controller is not supported or enabled -var ErrControllerNotActive = errors.New("controller is not supported") - -func existingPath(paths map[string]string, suffix string) Path { - // localize the paths based on the root mount dest for nested cgroups - for n, p := range paths { - dest, err := getCgroupDestination(string(n)) - if err != nil { - return errorPath(err) - } - rel, err := filepath.Rel(dest, p) - if err != nil { - return errorPath(err) - } - if rel == "." { - rel = dest - } - paths[n] = filepath.Join("/", rel) - } - return func(name Name) (string, error) { - root, ok := paths[string(name)] - if !ok { - if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok { - return "", ErrControllerNotActive - } - } - if suffix != "" { - return filepath.Join(root, suffix), nil - } - return root, nil - } -} - -func subPath(path Path, subName string) Path { - return func(name Name) (string, error) { - p, err := path(name) - if err != nil { - return "", err - } - return filepath.Join(p, subName), nil - } -} - -func errorPath(err error) Path { - return func(_ Name) (string, error) { - return "", err - } -} diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go deleted file mode 100644 index 648786db..00000000 --- a/vendor/github.com/containerd/cgroups/perf_event.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import "path/filepath" - -func NewPerfEvent(root string) *PerfEventController { - return &PerfEventController{ - root: filepath.Join(root, string(PerfEvent)), - } -} - -type PerfEventController struct { - root string -} - -func (p *PerfEventController) Name() Name { - return PerfEvent -} - -func (p *PerfEventController) Path(path string) string { - return filepath.Join(p.root, path) -} diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go deleted file mode 100644 index a1cfcb88..00000000 --- a/vendor/github.com/containerd/cgroups/pids.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -func NewPids(root string) *pidsController { - return &pidsController{ - root: filepath.Join(root, string(Pids)), - } -} - -type pidsController struct { - root string -} - -func (p *pidsController) Name() Name { - return Pids -} - -func (p *pidsController) Path(path string) string { - return filepath.Join(p.root, path) -} - -func (p *pidsController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { - return err - } - if resources.Pids != nil && resources.Pids.Limit > 0 { - return ioutil.WriteFile( - filepath.Join(p.Path(path), "pids.max"), - []byte(strconv.FormatInt(resources.Pids.Limit, 10)), - defaultFilePerm, - ) - } - return nil -} - -func (p *pidsController) Update(path string, resources *specs.LinuxResources) error { - return p.Create(path, resources) -} - -func (p *pidsController) Stat(path string, stats *Metrics) error { - current, err := readUint(filepath.Join(p.Path(path), "pids.current")) - if err != nil { - return err - } - var max uint64 - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max")) - if err != nil { - return err - } - if maxS := strings.TrimSpace(string(maxData)); maxS != "max" { - if max, err = parseUint(maxS, 10, 64); err != nil { - return err - } - } - stats.Pids = &PidsStat{ - Current: current, - Limit: max, - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/rdma.go b/vendor/github.com/containerd/cgroups/rdma.go deleted file mode 100644 index 4f423d33..00000000 --- a/vendor/github.com/containerd/cgroups/rdma.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "io/ioutil" - "math" - "os" - "path/filepath" - "strconv" - "strings" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -type rdmaController struct { - root string -} - -func (p *rdmaController) Name() Name { - return Rdma -} - -func (p *rdmaController) Path(path string) string { - return filepath.Join(p.root, path) -} - -func NewRdma(root string) *rdmaController { - return &rdmaController{ - root: filepath.Join(root, string(Rdma)), - } -} - -func createCmdString(device string, limits *specs.LinuxRdma) string { - var cmdString string - - cmdString = device - if limits.HcaHandles != nil { - cmdString = cmdString + " " + "hca_handle=" + strconv.FormatUint(uint64(*limits.HcaHandles), 10) - } - - if limits.HcaObjects != nil { - cmdString = cmdString + " " + "hca_object=" + strconv.FormatUint(uint64(*limits.HcaObjects), 10) - } - return cmdString -} - -func (p *rdmaController) Create(path string, resources *specs.LinuxResources) error { - if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil { - return err - } - - for device, limit := range resources.Rdma { - if device != "" && (limit.HcaHandles != nil || limit.HcaObjects != nil) { - return ioutil.WriteFile( - filepath.Join(p.Path(path), "rdma.max"), - []byte(createCmdString(device, &limit)), - defaultFilePerm, - ) - } - } - return nil -} - -func (p *rdmaController) Update(path string, resources *specs.LinuxResources) error { - return p.Create(path, resources) -} - -func parseRdmaKV(raw string, entry *RdmaEntry) { - var value uint64 - var err error - - parts := strings.Split(raw, "=") - switch len(parts) { - case 2: - if parts[1] == "max" { - value = math.MaxUint32 - } else { - value, err = parseUint(parts[1], 10, 32) - if err != nil { - return - } - } - if parts[0] == "hca_handle" { - entry.HcaHandles = uint32(value) - } else if parts[0] == "hca_object" { - entry.HcaObjects = uint32(value) - } - } -} - -func toRdmaEntry(strEntries []string) []*RdmaEntry { - var rdmaEntries []*RdmaEntry - for i := range strEntries { - parts := strings.Fields(strEntries[i]) - switch len(parts) { - case 3: - entry := new(RdmaEntry) - entry.Device = parts[0] - parseRdmaKV(parts[1], entry) - parseRdmaKV(parts[2], entry) - - rdmaEntries = append(rdmaEntries, entry) - default: - continue - } - } - return rdmaEntries -} - -func (p *rdmaController) Stat(path string, stats *Metrics) error { - - currentData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.current")) - if err != nil { - return err - } - currentPerDevices := strings.Split(string(currentData), "\n") - - maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "rdma.max")) - if err != nil { - return err - } - maxPerDevices := strings.Split(string(maxData), "\n") - - // If device got removed between reading two files, ignore returning - // stats. 
- if len(currentPerDevices) != len(maxPerDevices) { - return nil - } - - currentEntries := toRdmaEntry(currentPerDevices) - maxEntries := toRdmaEntry(maxPerDevices) - - stats.Rdma = &RdmaStat{ - Current: currentEntries, - Limit: maxEntries, - } - return nil -} diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go deleted file mode 100644 index cfeabbbc..00000000 --- a/vendor/github.com/containerd/cgroups/state.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -// State is a type that represents the state of the current cgroup -type State string - -const ( - Unknown State = "" - Thawed State = "thawed" - Frozen State = "frozen" - Freezing State = "freezing" - Deleted State = "deleted" -) diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go deleted file mode 100644 index 23de04d4..00000000 --- a/vendor/github.com/containerd/cgroups/subsystem.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -// Name is a typed name for a cgroup subsystem -type Name string - -const ( - Devices Name = "devices" - Hugetlb Name = "hugetlb" - Freezer Name = "freezer" - Pids Name = "pids" - NetCLS Name = "net_cls" - NetPrio Name = "net_prio" - PerfEvent Name = "perf_event" - Cpuset Name = "cpuset" - Cpu Name = "cpu" - Cpuacct Name = "cpuacct" - Memory Name = "memory" - Blkio Name = "blkio" - Rdma Name = "rdma" -) - -// Subsystems returns a complete list of the default cgroups -// available on most linux systems -func Subsystems() []Name { - n := []Name{ - Hugetlb, - Freezer, - Pids, - NetCLS, - NetPrio, - PerfEvent, - Cpuset, - Cpu, - Cpuacct, - Memory, - Blkio, - Rdma, - } - if !isUserNS { - n = append(n, Devices) - } - return n -} - -type Subsystem interface { - Name() Name -} - -type pather interface { - Subsystem - Path(path string) string -} - -type creator interface { - Subsystem - Create(path string, resources *specs.LinuxResources) error -} - -type deleter interface { - Subsystem - Delete(path string) error -} - -type stater interface { - Subsystem - Stat(path string, stats *Metrics) error -} - -type updater interface { - Subsystem - Update(path string, resources *specs.LinuxResources) error -} - -// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy -func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy { - return func() ([]Subsystem, error) { - subsystems, err := baseHierarchy() - if err != nil { - return nil, err - } - for _, s := range subsystems { - if s.Name() == subsystem { - return []Subsystem{ - s, - }, nil - } - } - return nil, fmt.Errorf("unable to find subsystem %s", subsystem) - } -} diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go deleted file mode 100644 index c5d4e308..00000000 --- a/vendor/github.com/containerd/cgroups/systemd.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "fmt" - "path/filepath" - "strings" - "sync" - - systemdDbus "github.com/coreos/go-systemd/dbus" - "github.com/godbus/dbus" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - SystemdDbus Name = "systemd" - defaultSlice = "system.slice" -) - -var ( - canDelegate bool - once sync.Once -) - -func Systemd() ([]Subsystem, error) { - root, err := v1MountPoint() - if err != nil { - return nil, err - } - defaultSubsystems, err := defaults(root) - if err != nil { - return nil, err - } - s, err := NewSystemd(root) - if err != nil { - return nil, err - } - // make sure the systemd controller is added first - return append([]Subsystem{s}, defaultSubsystems...), nil -} - -func Slice(slice, name string) Path { - if slice == "" { - slice = defaultSlice - } - return func(subsystem Name) (string, error) { - return filepath.Join(slice, name), nil - } -} - -func NewSystemd(root string) (*SystemdController, error) { - return &SystemdController{ - root: root, - }, nil -} - -type SystemdController struct { - mu sync.Mutex - root string -} - -func (s *SystemdController) Name() Name { - return SystemdDbus -} - -func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error { - conn, err := systemdDbus.New() - if err != nil { - return err - } - defer conn.Close() - slice, name := splitName(path) - // We need to see if systemd can handle the delegate property - // Systemd will return an error if it cannot handle delegate regardless - // of its bool setting. - checkDelegate := func() { - canDelegate = true - dlSlice := newProperty("Delegate", true) - if _, err := conn.StartTransientUnit(slice, "testdelegate", []systemdDbus.Property{dlSlice}, nil); err != nil { - if dbusError, ok := err.(dbus.Error); ok { - // Starting with systemd v237, Delegate is not even a property of slices anymore, - // so the D-Bus call fails with "InvalidArgs" error. 
- if strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.PropertyReadOnly") || strings.Contains(dbusError.Name, "org.freedesktop.DBus.Error.InvalidArgs") { - canDelegate = false - } - } - } - - conn.StopUnit(slice, "testDelegate", nil) - } - once.Do(checkDelegate) - properties := []systemdDbus.Property{ - systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)), - systemdDbus.PropWants(slice), - newProperty("DefaultDependencies", false), - newProperty("MemoryAccounting", true), - newProperty("CPUAccounting", true), - newProperty("BlockIOAccounting", true), - } - - // If we can delegate, we add the property back in - if canDelegate { - properties = append(properties, newProperty("Delegate", true)) - } - - ch := make(chan string) - _, err = conn.StartTransientUnit(name, "replace", properties, ch) - if err != nil { - return err - } - <-ch - return nil -} - -func (s *SystemdController) Delete(path string) error { - conn, err := systemdDbus.New() - if err != nil { - return err - } - defer conn.Close() - _, name := splitName(path) - ch := make(chan string) - _, err = conn.StopUnit(name, "replace", ch) - if err != nil { - return err - } - <-ch - return nil -} - -func newProperty(name string, units interface{}) systemdDbus.Property { - return systemdDbus.Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -func unitName(name string) string { - return fmt.Sprintf("%s.slice", name) -} - -func splitName(path string) (slice string, unit string) { - slice, unit = filepath.Split(path) - return strings.TrimSuffix(slice, "/"), unit -} diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go deleted file mode 100644 index 84dc38d0..00000000 --- a/vendor/github.com/containerd/cgroups/ticks.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -func getClockTicks() uint64 { - // The value comes from `C.sysconf(C._SC_CLK_TCK)`, and - // on Linux it's a constant which is safe to be hard coded, - // so we can avoid using cgo here. - // See https://github.com/containerd/cgroups/pull/12 for - // more details. - return 100 -} diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go deleted file mode 100644 index 8a97d04d..00000000 --- a/vendor/github.com/containerd/cgroups/utils.go +++ /dev/null @@ -1,324 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package cgroups - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - units "github.com/docker/go-units" - specs "github.com/opencontainers/runtime-spec/specs-go" -) - -var isUserNS = runningInUserNS() - -// runningInUserNS detects whether we are currently running in a user namespace. -// Copied from github.com/lxc/lxd/shared/util.go -func runningInUserNS() bool { - file, err := os.Open("/proc/self/uid_map") - if err != nil { - // This kernel-provided file only exists if user namespaces are supported - return false - } - defer file.Close() - - buf := bufio.NewReader(file) - l, _, err := buf.ReadLine() - if err != nil { - return false - } - - line := string(l) - var a, b, c int64 - fmt.Sscanf(line, "%d %d %d", &a, &b, &c) - /* - * We assume we are in the initial user namespace if we have a full - * range - 4294967295 uids starting at uid 0. - */ - if a == 0 && b == 0 && c == 4294967295 { - return false - } - return true -} - -// defaults returns all known groups -func defaults(root string) ([]Subsystem, error) { - h, err := NewHugetlb(root) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - s := []Subsystem{ - NewNamed(root, "systemd"), - NewFreezer(root), - NewPids(root), - NewNetCls(root), - NewNetPrio(root), - NewPerfEvent(root), - NewCputset(root), - NewCpu(root), - NewCpuacct(root), - NewMemory(root), - NewBlkio(root), - NewRdma(root), - } - // only add the devices cgroup if we are not in a user namespace - // because modifications are not allowed - if !isUserNS { - s = append(s, NewDevices(root)) - } - // add the hugetlb cgroup if error wasn't due to missing hugetlb - // cgroup support on the host - if err == nil { - s = append(s, h) - } - return s, nil -} - -// remove will remove a cgroup path handling EAGAIN and EBUSY errors and -// retrying the remove after a exp timeout -func remove(path string) error { - delay := 10 * time.Millisecond - for i := 0; i < 5; i++ { - if i != 0 { - time.Sleep(delay) - delay *= 2 - } - if err := os.RemoveAll(path); err == nil { - return nil - } - } - return fmt.Errorf("cgroups: unable to remove path %q", path) -} - -// readPids will read all the pids of processes in a cgroup by the provided path -func readPids(path string, subsystem Name) ([]Process, error) { - f, err := os.Open(filepath.Join(path, cgroupProcs)) - if err != nil { - return nil, err - } - defer f.Close() - var ( - out []Process - s = bufio.NewScanner(f) - ) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, Process{ - Pid: pid, - Subsystem: subsystem, - Path: path, - }) - } - } - return out, nil -} - -// readTasksPids will read all the pids of tasks in a cgroup by the provided path -func readTasksPids(path string, subsystem Name) ([]Task, error) { - f, err := os.Open(filepath.Join(path, cgroupTasks)) - if err != nil { - return nil, err - } - defer f.Close() - var ( - out []Task - s = bufio.NewScanner(f) - ) - for s.Scan() { - if t := s.Text(); t != "" { - pid, err := strconv.Atoi(t) - if err != nil { - return nil, err - } - out = append(out, Task{ - Pid: pid, - Subsystem: subsystem, - Path: path, - }) - } - } - return out, nil -} - -func hugePageSizes() ([]string, error) { - var ( - pageSizes []string - sizeList = []string{"B", "KB", "MB", "GB", "TB", "PB"} - ) - files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages") - if err != nil { - return nil, err - } - for _, st := range files { - nameArray := 
strings.Split(st.Name(), "-") - pageSize, err := units.RAMInBytes(nameArray[1]) - if err != nil { - return nil, err - } - pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList)) - } - return pageSizes, nil -} - -func readUint(path string) (uint64, error) { - v, err := ioutil.ReadFile(path) - if err != nil { - return 0, err - } - return parseUint(strings.TrimSpace(string(v)), 10, 64) -} - -func parseUint(s string, base, bitSize int) (uint64, error) { - v, err := strconv.ParseUint(s, base, bitSize) - if err != nil { - intValue, intErr := strconv.ParseInt(s, base, bitSize) - // 1. Handle negative values greater than MinInt64 (and) - // 2. Handle negative values lesser than MinInt64 - if intErr == nil && intValue < 0 { - return 0, nil - } else if intErr != nil && - intErr.(*strconv.NumError).Err == strconv.ErrRange && - intValue < 0 { - return 0, nil - } - return 0, err - } - return v, nil -} - -func parseKV(raw string) (string, uint64, error) { - parts := strings.Fields(raw) - switch len(parts) { - case 2: - v, err := parseUint(parts[1], 10, 64) - if err != nil { - return "", 0, err - } - return parts[0], v, nil - default: - return "", 0, ErrInvalidFormat - } -} - -func parseCgroupFile(path string) (map[string]string, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - return parseCgroupFromReader(f) -} - -func parseCgroupFromReader(r io.Reader) (map[string]string, error) { - var ( - cgroups = make(map[string]string) - s = bufio.NewScanner(r) - ) - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - var ( - text = s.Text() - parts = strings.SplitN(text, ":", 3) - ) - if len(parts) < 3 { - return nil, fmt.Errorf("invalid cgroup entry: %q", text) - } - for _, subs := range strings.Split(parts[1], ",") { - if subs != "" { - cgroups[subs] = parts[2] - } - } - } - return cgroups, nil -} - -func getCgroupDestination(subsystem string) (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - s := bufio.NewScanner(f) - for s.Scan() { - if err := s.Err(); err != nil { - return "", err - } - fields := strings.Fields(s.Text()) - for _, opt := range strings.Split(fields[len(fields)-1], ",") { - if opt == subsystem { - return fields[3], nil - } - } - } - return "", ErrNoCgroupMountDestination -} - -func pathers(subystems []Subsystem) []pather { - var out []pather - for _, s := range subystems { - if p, ok := s.(pather); ok { - out = append(out, p) - } - } - return out -} - -func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error { - if c, ok := s.(creator); ok { - p, err := path(s.Name()) - if err != nil { - return err - } - if err := c.Create(p, resources); err != nil { - return err - } - } else if c, ok := s.(pather); ok { - p, err := path(s.Name()) - if err != nil { - return err - } - // do the default create if the group does not have a custom one - if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil { - return err - } - } - return nil -} - -func cleanPath(path string) string { - if path == "" { - return "" - } - path = filepath.Clean(path) - if !filepath.IsAbs(path) { - path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path)) - } - return filepath.Clean(path) -} diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go deleted file mode 100644 index a076d469..00000000 --- a/vendor/github.com/containerd/cgroups/v1.go 
+++ /dev/null @@ -1,81 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package cgroups - -import ( - "bufio" - "fmt" - "os" - "path/filepath" - "strings" -) - -// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy -func V1() ([]Subsystem, error) { - root, err := v1MountPoint() - if err != nil { - return nil, err - } - subsystems, err := defaults(root) - if err != nil { - return nil, err - } - var enabled []Subsystem - for _, s := range pathers(subsystems) { - // check and remove the default groups that do not exist - if _, err := os.Lstat(s.Path("/")); err == nil { - enabled = append(enabled, s) - } - } - return enabled, nil -} - -// v1MountPoint returns the mount point where the cgroup -// mountpoints are mounted in a single hiearchy -func v1MountPoint() (string, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return "", err - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return "", err - } - var ( - text = scanner.Text() - fields = strings.Split(text, " ") - // safe as mountinfo encodes mountpoints with spaces as \040. - index = strings.Index(text, " - ") - postSeparatorFields = strings.Fields(text[index+3:]) - numPostFields = len(postSeparatorFields) - ) - // this is an error as we can't detect if the mount is for "cgroup" - if numPostFields == 0 { - return "", fmt.Errorf("Found no fields post '-' in %q", text) - } - if postSeparatorFields[0] == "cgroup" { - // check that the mount is properly formated. 
- if numPostFields < 3 { - return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - return filepath.Dir(fields[4]), nil - } - } - return "", ErrMountPointNotExist -} diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go index 9ada8734..6c792000 100644 --- a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go @@ -9,6 +9,7 @@ import ( types "github.com/containerd/containerd/api/types" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -29,11 +30,12 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type ApplyRequest struct { // Diff is the descriptor of the diff to be extracted - Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` - Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Diff *types.Descriptor `protobuf:"bytes,1,opt,name=diff,proto3" json:"diff,omitempty"` + Mounts []*types.Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Payloads map[string]*types1.Any `protobuf:"bytes,3,rep,name=payloads,proto3" json:"payloads,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *ApplyRequest) Reset() { *m = ApplyRequest{} } @@ -205,6 +207,7 @@ var xxx_messageInfo_DiffResponse proto.InternalMessageInfo func init() { proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest") + proto.RegisterMapType((map[string]*types1.Any)(nil), "containerd.services.diff.v1.ApplyRequest.PayloadsEntry") proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse") proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest") proto.RegisterMapType((map[string]string)(nil), "containerd.services.diff.v1.DiffRequest.LabelsEntry") @@ -216,36 +219,40 @@ func init() { } var fileDescriptor_3b36a99e6faaa935 = []byte{ - // 457 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0xaf, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40, - 0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x01, 0x81, 0x04, 0x4a, - 0x9b, 0x97, 0xce, 0x22, 0x8d, 0xbd, 0xd8, 0xad, 0x94, 0x1b, 0xdf, 0x85, 0x8f, 0xc2, 0x65, 0x47, - 0x8e, 0x1c, 0x69, 0x3f, 0x09, 0xb2, 0x93, 0x40, 0x24, 0xa4, 0x12, 0x76, 0xca, 0xcb, 0xf3, 0xef, - 0x9f, 0xfd, 0x6c, 0x38, 0x5d, 0x70, 0x7d, 0xb9, 0x9a, 0xb1, 0xb9, 0x58, 0xfa, 0x73, 0x91, 0xea, - 0x90, 0xa7, 0x98, 0x45, 0xf5, 0x32, 0x94, 0xdc, 0x57, 0x98, 0xad, 0xf9, 0x1c, 0x95, 0x1f, 0xf1, - 0x38, 0xf6, 0xd7, 0x87, 0xf6, 0xcb, 0x64, 0x26, 0xb4, 0xa0, 0xf7, 0xfe, 0x60, 0x59, 0x85, 0x63, - 0x76, 0x7d, 0x7d, 0xe8, 0xee, 0x2f, 0xc4, 0x42, 0x58, 0x9c, 0x6f, 0xaa, 0x82, 0xe2, 0x1e, 0x35, - 0x32, 0xd5, 0xb9, 0x44, 0xe5, 0x2f, 0xc5, 0x2a, 0xd5, 0x25, 0xef, 0xe4, 0x3f, 0x78, 0x11, 0xaa, - 0x79, 0xc6, 0xa5, 0x16, 0x59, 0x41, 0x1e, 
0x5f, 0xc1, 0xde, 0x4b, 0x29, 0x93, 0x3c, 0xc0, 0xab, - 0x15, 0x2a, 0x4d, 0x9f, 0x40, 0xd7, 0xa4, 0x74, 0xc8, 0x88, 0x4c, 0x86, 0xd3, 0xfb, 0xac, 0xb6, - 0x0d, 0xab, 0xc0, 0xce, 0x7e, 0x2b, 0x04, 0x16, 0x49, 0x7d, 0xe8, 0xdb, 0x34, 0xca, 0x69, 0x8f, - 0x3a, 0x93, 0xe1, 0xf4, 0xee, 0xdf, 0x9c, 0xb7, 0x66, 0x3d, 0x28, 0x61, 0xe3, 0x37, 0x70, 0xbb, - 0xb4, 0x54, 0x52, 0xa4, 0x0a, 0xe9, 0x11, 0xdc, 0x0a, 0xa5, 0x4c, 0x38, 0x46, 0x8d, 0x6c, 0x2b, - 0xf0, 0xf8, 0x6b, 0x1b, 0x86, 0x67, 0x3c, 0x8e, 0xab, 0xec, 0x8f, 0xa0, 0x9b, 0x60, 0xac, 0x1d, - 0xb2, 0x3b, 0x87, 0x05, 0xd1, 0xc7, 0xd0, 0xcb, 0xf8, 0xe2, 0x52, 0xff, 0x2b, 0x75, 0x81, 0xa2, - 0x0f, 0x00, 0x96, 0x18, 0xf1, 0xf0, 0x93, 0x59, 0x73, 0x3a, 0x23, 0x32, 0x19, 0x04, 0x03, 0xdb, - 0xb9, 0xc8, 0x25, 0xd2, 0x3b, 0xd0, 0xc9, 0x30, 0x76, 0xba, 0xb6, 0x6f, 0x4a, 0x7a, 0x0e, 0xfd, - 0x24, 0x9c, 0x61, 0xa2, 0x9c, 0x9e, 0x35, 0x78, 0xc6, 0x76, 0xdc, 0x08, 0x56, 0xdb, 0x06, 0x3b, - 0xb7, 0xb4, 0xd7, 0xa9, 0xce, 0xf2, 0xa0, 0xd4, 0x70, 0x5f, 0xc0, 0xb0, 0xd6, 0x36, 0x76, 0x9f, - 0x31, 0xb7, 0xa7, 0x35, 0x08, 0x4c, 0x49, 0xf7, 0xa1, 0xb7, 0x0e, 0x93, 0x15, 0x3a, 0x6d, 0xdb, - 0x2b, 0x7e, 0x8e, 0xdb, 0xcf, 0xc9, 0xf8, 0x14, 0xf6, 0x0a, 0xf5, 0xf2, 0xb4, 0xab, 0x09, 0x77, - 0x9a, 0x4e, 0x78, 0xfa, 0x8d, 0x40, 0xd7, 0x48, 0xd0, 0x8f, 0xd0, 0xb3, 0x93, 0xa3, 0x07, 0x3b, - 0x37, 0x53, 0xbf, 0x50, 0xee, 0xc3, 0x26, 0xd0, 0x32, 0xda, 0x87, 0xd2, 0x67, 0xd2, 0xf4, 0xac, - 0xdc, 0x83, 0x06, 0xc8, 0x42, 0xfc, 0xd5, 0xc5, 0xf5, 0xc6, 0x6b, 0xfd, 0xd8, 0x78, 0xad, 0x2f, - 0x5b, 0x8f, 0x5c, 0x6f, 0x3d, 0xf2, 0x7d, 0xeb, 0x91, 0x9f, 0x5b, 0x8f, 0xbc, 0x3f, 0xbe, 0xd1, - 0x6b, 0x3f, 0x31, 0xdf, 0x77, 0xad, 0x59, 0xdf, 0x3e, 0xa4, 0xa7, 0xbf, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x61, 0xd1, 0x6e, 0x9e, 0x34, 0x04, 0x00, 0x00, + // 526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x41, 0x6f, 0xd3, 0x4c, + 0x10, 0x8d, 0xed, 0x24, 0xdf, 0x97, 0x49, 0x2b, 0xa1, 0x55, 0x24, 0x8c, 0x01, 0xab, 0xca, 0x29, + 0x2d, 0x62, 0x4d, 0x03, 0x2a, 0xd0, 0x5e, 0x5a, 0x54, 0xc4, 0xa5, 0x48, 0x60, 0x7a, 0x40, 0x20, + 0x81, 0x9c, 0x78, 0xed, 0xae, 0x70, 0xbc, 0x8b, 0x77, 0x1d, 0xc9, 0x37, 0xfe, 0x06, 0x67, 0x7e, + 0x0a, 0x97, 0x1e, 0x39, 0x72, 0xa4, 0xf9, 0x25, 0xc8, 0xeb, 0x75, 0x31, 0x02, 0x05, 0xc3, 0xc9, + 0x9b, 0x9d, 0xf7, 0xde, 0xce, 0xbc, 0x37, 0x0a, 0x1c, 0xc6, 0x54, 0x9e, 0xe5, 0x33, 0x3c, 0x67, + 0x0b, 0x6f, 0xce, 0x52, 0x19, 0xd0, 0x94, 0x64, 0x61, 0xf3, 0x18, 0x70, 0xea, 0x09, 0x92, 0x2d, + 0xe9, 0x9c, 0x08, 0x2f, 0xa4, 0x51, 0xe4, 0x2d, 0x77, 0xd5, 0x17, 0xf3, 0x8c, 0x49, 0x86, 0xae, + 0xff, 0xc0, 0xe2, 0x1a, 0x87, 0x55, 0x7d, 0xb9, 0xeb, 0x8c, 0x62, 0x16, 0x33, 0x85, 0xf3, 0xca, + 0x53, 0x45, 0x71, 0xae, 0xc5, 0x8c, 0xc5, 0x09, 0xf1, 0xd4, 0xaf, 0x59, 0x1e, 0x79, 0x41, 0x5a, + 0xe8, 0xd2, 0x5e, 0xab, 0x7e, 0x64, 0xc1, 0x89, 0xf0, 0x16, 0x2c, 0x4f, 0xa5, 0xe6, 0x1d, 0xfc, + 0x05, 0x2f, 0x24, 0x62, 0x9e, 0x51, 0x2e, 0x59, 0x56, 0x91, 0xc7, 0x1f, 0x4d, 0xd8, 0x38, 0xe2, + 0x3c, 0x29, 0x7c, 0xf2, 0x3e, 0x27, 0x42, 0xa2, 0x3b, 0xd0, 0x2d, 0x27, 0xb0, 0x8d, 0x2d, 0x63, + 0x32, 0x9c, 0xde, 0xc0, 0x8d, 0x11, 0x95, 0x04, 0x3e, 0xbe, 0x94, 0xf0, 0x15, 0x12, 0x79, 0xd0, + 0x57, 0xed, 0x08, 0xdb, 0xdc, 0xb2, 0x26, 0xc3, 0xe9, 0xd5, 0x5f, 0x39, 0x4f, 0xcb, 0xba, 0xaf, + 0x61, 0xe8, 0x05, 0xfc, 0xcf, 0x83, 0x22, 0x61, 0x41, 0x28, 0x6c, 0x4b, 0x51, 0xee, 0xe3, 0x35, + 0x4e, 0xe2, 0x66, 0x7f, 0xf8, 0x99, 0x66, 0x3e, 0x4e, 0x65, 0x56, 0xf8, 0x97, 0x42, 0xce, 0x73, + 0xd8, 0xfc, 0xa9, 0x84, 0xae, 0x80, 0xf5, 0x8e, 0x14, 0x6a, 
0x8e, 0x81, 0x5f, 0x1e, 0xd1, 0x0e, + 0xf4, 0x96, 0x41, 0x92, 0x13, 0xdb, 0x54, 0xb3, 0x8d, 0x70, 0x95, 0x05, 0xae, 0xb3, 0xc0, 0x47, + 0x69, 0xe1, 0x57, 0x90, 0x7d, 0xf3, 0x81, 0x31, 0x7e, 0x02, 0x9b, 0xfa, 0x69, 0xc1, 0x59, 0x2a, + 0x08, 0xda, 0x83, 0xff, 0x02, 0xce, 0x13, 0x4a, 0xc2, 0x56, 0xf6, 0xd4, 0xe0, 0xf1, 0x27, 0x13, + 0x86, 0xc7, 0x34, 0x8a, 0x6a, 0x8f, 0x6f, 0x41, 0x37, 0x21, 0x91, 0xb4, 0x8d, 0xf5, 0x7e, 0x29, + 0x10, 0xba, 0x0d, 0xbd, 0x8c, 0xc6, 0x67, 0xf2, 0x4f, 0xee, 0x56, 0x28, 0x74, 0x13, 0x60, 0x41, + 0x42, 0x1a, 0xbc, 0x2d, 0x6b, 0xb6, 0xa5, 0xa6, 0x1f, 0xa8, 0x9b, 0xd3, 0x82, 0x93, 0xd2, 0x95, + 0x8c, 0x44, 0x76, 0xb7, 0x72, 0x25, 0x23, 0x11, 0x3a, 0x81, 0x7e, 0x12, 0xcc, 0x48, 0x22, 0xec, + 0x9e, 0x7a, 0xe0, 0xde, 0xda, 0x2c, 0x1a, 0x63, 0xe0, 0x13, 0x45, 0xab, 0x82, 0xd0, 0x1a, 0xce, + 0x43, 0x18, 0x36, 0xae, 0x7f, 0x13, 0xc2, 0xa8, 0x19, 0xc2, 0xa0, 0x69, 0xf7, 0x21, 0x6c, 0x54, + 0xea, 0xda, 0xed, 0x7a, 0x13, 0xad, 0xb6, 0x9b, 0x38, 0xfd, 0x6c, 0x40, 0xb7, 0x94, 0x40, 0x6f, + 0xa0, 0xa7, 0x92, 0x43, 0xdb, 0xad, 0x17, 0xcb, 0xd9, 0x69, 0x03, 0xd5, 0xad, 0xbd, 0xd6, 0xef, + 0x4c, 0xda, 0x7a, 0xe5, 0x6c, 0xb7, 0x40, 0x56, 0xe2, 0x8f, 0x4e, 0xcf, 0x2f, 0xdc, 0xce, 0xd7, + 0x0b, 0xb7, 0xf3, 0x61, 0xe5, 0x1a, 0xe7, 0x2b, 0xd7, 0xf8, 0xb2, 0x72, 0x8d, 0x6f, 0x2b, 0xd7, + 0x78, 0xb5, 0xff, 0x4f, 0xff, 0x58, 0x07, 0xe5, 0xf7, 0x65, 0x67, 0xd6, 0x57, 0x7b, 0x7e, 0xf7, + 0x7b, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x85, 0x25, 0xb8, 0xf8, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -400,6 +407,34 @@ func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.Payloads) > 0 { + for k, _ := range m.Payloads { + dAtA[i] = 0x1a + i++ + v := m.Payloads[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovDiff(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + msgSize + i = encodeVarintDiff(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintDiff(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDiff(dAtA, i, uint64(v.Size())) + n2, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + } + } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -425,11 +460,11 @@ func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size())) - n2, err := m.Applied.MarshalTo(dAtA[i:]) + n3, err := m.Applied.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -530,11 +565,11 @@ func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size())) - n3, err := m.Diff.MarshalTo(dAtA[i:]) + n4, err := m.Diff.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) @@ -567,6 +602,19 @@ func (m *ApplyRequest) Size() (n int) { n += 1 + l + sovDiff(uint64(l)) } } + if len(m.Payloads) > 0 { + for k, v := range m.Payloads { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovDiff(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + l + n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -662,9 +710,20 @@ func (this *ApplyRequest) String() string { if 
this == nil { return "nil" } + keysForPayloads := make([]string, 0, len(this.Payloads)) + for k, _ := range this.Payloads { + keysForPayloads = append(keysForPayloads, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPayloads) + mapStringForPayloads := "map[string]*types1.Any{" + for _, k := range keysForPayloads { + mapStringForPayloads += fmt.Sprintf("%v: %v,", k, this.Payloads[k]) + } + mapStringForPayloads += "}" s := strings.Join([]string{`&ApplyRequest{`, `Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "types.Descriptor", 1) + `,`, `Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "types.Mount", 1) + `,`, + `Payloads:` + mapStringForPayloads + `,`, `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, `}`, }, "") @@ -824,6 +883,135 @@ func (m *ApplyRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payloads", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDiff + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDiff + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payloads == nil { + m.Payloads = make(map[string]*types1.Any) + } + var mapkey string + var mapvalue *types1.Any + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthDiff + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthDiff + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDiff + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthDiff + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthDiff + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &types1.Any{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipDiff(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDiff + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Payloads[mapkey] = mapvalue + iNdEx = postIndex default: iNdEx = 
preIndex skippy, err := skipDiff(dAtA[iNdEx:]) diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go index 016ced4b..a4e23868 100644 --- a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go @@ -10,6 +10,7 @@ import ( rpc "github.com/gogo/googleapis/google/rpc" proto "github.com/gogo/protobuf/proto" github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + types1 "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" io "io" math "math" @@ -191,11 +192,51 @@ func (m *PluginsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_PluginsResponse proto.InternalMessageInfo +type ServerResponse struct { + UUID string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerResponse) Reset() { *m = ServerResponse{} } +func (*ServerResponse) ProtoMessage() {} +func (*ServerResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1a14fda866f10715, []int{3} +} +func (m *ServerResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServerResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServerResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerResponse.Merge(m, src) +} +func (m *ServerResponse) XXX_Size() int { + return m.Size() +} +func (m *ServerResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ServerResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerResponse proto.InternalMessageInfo + func init() { proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin") proto.RegisterMapType((map[string]string)(nil), "containerd.services.introspection.v1.Plugin.ExportsEntry") proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest") proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse") + proto.RegisterType((*ServerResponse)(nil), "containerd.services.introspection.v1.ServerResponse") } func init() { @@ -203,38 +244,42 @@ func init() { } var fileDescriptor_1a14fda866f10715 = []byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x4d, 0x6f, 0xd3, 0x40, - 0x10, 0xcd, 0x3a, 0x69, 0xdc, 0x4c, 0xca, 0x87, 0x56, 0x15, 0x58, 0x3e, 0xb8, 0x51, 0xc4, 0x21, - 0x42, 0xb0, 0x56, 0x03, 0x48, 0xb4, 0x48, 0x1c, 0x22, 0x72, 0xa8, 0xd4, 0x43, 0xe5, 0x5e, 0x10, - 0x97, 0xca, 0x71, 0x36, 0x66, 0x85, 0xeb, 0xdd, 0xee, 0xae, 0x2d, 0x72, 0xe3, 0xc6, 0x5f, 0xcb, - 0x91, 0x23, 0xa7, 0x8a, 0xfa, 0x37, 0xf0, 0x03, 0x90, 0xbd, 0x76, 0x9b, 0xdc, 0x12, 0x71, 0x9b, - 0x79, 0x7e, 0x6f, 0xe6, 0xcd, 0x93, 0x17, 0x82, 0x98, 0xe9, 0xaf, 0xd9, 0x8c, 0x44, 0xfc, 0xda, - 0x8f, 0x78, 0xaa, 0x43, 0x96, 0x52, 0x39, 0x5f, 0x2f, 0x43, 0xc1, 0x7c, 0x45, 0x65, 0xce, 0x22, - 0xaa, 0x7c, 0x96, 0x6a, 0xc9, 0x95, 0xa0, 0x91, 0x66, 0x3c, 0xf5, 0xf3, 0xe3, 0x4d, 0x80, 0x08, - 0xc9, 0x35, 0xc7, 0x2f, 0x1e, 0xd4, 0xa4, 0x51, 0x92, 0x4d, 0x62, 0x7e, 0xec, 0x9e, 
0x6c, 0xb5, - 0x59, 0x2f, 0x05, 0x55, 0xbe, 0x48, 0x42, 0xbd, 0xe0, 0xf2, 0xda, 0x2c, 0x70, 0x9f, 0xc7, 0x9c, - 0xc7, 0x09, 0xf5, 0xa5, 0x88, 0x7c, 0xa5, 0x43, 0x9d, 0xa9, 0xfa, 0xc3, 0x61, 0xcc, 0x63, 0x5e, - 0x95, 0x7e, 0x59, 0x19, 0x74, 0xf8, 0xd7, 0x82, 0xee, 0x45, 0x92, 0xc5, 0x2c, 0xc5, 0x18, 0x3a, - 0xe5, 0x44, 0x07, 0x0d, 0xd0, 0xa8, 0x17, 0x54, 0x35, 0x7e, 0x06, 0x16, 0x9b, 0x3b, 0x56, 0x89, - 0x4c, 0xba, 0xc5, 0xed, 0x91, 0x75, 0xf6, 0x29, 0xb0, 0xd8, 0x1c, 0xbb, 0xb0, 0x2f, 0xe9, 0x4d, - 0xc6, 0x24, 0x55, 0x4e, 0x7b, 0xd0, 0x1e, 0xf5, 0x82, 0xfb, 0x1e, 0x7f, 0x84, 0x5e, 0xe3, 0x49, - 0x39, 0x9d, 0x41, 0x7b, 0xd4, 0x1f, 0xbb, 0x64, 0xed, 0xec, 0xca, 0x36, 0xb9, 0xa8, 0x29, 0x93, - 0xce, 0xea, 0xf6, 0xa8, 0x15, 0x3c, 0x48, 0xf0, 0x25, 0xd8, 0xf4, 0xbb, 0xe0, 0x52, 0x2b, 0x67, - 0xaf, 0x52, 0x9f, 0x90, 0x6d, 0x42, 0x23, 0xe6, 0x0c, 0x32, 0x35, 0xda, 0x69, 0xaa, 0xe5, 0x32, - 0x68, 0x26, 0xe1, 0x21, 0x1c, 0x44, 0xa1, 0x08, 0x67, 0x2c, 0x61, 0x9a, 0x51, 0xe5, 0x74, 0x2b, - 0xd3, 0x1b, 0x18, 0x7e, 0x0d, 0xfb, 0x2c, 0x65, 0xfa, 0x8a, 0x4a, 0xe9, 0xd8, 0x03, 0x34, 0xea, - 0x8f, 0x31, 0x31, 0x69, 0x12, 0x29, 0x22, 0x72, 0x59, 0xa5, 0x19, 0xd8, 0x25, 0x67, 0x2a, 0xa5, - 0x7b, 0x0a, 0x07, 0xeb, 0xbb, 0xf0, 0x53, 0x68, 0x7f, 0xa3, 0xcb, 0x3a, 0xbe, 0xb2, 0xc4, 0x87, - 0xb0, 0x97, 0x87, 0x49, 0x46, 0x4d, 0x80, 0x81, 0x69, 0x4e, 0xad, 0xf7, 0x68, 0xf8, 0x12, 0x1e, - 0x1b, 0xbb, 0x2a, 0xa0, 0x37, 0x19, 0x55, 0x1a, 0x3b, 0x60, 0x2f, 0x58, 0xa2, 0xa9, 0x54, 0x0e, - 0xaa, 0xbc, 0x35, 0xed, 0xf0, 0x0a, 0x9e, 0xdc, 0x73, 0x95, 0xe0, 0xa9, 0xa2, 0xf8, 0x1c, 0x6c, - 0x61, 0xa0, 0x8a, 0xdc, 0x1f, 0xbf, 0xda, 0x25, 0xa2, 0x3a, 0xf2, 0x66, 0xc4, 0xf8, 0x27, 0x82, - 0x47, 0x67, 0xeb, 0x54, 0x9c, 0x83, 0x5d, 0xaf, 0xc4, 0x6f, 0x77, 0x99, 0xdc, 0x5c, 0xe3, 0xbe, - 0xdb, 0x51, 0x65, 0xee, 0x9a, 0x2c, 0x56, 0x77, 0x5e, 0xeb, 0xf7, 0x9d, 0xd7, 0xfa, 0x51, 0x78, - 0x68, 0x55, 0x78, 0xe8, 0x57, 0xe1, 0xa1, 0x3f, 0x85, 0x87, 0xbe, 0x9c, 0xff, 0xdf, 0x5b, 0xfc, - 0xb0, 0x01, 0x7c, 0xb6, 0x66, 0xdd, 0xea, 0xf7, 0x7f, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe6, - 0x72, 0xde, 0x35, 0xe4, 0x03, 0x00, 0x00, + // 549 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0xad, 0x9d, 0x34, 0x6e, 0x37, 0xa5, 0xa0, 0x55, 0x55, 0x2c, 0x83, 0x9c, 0x28, 0xe2, 0x10, + 0x21, 0x58, 0xab, 0x01, 0x24, 0x5a, 0x24, 0x0e, 0x51, 0x73, 0x88, 0xd4, 0x43, 0xe5, 0xa8, 0x08, + 0x71, 0xa9, 0x1c, 0x67, 0x63, 0x56, 0x38, 0xde, 0xed, 0xee, 0xda, 0x22, 0x37, 0x3e, 0x2f, 0x47, + 0x8e, 0x9c, 0x02, 0xf5, 0x37, 0xf0, 0x01, 0xc8, 0xbb, 0x76, 0x9a, 0xdc, 0x12, 0x71, 0x9b, 0x79, + 0x33, 0x6f, 0xe6, 0xcd, 0xf3, 0xca, 0xc0, 0x8f, 0x88, 0xfc, 0x9a, 0x8e, 0x51, 0x48, 0x67, 0x5e, + 0x48, 0x13, 0x19, 0x90, 0x04, 0xf3, 0xc9, 0x7a, 0x18, 0x30, 0xe2, 0x09, 0xcc, 0x33, 0x12, 0x62, + 0xe1, 0x91, 0x44, 0x72, 0x2a, 0x18, 0x0e, 0x25, 0xa1, 0x89, 0x97, 0x9d, 0x6d, 0x02, 0x88, 0x71, + 0x2a, 0x29, 0x7c, 0xf1, 0xc0, 0x46, 0x15, 0x13, 0x6d, 0x36, 0x66, 0x67, 0xce, 0xf9, 0x56, 0x9b, + 0xe5, 0x9c, 0x61, 0xe1, 0xb1, 0x38, 0x90, 0x53, 0xca, 0x67, 0x7a, 0x81, 0xf3, 0x34, 0xa2, 0x34, + 0x8a, 0xb1, 0xc7, 0x59, 0xe8, 0x09, 0x19, 0xc8, 0x54, 0x94, 0x85, 0x67, 0x65, 0x41, 0x65, 0xe3, + 0x74, 0xea, 0xe1, 0x19, 0x93, 0xf3, 0xb2, 0x78, 0x12, 0xd1, 0x88, 0xaa, 0xd0, 0x2b, 0x22, 0x8d, + 0x76, 0xfe, 0x9a, 0xa0, 0x71, 0x1d, 0xa7, 0x11, 0x49, 0x20, 0x04, 0xf5, 0x62, 0x9d, 0x6d, 0xb4, + 0x8d, 0xee, 0xa1, 0xaf, 0x62, 0x78, 0x0a, 0x4c, 0x32, 0xb1, 0xcd, 0x02, 0xe9, 0x37, 0xf2, 0x65, + 0xcb, 0x1c, 0x5e, 
0xfa, 0x26, 0x99, 0x40, 0x07, 0x1c, 0x70, 0x7c, 0x97, 0x12, 0x8e, 0x85, 0x5d, + 0x6b, 0xd7, 0xba, 0x87, 0xfe, 0x2a, 0x87, 0x1f, 0xc1, 0x61, 0x25, 0x58, 0xd8, 0xf5, 0x76, 0xad, + 0xdb, 0xec, 0x39, 0x68, 0xcd, 0x13, 0x75, 0x13, 0xba, 0x2e, 0x5b, 0xfa, 0xf5, 0xc5, 0xb2, 0xb5, + 0xe7, 0x3f, 0x50, 0xe0, 0x08, 0x58, 0xf8, 0x3b, 0xa3, 0x5c, 0x0a, 0x7b, 0x5f, 0xb1, 0xcf, 0xd1, + 0x36, 0x8e, 0x22, 0x7d, 0x06, 0x1a, 0x68, 0xee, 0x20, 0x91, 0x7c, 0xee, 0x57, 0x93, 0x60, 0x07, + 0x1c, 0x85, 0x01, 0x0b, 0xc6, 0x24, 0x26, 0x92, 0x60, 0x61, 0x37, 0x94, 0xe8, 0x0d, 0x0c, 0xbe, + 0x06, 0x07, 0x24, 0x21, 0xf2, 0x16, 0x73, 0x6e, 0x5b, 0x6d, 0xa3, 0xdb, 0xec, 0x41, 0xa4, 0x1d, + 0x45, 0x9c, 0x85, 0x68, 0xa4, 0xac, 0xf6, 0xad, 0xa2, 0x67, 0xc0, 0xb9, 0x73, 0x01, 0x8e, 0xd6, + 0x77, 0xc1, 0x27, 0xa0, 0xf6, 0x0d, 0xcf, 0x4b, 0xfb, 0x8a, 0x10, 0x9e, 0x80, 0xfd, 0x2c, 0x88, + 0x53, 0xac, 0x0d, 0xf4, 0x75, 0x72, 0x61, 0xbe, 0x37, 0x3a, 0x2f, 0xc1, 0xb1, 0x96, 0x2b, 0x7c, + 0x7c, 0x97, 0x62, 0x21, 0xa1, 0x0d, 0xac, 0x29, 0x89, 0x25, 0xe6, 0xc2, 0x36, 0x94, 0xb6, 0x2a, + 0xed, 0xdc, 0x82, 0xc7, 0xab, 0x5e, 0xc1, 0x68, 0x22, 0x30, 0xbc, 0x02, 0x16, 0xd3, 0x90, 0x6a, + 0x6e, 0xf6, 0x5e, 0xed, 0x62, 0x51, 0x69, 0x79, 0x35, 0xa2, 0x83, 0xc0, 0xf1, 0x08, 0xf3, 0x0c, + 0xf3, 0xd5, 0xfc, 0xe7, 0xa0, 0x9e, 0xa6, 0x64, 0xa2, 0x6f, 0xe9, 0x1f, 0xe4, 0xcb, 0x56, 0xfd, + 0xe6, 0x66, 0x78, 0xe9, 0x2b, 0xb4, 0xf7, 0xdb, 0x00, 0x8f, 0x86, 0xeb, 0xa3, 0x61, 0x06, 0xac, + 0x52, 0x22, 0x7c, 0xbb, 0x8b, 0x92, 0xea, 0x7a, 0xe7, 0xdd, 0x8e, 0xac, 0x52, 0xe7, 0x27, 0xd0, + 0xd0, 0xca, 0xe1, 0x69, 0xf5, 0xa5, 0xaa, 0xb7, 0x8f, 0x06, 0xc5, 0xdb, 0x77, 0xb6, 0x94, 0xb3, + 0x79, 0x7f, 0x7f, 0xba, 0xb8, 0x77, 0xf7, 0x7e, 0xdd, 0xbb, 0x7b, 0x3f, 0x72, 0xd7, 0x58, 0xe4, + 0xae, 0xf1, 0x33, 0x77, 0x8d, 0x3f, 0xb9, 0x6b, 0x7c, 0xb9, 0xfa, 0xbf, 0x1f, 0xc6, 0x87, 0x0d, + 0xe0, 0x73, 0x6d, 0xdc, 0x50, 0x7a, 0xdf, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x0c, 0xb3, 0x50, + 0xdc, 0x89, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -254,6 +299,8 @@ type IntrospectionClient interface { // Clients can use this to detect features and capabilities when using // containerd. Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) } type introspectionClient struct { @@ -273,6 +320,15 @@ func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, o return out, nil } +func (c *introspectionClient) Server(ctx context.Context, in *types1.Empty, opts ...grpc.CallOption) (*ServerResponse, error) { + out := new(ServerResponse) + err := c.cc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Server", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // IntrospectionServer is the server API for Introspection service. type IntrospectionServer interface { // Plugins returns a list of plugins in containerd. @@ -280,6 +336,8 @@ type IntrospectionServer interface { // Clients can use this to detect features and capabilities when using // containerd. 
Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error) + // Server returns information about the containerd server + Server(context.Context, *types1.Empty) (*ServerResponse, error) } func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) { @@ -304,6 +362,24 @@ func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec fu return interceptor(ctx, in, info, handler) } +func _Introspection_Server_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(types1.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(IntrospectionServer).Server(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/containerd.services.introspection.v1.Introspection/Server", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(IntrospectionServer).Server(ctx, req.(*types1.Empty)) + } + return interceptor(ctx, in, info, handler) +} + var _Introspection_serviceDesc = grpc.ServiceDesc{ ServiceName: "containerd.services.introspection.v1.Introspection", HandlerType: (*IntrospectionServer)(nil), @@ -312,6 +388,10 @@ var _Introspection_serviceDesc = grpc.ServiceDesc{ MethodName: "Plugins", Handler: _Introspection_Plugins_Handler, }, + { + MethodName: "Server", + Handler: _Introspection_Server_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", @@ -488,6 +568,33 @@ func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ServerResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UUID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintIntrospection(dAtA, i, uint64(len(m.UUID))) + i += copy(dAtA[i:], m.UUID) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) @@ -583,6 +690,22 @@ func (m *PluginsResponse) Size() (n int) { return n } +func (m *ServerResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UUID) + if l > 0 { + n += 1 + l + sovIntrospection(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovIntrospection(x uint64) (n int) { for { n++ @@ -645,6 +768,17 @@ func (this *PluginsResponse) String() string { }, "") return s } +func (this *ServerResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerResponse{`, + `UUID:` + fmt.Sprintf("%v", this.UUID) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func valueToStringIntrospection(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -1206,6 +1340,92 @@ func (m *PluginsResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *ServerResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowIntrospection + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthIntrospection + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthIntrospection + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipIntrospection(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthIntrospection + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipIntrospection(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go index 60c80e98..2338de6b 100644 --- a/vendor/github.com/containerd/containerd/archive/compression/compression.go +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -180,7 +180,7 @@ func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { } } -// CompressStream compresseses the dest with specified compression algorithm. +// CompressStream compresses the dest with specified compression algorithm. func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { switch compression { case Uncompressed: diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index fae023c5..7ec46575 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -19,9 +19,7 @@ package archive import ( "archive/tar" "context" - "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -91,11 +89,6 @@ const ( // archives. whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix - // whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other - // layers. Normally these should not go into exported archives and all changed - // hardlinks should be copied to the top layer. - whiteoutLinkDir = whiteoutMetaPrefix + "plnk" - // whiteoutOpaqueDir file means directory has been made opaque - meaning // readdir calls to this directory do not follow to lower layers. 
whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq" @@ -117,11 +110,15 @@ func Apply(ctx context.Context, root string, r io.Reader, opts ...ApplyOpt) (int if options.Filter == nil { options.Filter = all } + if options.applyFunc == nil { + options.applyFunc = applyNaive + } - return apply(ctx, root, tar.NewReader(r), options) + return options.applyFunc(ctx, root, tar.NewReader(r), options) } -// applyNaive applies a tar stream of an OCI style diff tar. +// applyNaive applies a tar stream of an OCI style diff tar to a directory +// applying each file as either a whole file or whiteout. // See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { var ( @@ -131,11 +128,49 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO // may occur out of order unpackedPaths = make(map[string]struct{}) - // Used for aufs plink directory - aufsTempdir = "" - aufsHardlinks = make(map[string]*tar.Header) + convertWhiteout = options.ConvertWhiteout ) + if convertWhiteout == nil { + // handle whiteouts by removing the target files + convertWhiteout = func(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + if base == whiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return false, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + return false, err + } + + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + return false, os.RemoveAll(originalPath) + } + + return true, nil + } + } + // Iterate through the files in the archive. for { select { @@ -193,85 +228,21 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO if base == "" { parentPath = filepath.Dir(path) } - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = mkdirAll(parentPath, 0755) - if err != nil { - return 0, err - } - } - } - - // Skip AUFS metadata dirs - if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) { - // Regular files inside /.wh..wh.plnk can be used as hardlink targets - // We don't want this directory, but we need the files in them so that - // such hardlinks can be resolved. 
- if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { - basename := filepath.Base(hdr.Name) - aufsHardlinks[basename] = hdr - if aufsTempdir == "" { - if aufsTempdir, err = ioutil.TempDir(os.Getenv("XDG_RUNTIME_DIR"), "dockerplnk"); err != nil { - return 0, err - } - defer os.RemoveAll(aufsTempdir) - } - p, err := fs.RootPath(aufsTempdir, basename) - if err != nil { - return 0, err - } - if err := createTarFile(ctx, p, root, hdr, tr); err != nil { - return 0, err - } - } - - if hdr.Name != whiteoutOpaqueDir { - continue - } - } - - if strings.HasPrefix(base, whiteoutPrefix) { - dir := filepath.Dir(path) - if base == whiteoutOpaqueDir { - _, err := os.Lstat(dir) - if err != nil { - return 0, err - } - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - err = nil // parent was deleted - } - return err - } - if path == dir { - return nil - } - if _, exists := unpackedPaths[path]; !exists { - err := os.RemoveAll(path) - return err - } - return nil - }) - if err != nil { - return 0, err - } - continue - } - - originalBase := base[len(whiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - // Ensure originalPath is under dir - if dir[len(dir)-1] != filepath.Separator { - dir += string(filepath.Separator) - } - if !strings.HasPrefix(originalPath, dir) { - return 0, errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base) - } - - if err := os.RemoveAll(originalPath); err != nil { + if err := mkparent(ctx, parentPath, root, options.Parents); err != nil { return 0, err } + } + + // Naive whiteout convert function which handles whiteout files by + // removing the target files. + if err := validateWhiteout(path); err != nil { + return 0, err + } + writeFile, err := convertWhiteout(hdr, path) + if err != nil { + return 0, errors.Wrapf(err, "failed to convert whiteout file %q", hdr.Name) + } + if !writeFile { continue } // If path exits we almost always just want to remove and replace it. 
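
Editorial aside: the applyNaive rewrite above drops the AUFS-specific whiteout handling and routes every whiteout entry through a pluggable converter; the ConvertWhiteout type and the WithConvertWhiteout/WithParents options appear in tar_opts.go further down in this diff. A minimal sketch of how a caller of the vendored archive package could supply its own converter is below. The package name, function name, and the delete-on-whiteout policy are illustrative only (they mirror the new default behaviour); opaque-directory semantics are not implemented here for brevity.

package layerutil

import (
	"archive/tar"
	"context"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/containerd/containerd/archive"
)

// applyLayer extracts a layer tar into root, treating ".wh." entries as plain
// deletions, which is the same policy the new default convertWhiteout uses.
func applyLayer(ctx context.Context, root string, layer io.Reader) error {
	whiteout := func(hdr *tar.Header, path string) (bool, error) {
		base := filepath.Base(path)
		if strings.HasPrefix(base, ".wh.") {
			// Remove the shadowed file and skip writing the marker itself.
			orig := filepath.Join(filepath.Dir(path), strings.TrimPrefix(base, ".wh."))
			return false, os.RemoveAll(orig)
		}
		return true, nil // write every other entry normally
	}
	_, err := archive.Apply(ctx, root, layer, archive.WithConvertWhiteout(whiteout))
	return err
}
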
@@ -289,26 +260,6 @@ func applyNaive(ctx context.Context, root string, tr *tar.Reader, options ApplyO srcData := io.Reader(tr) srcHdr := hdr - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("invalid aufs hardlink") - } - p, err := fs.RootPath(aufsTempdir, linkBasename) - if err != nil { - return 0, err - } - tmpFile, err := os.Open(p) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil { return 0, err } @@ -428,6 +379,66 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)) } +func mkparent(ctx context.Context, path, root string, parents []string) error { + if dir, err := os.Lstat(path); err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkparent", + Path: path, + Err: syscall.ENOTDIR, + } + } else if !os.IsNotExist(err) { + return err + } + + i := len(path) + for i > len(root) && !os.IsPathSeparator(path[i-1]) { + i-- + } + + if i > len(root)+1 { + if err := mkparent(ctx, path[:i-1], root, parents); err != nil { + return err + } + } + + if err := mkdir(path, 0755); err != nil { + // Check that still doesn't exist + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + + for _, p := range parents { + ppath, err := fs.RootPath(p, path[len(root):]) + if err != nil { + return err + } + + dir, err := os.Lstat(ppath) + if err == nil { + if !dir.IsDir() { + // Replaced, do not copy attributes + break + } + if err := copyDirInfo(dir, path); err != nil { + return err + } + return copyUpXAttrs(path, ppath) + } else if !os.IsNotExist(err) { + return err + } + } + + log.G(ctx).Debugf("parent directory %q not found: default permissions(0755) used", path) + + return nil +} + type changeWriter struct { tw *tar.Writer source string @@ -493,6 +504,12 @@ func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, e hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + // truncate timestamp for compatibility. 
without PAX stdlib rounds timestamps instead + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + name := p if strings.HasPrefix(name, string(filepath.Separator)) { name, err = filepath.Rel(string(filepath.Separator), name) @@ -598,6 +615,9 @@ func (cw *changeWriter) Close() error { } func (cw *changeWriter) includeParents(hdr *tar.Header) error { + if cw.addedDirs == nil { + return nil + } name := strings.TrimRight(hdr.Name, "/") fname := filepath.Join(cw.source, name) parent := filepath.Dir(name) @@ -684,3 +704,26 @@ func hardlinkRootPath(root, linkname string) (string, error) { } return targetPath, nil } + +func validateWhiteout(path string) error { + base := filepath.Base(path) + dir := filepath.Dir(path) + + if base == whiteoutOpaqueDir { + return nil + } + + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + // Ensure originalPath is under dir + if dir[len(dir)-1] != filepath.Separator { + dir += string(filepath.Separator) + } + if !strings.HasPrefix(originalPath, dir) { + return errors.Wrapf(errInvalidArchive, "invalid whiteout name: %v", base) + } + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts.go b/vendor/github.com/containerd/containerd/archive/tar_opts.go index a08bc102..ca419e11 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts.go @@ -16,7 +16,19 @@ package archive -import "archive/tar" +import ( + "archive/tar" + "context" +) + +// ApplyOptions provides additional options for an Apply operation +type ApplyOptions struct { + Filter Filter // Filter tar headers + ConvertWhiteout ConvertWhiteout // Convert whiteout files + Parents []string // Parent directories to handle inherited attributes without CoW + + applyFunc func(context.Context, string, *tar.Reader, ApplyOptions) (int64, error) +} // ApplyOpt allows setting mutable archive apply properties on creation type ApplyOpt func(options *ApplyOptions) error @@ -24,6 +36,9 @@ type ApplyOpt func(options *ApplyOptions) error // Filter specific files from the archive type Filter func(*tar.Header) (bool, error) +// ConvertWhiteout converts whiteout files from the archive +type ConvertWhiteout func(*tar.Header, string) (bool, error) + // all allows all files func all(_ *tar.Header) (bool, error) { return true, nil @@ -36,3 +51,24 @@ func WithFilter(f Filter) ApplyOpt { return nil } } + +// WithConvertWhiteout uses the convert function to convert the whiteout files. +func WithConvertWhiteout(c ConvertWhiteout) ApplyOpt { + return func(options *ApplyOptions) error { + options.ConvertWhiteout = c + return nil + } +} + +// WithParents provides parent directories for resolving inherited attributes +// directory from the filesystem. +// Inherited attributes are searched from first to last, making the first +// element in the list the most immediate parent directory. +// NOTE: When applying to a filesystem which supports CoW, file attributes +// should be inherited by the filesystem. 
+func WithParents(p []string) ApplyOpt { + return func(options *ApplyOptions) error { + options.Parents = p + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go b/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go new file mode 100644 index 00000000..38ef9e9b --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_linux.go @@ -0,0 +1,59 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package archive + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "golang.org/x/sys/unix" +) + +// AufsConvertWhiteout converts whiteout files for aufs. +func AufsConvertWhiteout(_ *tar.Header, _ string) (bool, error) { + return true, nil +} + +// OverlayConvertWhiteout converts whiteout files for overlay. +func OverlayConvertWhiteout(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque, we need to translate that to overlay + if base == whiteoutOpaqueDir { + // don't write the file itself + return false, unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, whiteoutPrefix) { + originalBase := base[len(whiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + // don't write the file itself + return false, os.Chown(originalPath, hdr.Uid, hdr.Gid) + } + + return true, nil +} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go b/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go deleted file mode 100644 index 17382696..00000000 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package archive - -// ApplyOptions provides additional options for an Apply operation -type ApplyOptions struct { - Filter Filter // Filter tar headers -} diff --git a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go index e4b15a16..f472013b 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_opts_windows.go @@ -18,28 +18,12 @@ package archive -// ApplyOptions provides additional options for an Apply operation -type ApplyOptions struct { - ParentLayerPaths []string // Parent layer paths used for Windows layer apply - IsWindowsContainerLayer bool // True if the tar stream to be applied is a Windows Container Layer - Filter Filter // Filter tar headers -} - -// WithParentLayers adds parent layers to the apply process this is required -// for all Windows layers except the base layer. -func WithParentLayers(parentPaths []string) ApplyOpt { - return func(options *ApplyOptions) error { - options.ParentLayerPaths = parentPaths - return nil - } -} - // AsWindowsContainerLayer indicates that the tar stream to apply is that of // a Windows Container Layer. The caller must be holding SeBackupPrivilege and // SeRestorePrivilege. func AsWindowsContainerLayer() ApplyOpt { return func(options *ApplyOptions) error { - options.IsWindowsContainerLayer = true + options.applyFunc = applyWindowsLayer return nil } } diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index 022dd6d4..e8721875 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -20,11 +20,12 @@ package archive import ( "archive/tar" - "context" "os" + "strings" "sync" "syscall" + "github.com/containerd/continuity/fs" "github.com/containerd/continuity/sysx" "github.com/opencontainers/runc/libcontainer/system" "github.com/pkg/errors" @@ -74,10 +75,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { return f, err } -func mkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - func mkdir(path string, perm os.FileMode) error { if err := os.Mkdir(path, perm); err != nil { return err @@ -149,11 +146,71 @@ func getxattr(path, attr string) ([]byte, error) { } func setxattr(path, key, value string) error { - return sysx.LSetxattr(path, key, []byte(value), 0) + // Do not set trusted attributes + if strings.HasPrefix(key, "trusted.") { + return errors.Wrap(unix.ENOTSUP, "admin attributes from archive not supported") + } + return unix.Lsetxattr(path, key, []byte(value), 0) } -// apply applies a tar stream of an OCI style diff tar. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets -func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { - return applyNaive(ctx, root, tr, options) +func copyDirInfo(fi os.FileInfo, path string) error { + st := fi.Sys().(*syscall.Stat_t) + if err := os.Lchown(path, int(st.Uid), int(st.Gid)); err != nil { + if os.IsPermission(err) { + // Normally if uid/gid are the same this would be a no-op, but some + // filesystems may still return EPERM... for instance NFS does this. + // In such a case, this is not an error. 
+ if dstStat, err2 := os.Lstat(path); err2 == nil { + st2 := dstStat.Sys().(*syscall.Stat_t) + if st.Uid == st2.Uid && st.Gid == st2.Gid { + err = nil + } + } + } + if err != nil { + return errors.Wrapf(err, "failed to chown %s", path) + } + } + + if err := os.Chmod(path, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", path) + } + + timespec := []unix.Timespec{unix.Timespec(fs.StatAtime(st)), unix.Timespec(fs.StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", path) + } + + return nil +} + +func copyUpXAttrs(dst, src string) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + if err == unix.ENOTSUP || err == sysx.ENODATA { + return nil + } + return errors.Wrapf(err, "failed to list xattrs on %s", src) + } + for _, xattr := range xattrKeys { + // Do not copy up trusted attributes + if strings.HasPrefix(xattr, "trusted.") { + continue + } + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + if err == unix.ENOTSUP || err == sysx.ENODATA { + continue + } + return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src) + } + if err := unix.Lsetxattr(dst, xattr, data, unix.XATTR_CREATE); err != nil { + if err == unix.ENOTSUP || err == unix.ENODATA || err == unix.EEXIST { + continue + } + return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst) + } + } + + return nil } diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index b97631fc..a5c6da69 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -23,7 +23,6 @@ import ( "bufio" "context" "encoding/base64" - "errors" "fmt" "io" "os" @@ -36,6 +35,7 @@ import ( "github.com/Microsoft/go-winio" "github.com/Microsoft/hcsshim" "github.com/containerd/containerd/sys" + "github.com/pkg/errors" ) const ( @@ -107,10 +107,6 @@ func openFile(name string, flag int, perm os.FileMode) (*os.File, error) { return sys.OpenFileSequential(name, flag, perm) } -func mkdirAll(path string, perm os.FileMode) error { - return sys.MkdirAll(path, perm) -} - func mkdir(path string, perm os.FileMode) error { return os.Mkdir(path, perm) } @@ -153,16 +149,8 @@ func setxattr(path, key, value string) error { return errors.New("xattrs not supported on Windows") } -// apply applies a tar stream of an OCI style diff tar of a Windows layer. -// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets -func apply(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { - if options.IsWindowsContainerLayer { - return applyWindowsLayer(ctx, root, tr, options) - } - return applyNaive(ctx, root, tr, options) -} - -// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows layer. +// applyWindowsLayer applies a tar stream of an OCI style diff tar of a Windows +// layer using the hcsshim layer writer and backup streams. 
// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options ApplyOptions) (size int64, err error) { home, id := filepath.Split(root) @@ -170,7 +158,7 @@ func applyWindowsLayer(ctx context.Context, root string, tr *tar.Reader, options HomeDir: home, } - w, err := hcsshim.NewLayerWriter(info, id, options.ParentLayerPaths) + w, err := hcsshim.NewLayerWriter(info, id, options.Parents) if err != nil { return 0, err } @@ -443,3 +431,14 @@ func writeBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) ( } } } + +func copyDirInfo(fi os.FileInfo, path string) error { + if err := os.Chmod(path, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", path) + } + return nil +} + +func copyUpXAttrs(dst, src string) error { + return nil +} diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go index fd8d98bf..e05ca719 100644 --- a/vendor/github.com/containerd/containerd/archive/time_unix.go +++ b/vendor/github.com/containerd/containerd/archive/time_unix.go @@ -32,7 +32,7 @@ func chtimes(path string, atime, mtime time.Time) error { utimes[1] = unix.NsecToTimespec(mtime.UnixNano()) if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") + return errors.Wrapf(err, "failed call to UtimesNanoAt for %s", path) } return nil diff --git a/vendor/github.com/containerd/containerd/cio/io_unix.go b/vendor/github.com/containerd/containerd/cio/io_unix.go index eb2ada80..42d32093 100644 --- a/vendor/github.com/containerd/containerd/cio/io_unix.go +++ b/vendor/github.com/containerd/containerd/cio/io_unix.go @@ -72,17 +72,19 @@ func copyIO(fifos *FIFOSet, ioset *Streams) (*cio, error) { } var wg = &sync.WaitGroup{} - wg.Add(1) - go func() { - p := bufPool.Get().(*[]byte) - defer bufPool.Put(p) + if fifos.Stdout != "" { + wg.Add(1) + go func() { + p := bufPool.Get().(*[]byte) + defer bufPool.Put(p) - io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) - pipes.Stdout.Close() - wg.Done() - }() + io.CopyBuffer(ioset.Stdout, pipes.Stdout, *p) + pipes.Stdout.Close() + wg.Done() + }() + } - if !fifos.Terminal { + if !fifos.Terminal && fifos.Stderr != "" { wg.Add(1) go func() { p := bufPool.Get().(*[]byte) diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index 9b0888c7..99141e2d 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -57,6 +57,7 @@ import ( "github.com/containerd/containerd/snapshots" snproxy "github.com/containerd/containerd/snapshots/proxy" "github.com/containerd/typeurl" + "github.com/gogo/protobuf/types" ptypes "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -88,12 +89,20 @@ func New(address string, opts ...ClientOpt) (*Client, error) { copts.timeout = 10 * time.Second } - c := &Client{} + c := &Client{ + defaultns: copts.defaultns, + } if copts.defaultRuntime != "" { c.runtime = copts.defaultRuntime } else { - c.runtime = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS) + c.runtime = defaults.DefaultRuntime + } + + if copts.defaultPlatform != nil { + c.platform = copts.defaultPlatform + } else { + c.platform = platforms.Default() } if copts.services 
!= nil { @@ -137,13 +146,12 @@ func New(address string, opts ...ClientOpt) (*Client, error) { c.conn, c.connector = conn, connector } if copts.services == nil && c.conn == nil { - return nil, errors.New("no grpc connection or services is available") + return nil, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection or services is available") } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - ctx := namespaces.WithNamespace(context.Background(), copts.defaultns) - if label, err := c.GetLabel(ctx, defaults.DefaultRuntimeNSLabel); err != nil { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err } else if label != "" { c.runtime = label @@ -163,14 +171,14 @@ func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) { } } c := &Client{ - conn: conn, - runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), + defaultns: copts.defaultns, + conn: conn, + runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS), } // check namespace labels for default runtime - if copts.defaultRuntime == "" && copts.defaultns != "" { - ctx := namespaces.WithNamespace(context.Background(), copts.defaultns) - if label, err := c.GetLabel(ctx, defaults.DefaultRuntimeNSLabel); err != nil { + if copts.defaultRuntime == "" && c.defaultns != "" { + if label, err := c.GetLabel(context.Background(), defaults.DefaultRuntimeNSLabel); err != nil { return nil, err } else if label != "" { c.runtime = label @@ -190,13 +198,15 @@ type Client struct { connMu sync.Mutex conn *grpc.ClientConn runtime string + defaultns string + platform platforms.MatchComparer connector func() (*grpc.ClientConn, error) } // Reconnect re-establishes the GRPC connection to the containerd daemon func (c *Client) Reconnect() error { if c.connector == nil { - return errors.New("unable to reconnect to containerd, no connector available") + return errors.Wrap(errdefs.ErrUnavailable, "unable to reconnect to containerd, no connector available") } c.connMu.Lock() defer c.connMu.Unlock() @@ -219,7 +229,7 @@ func (c *Client) IsServing(ctx context.Context) (bool, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return false, errors.New("no grpc connection available") + return false, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.WaitForReady(true)) @@ -291,10 +301,14 @@ type RemoteContext struct { PlatformMatcher platforms.MatchComparer // Unpack is done after an image is pulled to extract into a snapshotter. + // It is done simultaneously for schema 2 images when they are pulled. // If an image is not unpacked on pull, it can be unpacked any time // afterwards. Unpacking is required to run an image. Unpack bool + // UnpackOpts handles options to the unpack call. + UnpackOpts []UnpackOpt + // Snapshotter used for unpacking Snapshotter string @@ -326,9 +340,8 @@ type RemoteContext struct { // MaxConcurrentDownloads is the max concurrent content downloads for each pull. MaxConcurrentDownloads int - // AppendDistributionSourceLabel allows fetcher to add distribute source - // label for each blob content, which doesn't work for legacy schema1. 
- AppendDistributionSourceLabel bool + // AllMetadata downloads all manifests and known-configuration files + AllMetadata bool } func defaultRemoteContext() *RemoteContext { @@ -350,7 +363,7 @@ func (c *Client) Fetch(ctx context.Context, ref string, opts ...RemoteOpt) (imag } if fetchCtx.Unpack { - return images.Image{}, errors.New("unpack on fetch not supported, try pull") + return images.Image{}, errors.Wrap(errdefs.ErrNotImplemented, "unpack on fetch not supported, try pull") } if fetchCtx.PlatformMatcher == nil { @@ -403,6 +416,11 @@ func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, } } + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(ref, "@") { + ref = ref + "@" + desc.Digest.String() + } + pusher, err := pushCtx.Resolver.Pusher(ctx, ref) if err != nil { return err @@ -491,7 +509,10 @@ func writeIndex(ctx context.Context, index *ocispec.Index, client *Client, ref s func (c *Client) GetLabel(ctx context.Context, label string) (string, error) { ns, err := namespaces.NamespaceRequired(ctx) if err != nil { - return "", err + if c.defaultns == "" { + return "", err + } + ns = c.defaultns } srv := c.NamespaceService() @@ -557,6 +578,10 @@ func (c *Client) ContentStore() content.Store { // SnapshotService returns the underlying snapshotter for the provided snapshotter name func (c *Client) SnapshotService(snapshotterName string) snapshots.Snapshotter { + snapshotterName, err := c.resolveSnapshotterName(context.Background(), snapshotterName) + if err != nil { + snapshotterName = DefaultSnapshotter + } if c.snapshotters != nil { return c.snapshotters[snapshotterName] } @@ -656,7 +681,7 @@ func (c *Client) Version(ctx context.Context) (Version, error) { c.connMu.Lock() if c.conn == nil { c.connMu.Unlock() - return Version{}, errors.New("no grpc connection available") + return Version{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") } c.connMu.Unlock() response, err := c.VersionService().Version(ctx, &ptypes.Empty{}) @@ -669,6 +694,27 @@ func (c *Client) Version(ctx context.Context) (Version, error) { }, nil } +type ServerInfo struct { + UUID string +} + +func (c *Client) Server(ctx context.Context) (ServerInfo, error) { + c.connMu.Lock() + if c.conn == nil { + c.connMu.Unlock() + return ServerInfo{}, errors.Wrap(errdefs.ErrUnavailable, "no grpc connection available") + } + c.connMu.Unlock() + + response, err := c.IntrospectionService().Server(ctx, &types.Empty{}) + if err != nil { + return ServerInfo{}, err + } + return ServerInfo{ + UUID: response.UUID, + }, nil +} + func (c *Client) resolveSnapshotterName(ctx context.Context, name string) (string, error) { if name == "" { label, err := c.GetLabel(ctx, defaults.DefaultSnapshotterNSLabel) diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go index ed2ff05d..6f485c18 100644 --- a/vendor/github.com/containerd/containerd/client_opts.go +++ b/vendor/github.com/containerd/containerd/client_opts.go @@ -26,11 +26,12 @@ import ( ) type clientOpts struct { - defaultns string - defaultRuntime string - services *services - dialOptions []grpc.DialOption - timeout time.Duration + defaultns string + defaultRuntime string + defaultPlatform platforms.MatchComparer + services *services + dialOptions []grpc.DialOption + timeout time.Duration } // ClientOpt allows callers to set options on the containerd client @@ -55,6 +56,14 @@ func WithDefaultRuntime(rt string) ClientOpt { } } +// 
WithDefaultPlatform sets the default platform matcher on the client +func WithDefaultPlatform(platform platforms.MatchComparer) ClientOpt { + return func(c *clientOpts) error { + c.defaultPlatform = platform + return nil + } +} + // WithDialOpts allows grpc.DialOptions to be set on the connection func WithDialOpts(opts []grpc.DialOption) ClientOpt { return func(c *clientOpts) error { @@ -195,11 +204,10 @@ func WithMaxConcurrentDownloads(max int) RemoteOpt { } } -// WithAppendDistributionSourceLabel allows fetcher to add distribute source -// label for each blob content, which doesn't work for legacy schema1. -func WithAppendDistributionSourceLabel() RemoteOpt { +// WithAllMetadata downloads all manifests and known-configuration files +func WithAllMetadata() RemoteOpt { return func(_ *Client, c *RemoteContext) error { - c.AppendDistributionSourceLabel = true + c.AllMetadata = true return nil } } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index 46d51ecd..fd880d0e 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -25,6 +25,7 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" + tasktypes "github.com/containerd/containerd/api/types/task" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" @@ -382,7 +383,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach cio.Attach) (Task, er return nil, err } var i cio.IO - if ioAttach != nil { + if ioAttach != nil && response.Process.Status != tasktypes.StatusUnknown { + // Do not attach IO for task in unknown state, because there + // are no fifo paths anyway. 
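
Editorial aside: the client.go and client_opts.go hunks above add a per-client default platform matcher and expose the daemon's UUID through the new introspection Server RPC. A rough sketch of how a consumer of the vendored v1.3 client could use both follows; the socket path, namespace, and platform string are the usual defaults chosen for illustration, not values this diff prescribes.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
)

func main() {
	// Pin the client to a single platform instead of relying on platforms.Default().
	client, err := containerd.New(
		"/run/containerd/containerd.sock",
		containerd.WithDefaultNamespace("default"),
		containerd.WithDefaultPlatform(platforms.Only(platforms.MustParse("linux/amd64"))),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "default")

	// New in containerd v1.3: query the daemon's identity via the introspection service.
	info, err := client.Server(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("containerd server UUID:", info.UUID)
}
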
if i, err = attachExistingIO(response, ioAttach); err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go index 4c8a4048..89548402 100644 --- a/vendor/github.com/containerd/containerd/container_opts.go +++ b/vendor/github.com/containerd/containerd/container_opts.go @@ -22,7 +22,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/snapshots" "github.com/containerd/typeurl" "github.com/gogo/protobuf/types" @@ -78,6 +77,14 @@ func WithImage(i Image) NewContainerOpts { } } +// WithImageName allows setting the image name as the base for the container +func WithImageName(n string) NewContainerOpts { + return func(ctx context.Context, _ *Client, c *containers.Container) error { + c.Image = n + return nil + } +} + // WithContainerLabels adds the provided labels to the container func WithContainerLabels(labels map[string]string) NewContainerOpts { return func(_ context.Context, _ *Client, c *containers.Container) error { @@ -171,7 +178,9 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta if err != nil { return err } - return s.Remove(ctx, c.SnapshotKey) + if err := s.Remove(ctx, c.SnapshotKey); err != nil && !errdefs.IsNotFound(err) { + return err + } } return nil } @@ -180,7 +189,7 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta // root filesystem in read-only mode func WithNewSnapshotView(id string, i Image, opts ...snapshots.Opt) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go index af52d042..b109a10e 100644 --- a/vendor/github.com/containerd/containerd/container_opts_unix.go +++ b/vendor/github.com/containerd/containerd/container_opts_unix.go @@ -28,7 +28,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/platforms" "github.com/opencontainers/image-spec/identity" ) @@ -45,7 +44,7 @@ func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerO func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/container_restore_opts.go b/vendor/github.com/containerd/containerd/container_restore_opts.go index 4f251c4a..03722dba 100644 --- a/vendor/github.com/containerd/containerd/container_restore_opts.go +++ b/vendor/github.com/containerd/containerd/container_restore_opts.go @@ -22,7 +22,6 @@ import ( "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - 
"github.com/containerd/containerd/platforms" "github.com/gogo/protobuf/proto" ptypes "github.com/gogo/protobuf/types" "github.com/opencontainers/image-spec/identity" @@ -58,7 +57,7 @@ func WithRestoreImage(ctx context.Context, id string, client *Client, checkpoint return err } - diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default()) + diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), client.platform) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go index c7ad2bfa..7174bbd6 100644 --- a/vendor/github.com/containerd/containerd/containers/containers.go +++ b/vendor/github.com/containerd/containerd/containers/containers.go @@ -49,7 +49,7 @@ type Container struct { // This property is required and immutable. Runtime RuntimeInfo - // Spec should carry the the runtime specification used to implement the + // Spec should carry the runtime specification used to implement the // container. // // This field is required but mutable. diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 6d5c7624..c1c20461 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -169,6 +169,28 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { return err } +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + // seekReader attempts to seek the reader to the given offset, either by // resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding // up to the given offset. diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go index 30ed4223..319e8777 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go @@ -32,4 +32,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. DefaultFIFODir = "/run/containerd/fifo" + // DefaultRuntime is the default linux runtime + DefaultRuntime = "io.containerd.runc.v2" ) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go index 16f1048c..5eede8de 100644 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go @@ -40,4 +40,6 @@ const ( // DefaultFIFODir is the default location used by client-side cio library // to store FIFOs. Unused on Windows. 
DefaultFIFODir = "" + // DefaultRuntime is the default windows runtime + DefaultRuntime = "io.containerd.runhcs.v1" ) diff --git a/vendor/github.com/containerd/containerd/diff.go b/vendor/github.com/containerd/containerd/diff.go index 18236294..445df019 100644 --- a/vendor/github.com/containerd/containerd/diff.go +++ b/vendor/github.com/containerd/containerd/diff.go @@ -48,13 +48,14 @@ type diffRemote struct { func (r *diffRemote) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (ocispec.Descriptor, error) { var config diff.ApplyConfig for _, opt := range opts { - if err := opt(&config); err != nil { + if err := opt(ctx, desc, &config); err != nil { return ocispec.Descriptor{}, err } } req := &diffapi.ApplyRequest{ - Diff: fromDescriptor(desc), - Mounts: fromMounts(mounts), + Diff: fromDescriptor(desc), + Mounts: fromMounts(mounts), + Payloads: config.ProcessorPayloads, } resp, err := r.client.Apply(ctx, req) if err != nil { diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go index a62a4671..17aab616 100644 --- a/vendor/github.com/containerd/containerd/diff/diff.go +++ b/vendor/github.com/containerd/containerd/diff/diff.go @@ -20,6 +20,7 @@ import ( "context" "github.com/containerd/containerd/mount" + "github.com/gogo/protobuf/types" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -53,10 +54,12 @@ type Comparer interface { // ApplyConfig is used to hold parameters needed for a apply operation type ApplyConfig struct { + // ProcessorPayloads specifies the payload sent to various processors + ProcessorPayloads map[string]*types.Any } // ApplyOpt is used to configure an Apply operation -type ApplyOpt func(*ApplyConfig) error +type ApplyOpt func(context.Context, ocispec.Descriptor, *ApplyConfig) error // Applier allows applying diffs between mounts type Applier interface { @@ -94,3 +97,11 @@ func WithLabels(labels map[string]string) Opt { return nil } } + +// WithPayloads sets the apply processor payloads to the config +func WithPayloads(payloads map[string]*types.Any) ApplyOpt { + return func(_ context.Context, _ ocispec.Descriptor, c *ApplyConfig) error { + c.ProcessorPayloads = payloads + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/diff/stream.go b/vendor/github.com/containerd/containerd/diff/stream.go new file mode 100644 index 00000000..1b625fea --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream.go @@ -0,0 +1,187 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
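
Editorial aside: ApplyConfig gains ProcessorPayloads and the WithPayloads option just above, which is the hook for handing opaque configuration to the stream processors registered in the new diff/stream.go. The sketch below shows one way a caller might pass a payload to an Applier; the processor ID and the cfg value are hypothetical placeholders, and the payload must be a type that typeurl/proto can marshal. This is an assumption-laden illustration, not an API defined by this diff beyond WithPayloads itself.

package diffutil

import (
	"context"

	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/mount"
	"github.com/containerd/typeurl"
	"github.com/gogo/protobuf/types"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// applyWithPayload forwards an opaque payload to a (hypothetical) stream
// processor registered under procID on the daemon side.
func applyWithPayload(ctx context.Context, applier diff.Applier, desc ocispec.Descriptor,
	mounts []mount.Mount, procID string, cfg interface{}) (ocispec.Descriptor, error) {

	payload, err := typeurl.MarshalAny(cfg) // cfg must be a typeurl-registered type
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	payloads := map[string]*types.Any{procID: payload}
	return applier.Apply(ctx, desc, mounts, diff.WithPayloads(payloads))
}
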
+*/ + +package diff + +import ( + "context" + "io" + "os" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/images" + "github.com/gogo/protobuf/types" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var ( + handlers []Handler + + // ErrNoProcessor is returned when no stream processor is available for a media-type + ErrNoProcessor = errors.New("no processor for media-type") +) + +func init() { + // register the default compression handler + RegisterProcessor(compressedHandler) +} + +// RegisterProcessor registers a stream processor for media-types +func RegisterProcessor(handler Handler) { + handlers = append(handlers, handler) +} + +// GetProcessor returns the processor for a media-type +func GetProcessor(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + // reverse this list so that user configured handlers come up first + for i := len(handlers) - 1; i >= 0; i-- { + processor, ok := handlers[i](ctx, stream.MediaType()) + if ok { + return processor(ctx, stream, payloads) + } + } + return nil, ErrNoProcessor +} + +// Handler checks a media-type and initializes the processor +type Handler func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) + +// StaticHandler returns the processor init func for a static media-type +func StaticHandler(expectedMediaType string, fn StreamProcessorInit) Handler { + return func(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + if mediaType == expectedMediaType { + return fn, true + } + return nil, false + } +} + +// StreamProcessorInit returns the initialized stream processor +type StreamProcessorInit func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) + +// RawProcessor provides access to direct fd for processing +type RawProcessor interface { + // File returns the fd for the read stream of the underlying processor + File() *os.File +} + +// StreamProcessor handles processing a content stream and transforming it into a different media-type +type StreamProcessor interface { + io.ReadCloser + + // MediaType is the resulting media-type that the processor processes the stream into + MediaType() string +} + +func compressedHandler(ctx context.Context, mediaType string) (StreamProcessorInit, bool) { + compressed, err := images.DiffCompression(ctx, mediaType) + if err != nil { + return nil, false + } + if compressed != "" { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + ds, err := compression.DecompressStream(stream) + if err != nil { + return nil, err + } + + return &compressedProcessor{ + rc: ds, + }, nil + }, true + } + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + return &stdProcessor{ + rc: stream, + }, nil + }, true +} + +// NewProcessorChain initialized the root StreamProcessor +func NewProcessorChain(mt string, r io.Reader) StreamProcessor { + return &processorChain{ + mt: mt, + rc: r, + } +} + +type processorChain struct { + mt string + rc io.Reader +} + +func (c *processorChain) MediaType() string { + return c.mt +} + +func (c *processorChain) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *processorChain) Close() error { + return nil +} + +type stdProcessor struct { + rc StreamProcessor +} + +func (c *stdProcessor) MediaType() string { + return 
ocispec.MediaTypeImageLayer +} + +func (c *stdProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *stdProcessor) Close() error { + return nil +} + +type compressedProcessor struct { + rc io.ReadCloser +} + +func (c *compressedProcessor) MediaType() string { + return ocispec.MediaTypeImageLayer +} + +func (c *compressedProcessor) Read(p []byte) (int, error) { + return c.rc.Read(p) +} + +func (c *compressedProcessor) Close() error { + return c.rc.Close() +} + +func BinaryHandler(id, returnsMediaType string, mediaTypes []string, path string, args []string) Handler { + set := make(map[string]struct{}, len(mediaTypes)) + for _, m := range mediaTypes { + set[m] = struct{}{} + } + return func(_ context.Context, mediaType string) (StreamProcessorInit, bool) { + if _, ok := set[mediaType]; ok { + return func(ctx context.Context, stream StreamProcessor, payloads map[string]*types.Any) (StreamProcessor, error) { + payload := payloads[id] + return NewBinaryProcessor(ctx, mediaType, returnsMediaType, stream, path, args, payload) + }, true + } + return nil, false + } +} + +const mediaTypeEnvVar = "STREAM_PROCESSOR_MEDIATYPE" diff --git a/vendor/github.com/containerd/containerd/diff/stream_unix.go b/vendor/github.com/containerd/containerd/diff/stream_unix.go new file mode 100644 index 00000000..28f38d99 --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_unix.go @@ -0,0 +1,146 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + var payloadC io.Closer + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + go func() { + io.Copy(w, bytes.NewReader(data)) + w.Close() + }() + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + payloadC = r + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + if payloadC != nil { + payloadC.Close() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} diff --git a/vendor/github.com/containerd/containerd/diff/stream_windows.go b/vendor/github.com/containerd/containerd/diff/stream_windows.go new file mode 100644 index 00000000..8dadd72c --- /dev/null +++ b/vendor/github.com/containerd/containerd/diff/stream_windows.go @@ -0,0 +1,165 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package diff + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "sync" + + winio "github.com/Microsoft/go-winio" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const processorPipe = "STREAM_PROCESSOR_PIPE" + +// NewBinaryProcessor returns a binary processor for use with processing content streams +func NewBinaryProcessor(ctx context.Context, imt, rmt string, stream StreamProcessor, name string, args []string, payload *types.Any) (StreamProcessor, error) { + cmd := exec.CommandContext(ctx, name, args...) 
+ cmd.Env = os.Environ() + + if payload != nil { + data, err := proto.Marshal(payload) + if err != nil { + return nil, err + } + up, err := getUiqPath() + if err != nil { + return nil, err + } + path := fmt.Sprintf("\\\\.\\pipe\\containerd-processor-%s-pipe", up) + l, err := winio.ListenPipe(path, nil) + if err != nil { + return nil, err + } + go func() { + defer l.Close() + conn, err := l.Accept() + if err != nil { + logrus.WithError(err).Error("accept npipe connection") + return + } + io.Copy(conn, bytes.NewReader(data)) + conn.Close() + }() + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", processorPipe, path)) + } + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", mediaTypeEnvVar, imt)) + var ( + stdin io.Reader + closer func() error + err error + ) + if f, ok := stream.(RawProcessor); ok { + stdin = f.File() + closer = f.File().Close + } else { + stdin = stream + } + cmd.Stdin = stdin + r, w, err := os.Pipe() + if err != nil { + return nil, err + } + cmd.Stdout = w + stderr := bytes.NewBuffer(nil) + cmd.Stderr = stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + p := &binaryProcessor{ + cmd: cmd, + r: r, + mt: rmt, + stderr: stderr, + } + go p.wait() + + // close after start and dup + w.Close() + if closer != nil { + closer() + } + return p, nil +} + +type binaryProcessor struct { + cmd *exec.Cmd + r *os.File + mt string + stderr *bytes.Buffer + + mu sync.Mutex + err error +} + +func (c *binaryProcessor) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *binaryProcessor) wait() { + if err := c.cmd.Wait(); err != nil { + if _, ok := err.(*exec.ExitError); ok { + c.mu.Lock() + c.err = errors.New(c.stderr.String()) + c.mu.Unlock() + } + } +} + +func (c *binaryProcessor) File() *os.File { + return c.r +} + +func (c *binaryProcessor) MediaType() string { + return c.mt +} + +func (c *binaryProcessor) Read(p []byte) (int, error) { + return c.r.Read(p) +} + +func (c *binaryProcessor) Close() error { + err := c.r.Close() + if kerr := c.cmd.Process.Kill(); err == nil { + err = kerr + } + return err +} + +func getUiqPath() (string, error) { + dir, err := ioutil.TempDir("", "") + if err != nil { + return "", err + } + os.Remove(dir) + return filepath.Base(dir), nil +} diff --git a/vendor/github.com/containerd/containerd/events/exchange/exchange.go b/vendor/github.com/containerd/containerd/events/exchange/exchange.go index 39972d74..59273c95 100644 --- a/vendor/github.com/containerd/containerd/events/exchange/exchange.go +++ b/vendor/github.com/containerd/containerd/events/exchange/exchange.go @@ -50,7 +50,7 @@ var _ events.Publisher = &Exchange{} var _ events.Forwarder = &Exchange{} var _ events.Subscriber = &Exchange{} -// Forward accepts an envelope to be direcly distributed on the exchange. +// Forward accepts an envelope to be directly distributed on the exchange. // // This is useful when an event is forwarded on behalf of another namespace or // when the event is propagated on behalf of another publisher. 
diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go index 77c95eaa..9ef09ac2 100644 --- a/vendor/github.com/containerd/containerd/image.go +++ b/vendor/github.com/containerd/containerd/image.go @@ -19,16 +19,21 @@ package containerd import ( "context" "fmt" + "strings" + "sync/atomic" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/rootfs" + "github.com/containerd/containerd/snapshots" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "golang.org/x/sync/semaphore" ) // Image describes an image used by containers @@ -40,11 +45,13 @@ type Image interface { // Labels of the image Labels() map[string]string // Unpack unpacks the image's content into a snapshot - Unpack(context.Context, string) error + Unpack(context.Context, string, ...UnpackOpt) error // RootFS returns the unpacked diffids that make up images rootfs. RootFS(ctx context.Context) ([]digest.Digest, error) // Size returns the total size of the image's packed resources. Size(ctx context.Context) (int64, error) + // Usage returns a usage calculation for the image. + Usage(context.Context, ...UsageOpt) (int64, error) // Config descriptor for the image. Config(ctx context.Context) (ocispec.Descriptor, error) // IsUnpacked returns whether or not an image is unpacked. @@ -53,6 +60,49 @@ type Image interface { ContentStore() content.Store } +type usageOptions struct { + manifestLimit *int + manifestOnly bool + snapshots bool +} + +// UsageOpt is used to configure the usage calculation +type UsageOpt func(*usageOptions) error + +// WithUsageManifestLimit sets the limit to the number of manifests which will +// be walked for usage. Setting this value to 0 will require all manifests to +// be walked, returning ErrNotFound if manifests are missing. +// NOTE: By default all manifests which exist will be walked +// and any non-existent manifests and their subobjects will be ignored. +func WithUsageManifestLimit(i int) UsageOpt { + // If 0 then don't filter any manifests + // By default limits to current platform + return func(o *usageOptions) error { + o.manifestLimit = &i + return nil + } +} + +// WithSnapshotUsage will check for referenced snapshots from the image objects +// and include the snapshot size in the total usage. +func WithSnapshotUsage() UsageOpt { + return func(o *usageOptions) error { + o.snapshots = true + return nil + } +} + +// WithManifestUsage is used to get the usage for an image based on what is +// reported by the manifests rather than what exists in the content store. +// NOTE: This function is best used with the manifest limit set to get a +// consistent value, otherwise non-existent manifests will be excluded. 
+func WithManifestUsage() UsageOpt { + return func(o *usageOptions) error { + o.manifestOnly = true + return nil + } +} + var _ = (Image)(&image{}) // NewImage returns a client image object from the metadata image @@ -60,7 +110,7 @@ func NewImage(client *Client, i images.Image) Image { return &image{ client: client, i: i, - platform: platforms.Default(), + platform: client.platform, } } @@ -98,8 +148,95 @@ func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) { } func (i *image) Size(ctx context.Context) (int64, error) { - provider := i.client.ContentStore() - return i.i.Size(ctx, provider, i.platform) + return i.Usage(ctx, WithUsageManifestLimit(1), WithManifestUsage()) +} + +func (i *image) Usage(ctx context.Context, opts ...UsageOpt) (int64, error) { + var config usageOptions + for _, opt := range opts { + if err := opt(&config); err != nil { + return 0, err + } + } + + var ( + provider = i.client.ContentStore() + handler = images.ChildrenHandler(provider) + size int64 + mustExist bool + ) + + if config.manifestLimit != nil { + handler = images.LimitManifests(handler, i.platform, *config.manifestLimit) + mustExist = true + } + + var wh images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var usage int64 + children, err := handler(ctx, desc) + if err != nil { + if !errdefs.IsNotFound(err) || mustExist { + return nil, err + } + if !config.manifestOnly { + // Do not count size of non-existent objects + desc.Size = 0 + } + } else if config.snapshots || !config.manifestOnly { + info, err := provider.Info(ctx, desc.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + return nil, err + } + if !config.manifestOnly { + // Do not count size of non-existent objects + desc.Size = 0 + } + } else if info.Size > desc.Size { + // Count actual usage, Size may be unset or -1 + desc.Size = info.Size + } + + for k, v := range info.Labels { + const prefix = "containerd.io/gc.ref.snapshot." + if !strings.HasPrefix(k, prefix) { + continue + } + + sn := i.client.SnapshotService(k[len(prefix):]) + if sn == nil { + continue + } + + u, err := sn.Usage(ctx, v) + if err != nil { + if !errdefs.IsNotFound(err) && !errdefs.IsInvalidArgument(err) { + return nil, err + } + } else { + usage += u.Size + } + } + } + + // Ignore unknown sizes. Generally unknown sizes should + // never be set in manifests, however, the usage + // calculation does not need to enforce this. 
+ if desc.Size >= 0 { + usage += desc.Size + } + + atomic.AddInt64(&size, usage) + + return children, nil + } + + l := semaphore.NewWeighted(3) + if err := images.Dispatch(ctx, wh, l, i.i.Target); err != nil { + return 0, err + } + + return size, nil } func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) { @@ -130,13 +267,31 @@ func (i *image) IsUnpacked(ctx context.Context, snapshotterName string) (bool, e return false, nil } -func (i *image) Unpack(ctx context.Context, snapshotterName string) error { +// UnpackConfig provides configuration for the unpack of an image +type UnpackConfig struct { + // ApplyOpts for applying a diff to a snapshotter + ApplyOpts []diff.ApplyOpt + // SnapshotOpts for configuring a snapshotter + SnapshotOpts []snapshots.Opt +} + +// UnpackOpt provides configuration for unpack +type UnpackOpt func(context.Context, *UnpackConfig) error + +func (i *image) Unpack(ctx context.Context, snapshotterName string, opts ...UnpackOpt) error { ctx, done, err := i.client.WithLease(ctx) if err != nil { return err } defer done(ctx) + var config UnpackConfig + for _, o := range opts { + if err := o(ctx, &config); err != nil { + return err + } + } + layers, err := i.getLayers(ctx, i.platform) if err != nil { return err @@ -158,7 +313,7 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string) error { return err } for _, layer := range layers { - unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a) + unpacked, err = rootfs.ApplyLayerWithOpts(ctx, layer, chain, sn, a, config.SnapshotOpts, config.ApplyOpts) if err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/images/archive/exporter.go b/vendor/github.com/containerd/containerd/images/archive/exporter.go index 5440c0d1..244ef322 100644 --- a/vendor/github.com/containerd/containerd/images/archive/exporter.go +++ b/vendor/github.com/containerd/containerd/images/archive/exporter.go @@ -89,31 +89,29 @@ func WithImage(is images.Store, name string) ExportOpt { } // WithManifest adds a manifest to the exported archive. -// It is up to caller to put name annotation to on the manifest -// descriptor if needed. -func WithManifest(manifest ocispec.Descriptor) ExportOpt { +// When names are given they will be set on the manifest in the +// exported archive, creating an index record for each name. +// When no names are provided, it is up to caller to put name annotation to +// on the manifest descriptor if needed. +func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { return func(ctx context.Context, o *exportOptions) error { - o.manifests = append(o.manifests, manifest) - return nil - } -} - -// WithNamedManifest adds a manifest to the exported archive -// with the provided names. 
-func WithNamedManifest(manifest ocispec.Descriptor, names ...string) ExportOpt { - return func(ctx context.Context, o *exportOptions) error { - for _, name := range names { - manifest.Annotations = addNameAnnotation(name, manifest.Annotations) + if len(names) == 0 { o.manifests = append(o.manifests, manifest) } + for _, name := range names { + mc := manifest + mc.Annotations = addNameAnnotation(name, manifest.Annotations) + o.manifests = append(o.manifests, mc) + } return nil } } -func addNameAnnotation(name string, annotations map[string]string) map[string]string { - if annotations == nil { - annotations = map[string]string{} +func addNameAnnotation(name string, base map[string]string) map[string]string { + annotations := map[string]string{} + for k, v := range base { + annotations[k] = v } annotations[images.AnnotationImageName] = name diff --git a/vendor/github.com/containerd/containerd/images/archive/importer.go b/vendor/github.com/containerd/containerd/images/archive/importer.go index 0e641591..5bc88713 100644 --- a/vendor/github.com/containerd/containerd/images/archive/importer.go +++ b/vendor/github.com/containerd/containerd/images/archive/importer.go @@ -22,12 +22,14 @@ import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" "path" "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" digest "github.com/opencontainers/go-digest" @@ -36,6 +38,22 @@ import ( "github.com/pkg/errors" ) +type importOpts struct { + compress bool +} + +// ImportOpt is an option for importing an OCI index +type ImportOpt func(*importOpts) error + +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(io *importOpts) error { + io.compress = true + return nil + } +} + // ImportIndex imports an index from a tar archive image bundle // - implements Docker v1.1, v1.2 and OCI v1. // - prefers OCI v1 when provided @@ -43,8 +61,7 @@ import ( // - normalizes Docker references and adds as OCI ref name // e.g. alpine:latest -> docker.io/library/alpine:latest // - existing OCI reference names are untouched -// - TODO: support option to compress layers on ingest -func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) { +func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) { var ( tr = tar.NewReader(reader) @@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } symlinks = make(map[string]string) blobs = make(map[string]ocispec.Descriptor) + iopts importOpts ) + + for _, o := range opts { + if err := o(&iopts); err != nil { + return ocispec.Descriptor{}, err + } + } + for { hdr, err := tr.Next() if err == io.EOF { @@ -99,7 +124,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc } // If OCI layout was given, interpret the tar as an OCI layout. - // When not provided, the layout of the tar will be interpretted + // When not provided, the layout of the tar will be interpreted // as Docker v1.1 or v1.2. 
if ociLayout.Version != "" { if ociLayout.Version != ocispec.ImageLayoutVersion { @@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc if !ok { return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config) } - config.MediaType = ocispec.MediaTypeImageConfig + config.MediaType = images.MediaTypeDockerSchema2Config - layers, err := resolveLayers(ctx, store, mfst.Layers, blobs) + layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress) if err != nil { return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers") } - manifest := ocispec.Manifest{ - Versioned: specs.Versioned{ - SchemaVersion: 2, - }, - Config: config, - Layers: layers, + manifest := struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + Config ocispec.Descriptor `json:"config"` + Layers []ocispec.Descriptor `json:"layers"` + }{ + SchemaVersion: 2, + MediaType: images.MediaTypeDockerSchema2Manifest, + Config: config, + Layers: layers, } desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest) @@ -211,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size return dgstr.Digest(), nil } -func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) { - var layers []ocispec.Descriptor - for _, f := range layerFiles { +func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) { + layers := make([]ocispec.Descriptor, len(layerFiles)) + descs := map[digest.Digest]*ocispec.Descriptor{} + filters := []string{} + for i, f := range layerFiles { desc, ok := blobs[f] if !ok { return nil, errors.Errorf("layer %q not found", f) } + layers[i] = desc + descs[desc.Digest] = &layers[i] + filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String()) + } + err := store.Walk(ctx, func(info content.Info) error { + dgst, ok := info.Labels["containerd.io/uncompressed"] + if ok { + desc := descs[digest.Digest(dgst)] + if desc != nil { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + desc.Digest = info.Digest + desc.Size = info.Size + } + } + return nil + }, filters...) 
+ if err != nil { + return nil, errors.Wrap(err, "failure checking for compressed blobs") + } + + for i, desc := range layers { + if desc.MediaType != "" { + continue + } // Open blob, resolve media type ra, err := store.ReaderAt(ctx, desc) if err != nil { - return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest) + return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest) } s, err := compression.DecompressStream(content.NewReader(ra)) if err != nil { - return nil, errors.Wrapf(err, "failed to detect compression for %q", f) + return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i]) } if s.GetCompression() == compression.Uncompressed { - // TODO: Support compressing and writing back to content store - desc.MediaType = ocispec.MediaTypeImageLayer + if compress { + ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded()) + labels := map[string]string{ + "containerd.io/uncompressed": desc.Digest.String(), + } + layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels)) + if err != nil { + s.Close() + return nil, err + } + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip + } else { + layers[i].MediaType = images.MediaTypeDockerSchema2Layer + } } else { - desc.MediaType = ocispec.MediaTypeImageLayerGzip + layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip } s.Close() - layers = append(layers, desc) } return layers, nil } +func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) { + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer") + } + + defer func() { + w.Close() + if err != nil { + cs.Abort(ctx, ref) + } + }() + if err := w.Truncate(0); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer") + } + + cw, err := compression.CompressStream(w, compression.Gzip) + if err != nil { + return ocispec.Descriptor{}, err + } + + if _, err := io.Copy(cw, r); err != nil { + return ocispec.Descriptor{}, err + } + if err := cw.Close(); err != nil { + return ocispec.Descriptor{}, err + } + + cst, err := w.Status() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status") + } + + desc.Digest = w.Digest() + desc.Size = cst.Offset + + if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit") + } + } + + return desc, nil +} + func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) { manifestBytes, err := json.Marshal(manifest) if err != nil { diff --git a/vendor/github.com/containerd/containerd/images/archive/reference.go b/vendor/github.com/containerd/containerd/images/archive/reference.go index 19f419ea..cd63517e 100644 --- a/vendor/github.com/containerd/containerd/images/archive/reference.go +++ b/vendor/github.com/containerd/containerd/images/archive/reference.go @@ -91,7 +91,7 @@ func familiarizeReference(ref string) (string, error) { func ociReferenceName(name string) string { // OCI defines the reference name as only a tag excluding the // repository. 
The containerd annotation contains the full image name - // since the tag is insufficent for correctly naming and referring to an + // since the tag is insufficient for correctly naming and referring to an // image var ociRef string if spec, err := reference.Parse(name); err == nil { diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index dac701bb..04c2d5a6 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -117,7 +117,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err // // If any handler returns an error, the dispatch session will be canceled. func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { - eg, ctx := errgroup.WithContext(ctx) + eg, ctx2 := errgroup.WithContext(ctx) for _, desc := range descs { desc := desc @@ -126,10 +126,11 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, return err } } + eg.Go(func() error { desc := desc - children, err := handler.Handle(ctx, desc) + children, err := handler.Handle(ctx2, desc) if limiter != nil { limiter.Release(1) } @@ -141,7 +142,7 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, } if len(children) > 0 { - return Dispatch(ctx, handler, limiter, children...) + return Dispatch(ctx2, handler, limiter, children...) } return nil diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go index f72684d8..ee5778d2 100644 --- a/vendor/github.com/containerd/containerd/images/image.go +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -20,7 +20,6 @@ import ( "context" "encoding/json" "sort" - "strings" "time" "github.com/containerd/containerd/content" @@ -119,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor } size += desc.Size return nil, nil - }), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target) + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) } type platformManifest struct { @@ -142,6 +141,7 @@ type platformManifest struct { // this direction because this abstraction is not needed.` func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { var ( + limit = 1 m []platformManifest wasIndex bool ) @@ -210,10 +210,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } } + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + wasIndex = true + if len(descs) > limit { + return descs[:limit], nil + } return descs, nil - } return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) }), image); err != nil { @@ -227,17 +239,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc } return ocispec.Manifest{}, err } - - sort.SliceStable(m, func(i, j int) bool { - if m[i].p == nil { - return false - } - if m[j].p == nil { - return true - } - return platform.Less(*m[i].p, *m[j].p) - }) - return *m[0].m, nil } @@ -356,15 +357,11 @@ func Children(ctx context.Context, 
provider content.Provider, desc ocispec.Descr } descs = append(descs, index.Manifests...) - case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, - MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip, - MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: - // childless data types. - return nil, nil default: + if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { + // childless data types. + return nil, nil + } log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) } @@ -387,22 +384,3 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D } return config.RootFS.DiffIDs, nil } - -// IsCompressedDiff returns true if mediaType is a known compressed diff media type. -// It returns false if the media type is a diff, but not compressed. If the media type -// is not a known diff type, it returns errdefs.ErrNotImplemented -func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) { - switch mediaType { - case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer: - case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip: - return true, nil - default: - // Still apply all generic media types *.tar[.+]gzip and *.tar - if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") { - return true, nil - } else if !strings.HasSuffix(mediaType, ".tar") { - return false, errdefs.ErrNotImplemented - } - } - return false, nil -} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go index 186a3b67..2f47b0e6 100644 --- a/vendor/github.com/containerd/containerd/images/mediatypes.go +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -16,6 +16,15 @@ package images +import ( + "context" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + // mediatype definitions for image components handled in containerd. // // oci components are generally referenced directly, although we may centralize @@ -40,3 +49,78 @@ const ( // Legacy Docker schema1 manifest MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" ) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. 
+ return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + } + } + return "", nil + default: + return "", errdefs.ErrNotImplemented + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (string, []string) { + if mt == "" { + return "", []string{} + } + + s := strings.Split(mt, "+") + ext := s[1:] + sort.Strings(ext) + + return s[0], ext +} + +// IsLayerTypes returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + base, _ := parseMediaTypes(mt) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsKnownConfig returns true if the media type is a known config type +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go index 0e1531ab..6080161f 100644 --- a/vendor/github.com/containerd/containerd/import.go +++ b/vendor/github.com/containerd/containerd/import.go @@ -35,6 +35,7 @@ type importOpts struct { imageRefT func(string) string dgstRefT func(digest.Digest) string allPlatforms bool + compress bool } // ImportOpt allows the caller to specify import specific options @@ -74,9 +75,18 @@ func WithAllPlatforms(allPlatforms bool) ImportOpt { } } +// WithImportCompression compresses uncompressed layers on import. +// This is used for import formats which do not include the manifest. +func WithImportCompression() ImportOpt { + return func(c *importOpts) error { + c.compress = true + return nil + } +} + // Import imports an image from a Tar stream using reader. // Caller needs to specify importer. Future version may use oci.v1 as the default. -// Note that unreferrenced blobs may be imported to the content store as well. +// Note that unreferenced blobs may be imported to the content store as well. func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt) ([]images.Image, error) { var iopts importOpts for _, o := range opts { @@ -91,7 +101,12 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } defer done(ctx) - index, err := archive.ImportIndex(ctx, c.ContentStore(), reader) + var aio []archive.ImportOpt + if iopts.compress { + aio = append(aio, archive.WithImportCompression()) + } + + index, err := archive.ImportIndex(ctx, c.ContentStore(), reader, aio...) 
if err != nil { return nil, err } @@ -110,7 +125,7 @@ func (c *Client) Import(ctx context.Context, reader io.Reader, opts ...ImportOpt } var platformMatcher = platforms.All if !iopts.allPlatforms { - platformMatcher = platforms.Default() + platformMatcher = c.platform } var handler images.HandlerFunc = func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { diff --git a/vendor/github.com/containerd/containerd/install.go b/vendor/github.com/containerd/containerd/install.go index 4545d455..5b8b735d 100644 --- a/vendor/github.com/containerd/containerd/install.go +++ b/vendor/github.com/containerd/containerd/install.go @@ -27,7 +27,6 @@ import ( "github.com/containerd/containerd/archive/compression" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" "github.com/pkg/errors" ) @@ -43,7 +42,7 @@ func (c *Client) Install(ctx context.Context, image Image, opts ...InstallOpts) } var ( cs = image.ContentStore() - platform = platforms.Default() + platform = c.platform ) manifest, err := images.Manifest(ctx, cs, image.Target(), platform) if err != nil { diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 3fab96b8..31f1a3ac 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -30,7 +30,7 @@ var ( // messages. G = GetLogger - // L is an alias for the the standard logger. + // L is an alias for the standard logger. L = logrus.NewEntry(logrus.StandardLogger()) ) diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go index b4e988e7..20596f09 100644 --- a/vendor/github.com/containerd/containerd/namespaces/context.go +++ b/vendor/github.com/containerd/containerd/namespaces/context.go @@ -64,7 +64,7 @@ func Namespace(ctx context.Context) (string, bool) { return namespace, ok } -// NamespaceRequired returns the valid namepace from the context or an error. +// NamespaceRequired returns the valid namespace from the context or an error. func NamespaceRequired(ctx context.Context) (string, error) { namespace, ok := Namespace(ctx) if !ok || namespace == "" { diff --git a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go index 2224334d..bcd2643c 100644 --- a/vendor/github.com/containerd/containerd/namespaces/ttrpc.go +++ b/vendor/github.com/containerd/containerd/namespaces/ttrpc.go @@ -27,10 +27,20 @@ const ( TTRPCHeader = "containerd-namespace-ttrpc" ) +func copyMetadata(src ttrpc.MD) ttrpc.MD { + md := ttrpc.MD{} + for k, v := range src { + md[k] = append(md[k], v...) + } + return md +} + func withTTRPCNamespaceHeader(ctx context.Context, namespace string) context.Context { md, ok := ttrpc.GetMetadata(ctx) if !ok { md = ttrpc.MD{} + } else { + md = copyMetadata(md) } md.Set(TTRPCHeader, namespace) return ttrpc.WithMetadata(ctx, md) diff --git a/vendor/github.com/containerd/containerd/namespaces_opts_linux.go b/vendor/github.com/containerd/containerd/namespaces_opts_linux.go deleted file mode 100644 index 6b8cc8f8..00000000 --- a/vendor/github.com/containerd/containerd/namespaces_opts_linux.go +++ /dev/null @@ -1,36 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package containerd - -import ( - "context" - - "github.com/containerd/cgroups" - "github.com/containerd/containerd/namespaces" -) - -// WithNamespaceCgroupDeletion removes the cgroup directory that was created for the namespace -func WithNamespaceCgroupDeletion(ctx context.Context, i *namespaces.DeleteInfo) error { - cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(i.Name)) - if err != nil { - if err == cgroups.ErrCgroupDeleted { - return nil - } - return err - } - return cg.Delete() -} diff --git a/vendor/github.com/containerd/containerd/oci/spec.go b/vendor/github.com/containerd/containerd/oci/spec.go index 3e7b5492..035bb7e7 100644 --- a/vendor/github.com/containerd/containerd/oci/spec.go +++ b/vendor/github.com/containerd/containerd/oci/spec.go @@ -78,7 +78,7 @@ func generateDefaultSpecWithPlatform(ctx context.Context, platform, id string, s return err } -// ApplyOpts applys the options to the given spec, injecting data from the +// ApplyOpts applies the options to the given spec, injecting data from the // context, client and container instance. func ApplyOpts(ctx context.Context, client Client, c *containers.Container, s *Spec, opts ...SpecOpts) error { for _, o := range opts { @@ -141,7 +141,6 @@ func populateDefaultUnixSpec(ctx context.Context, s *Spec, id string) error { Path: defaultRootfsPath, }, Process: &specs.Process{ - Env: defaultUnixEnv, Cwd: "/", NoNewPrivileges: true, User: specs.User{ diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts.go b/vendor/github.com/containerd/containerd/oci/spec_opts.go index ce756108..ad6b52a9 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts.go @@ -17,6 +17,7 @@ package oci import ( + "bufio" "context" "encoding/json" "fmt" @@ -76,6 +77,20 @@ func setLinux(s *Spec) { } } +// nolint +func setResources(s *Spec) { + if s.Linux != nil { + if s.Linux.Resources == nil { + s.Linux.Resources = &specs.LinuxResources{} + } + } + if s.Windows != nil { + if s.Windows.Resources == nil { + s.Windows.Resources = &specs.WindowsResources{} + } + } +} + // setCapabilities sets Linux Capabilities to empty if unset func setCapabilities(s *Spec) { setProcess(s) @@ -104,7 +119,7 @@ func WithDefaultSpecForPlatform(platform string) SpecOpts { } } -// WithSpecFromBytes loads the the spec from the provided byte slice. +// WithSpecFromBytes loads the spec from the provided byte slice. func WithSpecFromBytes(p []byte) SpecOpts { return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { *s = Spec{} // make sure spec is cleared. @@ -137,6 +152,13 @@ func WithEnv(environmentVariables []string) SpecOpts { } } +// WithDefaultPathEnv sets the $PATH environment variable to the +// default PATH defined in this package. 
+func WithDefaultPathEnv(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, defaultUnixEnv) + return nil +} + // replaceOrAppendEnvValues returns the defaults with the overrides either // replaced by env key or appended to the list func replaceOrAppendEnvValues(defaults, overrides []string) []string { @@ -312,7 +334,11 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { setProcess(s) if s.Linux != nil { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) + defaults := config.Env + if len(defaults) == 0 { + defaults = defaultUnixEnv + } + s.Process.Env = replaceOrAppendEnvValues(defaults, s.Process.Env) cmd := config.Cmd if len(args) > 0 { cmd = args @@ -334,7 +360,7 @@ func WithImageConfigArgs(image Image, args []string) SpecOpts { // even if there is no specified user in the image config return WithAdditionalGIDs("root")(ctx, client, c, s) } else if s.Windows != nil { - s.Process.Env = replaceOrAppendEnvValues(s.Process.Env, config.Env) + s.Process.Env = replaceOrAppendEnvValues(config.Env, s.Process.Env) cmd := config.Cmd if len(args) > 0 { cmd = args @@ -607,7 +633,7 @@ func WithUserID(uid uint32) SpecOpts { } // WithUsername sets the correct UID and GID for the container -// based on the the image's /etc/passwd contents. If /etc/passwd +// based on the image's /etc/passwd contents. If /etc/passwd // does not exist, or the username is not found in /etc/passwd, // it returns error. func WithUsername(username string) SpecOpts { @@ -1139,3 +1165,85 @@ func WithAnnotations(annotations map[string]string) SpecOpts { return nil } } + +// WithLinuxDevices adds the provided linux devices to the spec +func WithLinuxDevices(devices []specs.LinuxDevice) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + s.Linux.Devices = append(s.Linux.Devices, devices...) + return nil + } +} + +var ErrNotADevice = errors.New("not a device node") + +// WithLinuxDevice adds the device specified by path to the spec +func WithLinuxDevice(path, permissions string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + setLinux(s) + setResources(s) + + dev, err := deviceFromPath(path, permissions) + if err != nil { + return err + } + + s.Linux.Devices = append(s.Linux.Devices, *dev) + + s.Linux.Resources.Devices = append(s.Linux.Resources.Devices, specs.LinuxDeviceCgroup{ + Type: dev.Type, + Allow: true, + Major: &dev.Major, + Minor: &dev.Minor, + Access: permissions, + }) + + return nil + } +} + +// WithEnvFile adds environment variables from a file to the container's spec +func WithEnvFile(path string) SpecOpts { + return func(_ context.Context, _ Client, _ *containers.Container, s *Spec) error { + var vars []string + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + + sc := bufio.NewScanner(f) + for sc.Scan() { + if sc.Err() != nil { + return sc.Err() + } + vars = append(vars, sc.Text()) + } + return WithEnv(vars)(nil, nil, nil, s) + } +} + +// ErrNoShmMount is returned when there is no /dev/shm mount specified in the config +// and an Opts was trying to set a configuration value on the mount. +var ErrNoShmMount = errors.New("no /dev/shm mount specified") + +// WithDevShmSize sets the size of the /dev/shm mount for the container. +// +// The size value is specified in kb, kilobytes. 
+func WithDevShmSize(kb int64) SpecOpts { + return func(ctx context.Context, _ Client, c *containers.Container, s *Spec) error { + for _, m := range s.Mounts { + if m.Source == "shm" && m.Type == "tmpfs" { + for i, o := range m.Options { + if strings.HasPrefix(o, "size=") { + m.Options[i] = fmt.Sprintf("size=%dk", kb) + return nil + } + } + m.Options = append(m.Options, fmt.Sprintf("size=%dk", kb)) + return nil + } + } + return ErrNoShmMount + } +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go new file mode 100644 index 00000000..918c8f4e --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_linux.go @@ -0,0 +1,64 @@ +// +build linux + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + // The type is 32bit on mips. + devNumber = uint64(stat.Rdev) // nolint: unconvert + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go new file mode 100644 index 00000000..3f63dfd1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_unix.go @@ -0,0 +1,63 @@ +// +build !linux,!windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package oci + +import ( + "os" + + specs "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/unix" +) + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + var stat unix.Stat_t + if err := unix.Lstat(path, &stat); err != nil { + return nil, err + } + + var ( + devNumber = uint64(stat.Rdev) + major = unix.Major(devNumber) + minor = unix.Minor(devNumber) + ) + if major == 0 { + return nil, ErrNotADevice + } + + var ( + devType string + mode = stat.Mode + ) + switch { + case mode&unix.S_IFBLK == unix.S_IFBLK: + devType = "b" + case mode&unix.S_IFCHR == unix.S_IFCHR: + devType = "c" + } + fm := os.FileMode(mode) + return &specs.LinuxDevice{ + Type: devType, + Path: path, + Major: int64(major), + Minor: int64(minor), + FileMode: &fm, + UID: &stat.Uid, + GID: &stat.Gid, + }, nil +} diff --git a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go index fbe1cb33..d265d544 100644 --- a/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go +++ b/vendor/github.com/containerd/containerd/oci/spec_opts_windows.go @@ -23,6 +23,7 @@ import ( "github.com/containerd/containerd/containers" specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" ) // WithWindowsCPUCount sets the `Windows.Resources.CPU.Count` section to the @@ -65,3 +66,7 @@ func WithWindowNetworksAllowUnqualifiedDNSQuery() SpecOpts { return nil } } + +func deviceFromPath(path, permissions string) (*specs.LinuxDevice, error) { + return nil, errors.New("device from path not supported on Windows") +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go index 8e85448e..6ede9406 100644 --- a/vendor/github.com/containerd/containerd/platforms/database.go +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -28,7 +28,7 @@ func isLinuxOS(os string) bool { return os == "linux" } -// These function are generated from from https://golang.org/src/go/build/syslist.go. +// These function are generated from https://golang.org/src/go/build/syslist.go. // // We use switch statements because they are slightly faster than map lookups // and use a little less memory. @@ -38,7 +38,7 @@ func isLinuxOS(os string) bool { // The OS value should be normalized before calling this function. func isKnownOS(os string) bool { switch os { - case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": return true } return false @@ -60,7 +60,7 @@ func isArmArch(arch string) bool { // The arch value should be normalized before being passed to this function. 
func isKnownArch(arch string) bool { switch arch { - case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64": + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": return true } return false diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go index 2c2cc110..d2b73ac3 100644 --- a/vendor/github.com/containerd/containerd/platforms/platforms.go +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -130,7 +130,7 @@ type Matcher interface { // specification. The returned matcher only looks for equality based on os, // architecture and variant. // -// One may implement their own matcher if this doesn't provide the the required +// One may implement their own matcher if this doesn't provide the required // functionality. // // Applications should opt to use `Match` over directly parsing specifiers. diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go index 1211c907..75b7366f 100644 --- a/vendor/github.com/containerd/containerd/plugin/context.go +++ b/vendor/github.com/containerd/containerd/plugin/context.go @@ -28,12 +28,13 @@ import ( // InitContext is used for plugin inititalization type InitContext struct { - Context context.Context - Root string - State string - Config interface{} - Address string - Events *exchange.Exchange + Context context.Context + Root string + State string + Config interface{} + Address string + TTRPCAddress string + Events *exchange.Exchange Meta *Meta // plugins can fill in metadata at init. diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index 14732d99..5b302569 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -44,7 +44,7 @@ type Process interface { Wait(context.Context) (<-chan ExitStatus, error) // CloseIO allows various pipes to be closed on the process CloseIO(context.Context, ...IOCloserOpts) error - // Resize changes the width and heigh of the process's terminal + // Resize changes the width and height of the process's terminal Resize(ctx context.Context, w, h uint32) error // IO returns the io set for the process IO() cio.IO @@ -61,7 +61,7 @@ func NewExitStatus(code uint32, t time.Time, err error) *ExitStatus { } } -// ExitStatus encapsulates a process' exit status. +// ExitStatus encapsulates a process's exit status. 
// It is used by `Wait()` to return either a process exit code or an error type ExitStatus struct { code uint32 diff --git a/vendor/github.com/containerd/containerd/pull.go b/vendor/github.com/containerd/containerd/pull.go index 693dcafe..2520639d 100644 --- a/vendor/github.com/containerd/containerd/pull.go +++ b/vendor/github.com/containerd/containerd/pull.go @@ -32,7 +32,7 @@ import ( // Pull downloads the provided content into containerd's content store // and returns a platform specific image object -func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) { +func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (_ Image, retErr error) { pullCtx := defaultRemoteContext() for _, o := range opts { if err := o(c, pullCtx); err != nil { @@ -44,7 +44,7 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image if len(pullCtx.Platforms) > 1 { return nil, errors.New("cannot pull multiplatform image locally, try Fetch") } else if len(pullCtx.Platforms) == 0 { - pullCtx.PlatformMatcher = platforms.Default() + pullCtx.PlatformMatcher = c.platform } else { p, err := platforms.Parse(pullCtx.Platforms[0]) if err != nil { @@ -61,6 +61,30 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image } defer done(ctx) + var unpacks int32 + if pullCtx.Unpack { + // unpacker only supports schema 2 image, for schema 1 this is noop. + u, err := c.newUnpacker(ctx, pullCtx) + if err != nil { + return nil, errors.Wrap(err, "create unpacker") + } + unpackWrapper, eg := u.handlerWrapper(ctx, &unpacks) + defer func() { + if err := eg.Wait(); err != nil { + if retErr == nil { + retErr = errors.Wrap(err, "unpack") + } + } + }() + wrapper := pullCtx.HandlerWrapper + pullCtx.HandlerWrapper = func(h images.Handler) images.Handler { + if wrapper == nil { + return unpackWrapper(h) + } + return wrapper(unpackWrapper(h)) + } + } + img, err := c.fetch(ctx, pullCtx, ref, 1) if err != nil { return nil, err @@ -69,8 +93,12 @@ func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image i := NewImageWithPlatform(c, img, pullCtx.PlatformMatcher) if pullCtx.Unpack { - if err := i.Unpack(ctx, pullCtx.Snapshotter); err != nil { - return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + if unpacks == 0 { + // Try to unpack is none is done previously. + // This is at least required for schema 1 image. + if err := i.Unpack(ctx, pullCtx.Snapshotter, pullCtx.UnpackOpts...); err != nil { + return nil, errors.Wrapf(err, "failed to unpack image on snapshotter %s", pullCtx.Snapshotter) + } } } @@ -112,9 +140,14 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim childrenHandler := images.ChildrenHandler(store) // Set any children labels for that content childrenHandler = images.SetChildrenLabels(store, childrenHandler) - // Filter manifests by platforms but allow to handle manifest - // and configuration for not-target platforms - childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + if rCtx.AllMetadata { + // Filter manifests by platforms but allow to handle manifest + // and configuration for not-target platforms + childrenHandler = remotes.FilterManifestByPlatformHandler(childrenHandler, rCtx.PlatformMatcher) + } else { + // Filter children by platforms if specified. 
+ childrenHandler = images.FilterPlatforms(childrenHandler, rCtx.PlatformMatcher) + } // Sort and limit manifests if a finite number is needed if limit > 0 { childrenHandler = images.LimitManifests(childrenHandler, rCtx.PlatformMatcher, limit) @@ -131,22 +164,18 @@ func (c *Client) fetch(ctx context.Context, rCtx *RemoteContext, ref string, lim }, ) + appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) + if err != nil { + return images.Image{}, err + } + handlers := append(rCtx.BaseHandlers, remotes.FetchHandler(store, fetcher), convertibleHandler, childrenHandler, + appendDistSrcLabelHandler, ) - // append distribution source label to blob data - if rCtx.AppendDistributionSourceLabel { - appendDistSrcLabelHandler, err := docker.AppendDistributionSourceLabel(store, ref) - if err != nil { - return images.Image{}, err - } - - handlers = append(handlers, appendDistSrcLabelHandler) - } - handler = images.Handlers(handlers...) converterFunc = func(ctx context.Context, desc ocispec.Descriptor) (ocispec.Descriptor, error) { diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go index bec9b7a0..9652d3ac 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -31,7 +31,6 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" - "github.com/containerd/containerd/version" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/net/context/ctxhttp" @@ -41,7 +40,7 @@ type dockerAuthorizer struct { credentials func(string) (string, string, error) client *http.Client - ua string + header http.Header mu sync.Mutex // indexed by host name @@ -50,15 +49,58 @@ type dockerAuthorizer struct { // NewAuthorizer creates a Docker authorizer using the provided function to // get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { - if client == nil { - client = http.DefaultClient + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client + } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. 
+// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient } return &dockerAuthorizer{ - credentials: f, - client: client, - ua: "containerd/" + version.Version, + credentials: ao.credentials, + client: ao.client, + header: ao.header, handlers: make(map[string]*authHandler), } } @@ -115,7 +157,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R return err } - a.handlers[host] = newAuthHandler(a.client, a.ua, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) return nil } else if c.scheme == basicAuth && a.credentials != nil { username, secret, err := a.credentials(host) @@ -129,7 +171,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R secret: secret, } - a.handlers[host] = newAuthHandler(a.client, a.ua, c.scheme, common) + a.handlers[host] = newAuthHandler(a.client, a.header, c.scheme, common) return nil } } @@ -179,7 +221,7 @@ type authResult struct { type authHandler struct { sync.Mutex - ua string + header http.Header client *http.Client @@ -194,13 +236,9 @@ type authHandler struct { scopedTokens map[string]*authResult } -func newAuthHandler(client *http.Client, ua string, scheme authenticationScheme, opts tokenOptions) *authHandler { - if client == nil { - client = http.DefaultClient - } - +func newAuthHandler(client *http.Client, hdr http.Header, scheme authenticationScheme, opts tokenOptions) *authHandler { return &authHandler{ - ua: ua, + header: hdr, client: client, scheme: scheme, common: opts, @@ -227,7 +265,7 @@ func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { } auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) - return fmt.Sprintf("%s %s", "Basic", auth), nil + return fmt.Sprintf("Basic %s", auth), nil } func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { @@ -269,7 +307,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (string, error) { token, err = ah.fetchToken(ctx, to) err = errors.Wrap(err, "failed to fetch anonymous token") } - token = fmt.Sprintf("%s %s", "Bearer", token) + token = fmt.Sprintf("Bearer %s", token) r.token, r.err = token, err r.Done() @@ -313,8 +351,10 @@ func (ah *authHandler) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) return "", err } req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - if ah.ua != "" { - req.Header.Set("User-Agent", ah.ua) + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) + } } resp, err := ctxhttp.Do(ctx, ah.client, req) @@ -363,8 +403,10 @@ func (ah *authHandler) fetchToken(ctx context.Context, to tokenOptions) (string, return "", err } - if ah.ua != "" { - req.Header.Set("User-Agent", ah.ua) + if ah.header != nil { + for k, v := range ah.header { + req.Header[k] = append(req.Header[k], v...) 
+ } } reqParams := req.URL.Query() diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 6f06b0e5..ad8482fa 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -23,7 +23,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "github.com/containerd/containerd/errdefs" @@ -32,7 +32,6 @@ import ( "github.com/docker/distribution/registry/api/errcode" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type dockerFetcher struct { @@ -40,26 +39,46 @@ type dockerFetcher struct { } func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithFields( - logrus.Fields{ - "base": r.base.String(), - "digest": desc.Digest, - }, - )) + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) - urls, err := r.getV2URLPaths(ctx, desc) - if err != nil { - return nil, err + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") } - ctx, err = contextWithRepositoryScope(ctx, r.refspec, false) + ctx, err := contextWithRepositoryScope(ctx, r.refspec, false) if err != nil { return nil, err } return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { - for _, u := range urls { - rc, err := r.open(ctx, u, desc.MediaType, offset) + // firstly try fetch via external urls + for _, us := range desc.URLs { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) if err != nil { if errdefs.IsNotFound(err) { continue // try one of the other urls. 
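A minimal usage sketch (not part of the vendored files) for the authorizer.go changes above, which replace the single user-agent string with arbitrary request headers behind functional options; the header value and credential callback below are placeholders:

package authexample

import (
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

// newAuthorizer assembles an Authorizer via the options added in this diff.
// The deprecated NewAuthorizer(client, creds) now simply delegates to this path.
func newAuthorizer(creds func(string) (string, string, error)) docker.Authorizer {
	hdr := http.Header{}
	hdr.Set("User-Agent", "containerd/1.3.0") // placeholder; replaces the old `ua` field
	return docker.NewDockerAuthorizer(
		docker.WithAuthClient(http.DefaultClient),
		docker.WithAuthHeader(hdr),
		docker.WithAuthCreds(creds),
	)
}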
@@ -71,6 +90,44 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R return rc, nil } + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + } + + // Finally use blobs endpoints + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try another host + } + + return nil, err + } + + return rc, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "could not fetch content descriptor %v (%v) from remote", desc.Digest, desc.MediaType) @@ -78,22 +135,17 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R }) } -func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) { - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return nil, err - } - - req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", ")) +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (io.ReadCloser, error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) if offset > 0 { // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints // will return the header without supporting the range. The content // range must always be checked. - req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) } - resp, err := r.doRequestWithRetries(ctx, req, nil) + resp, err := req.doWithRetries(ctx, nil) if err != nil { return nil, err } @@ -106,13 +158,13 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int defer resp.Body.Close() if resp.StatusCode == http.StatusNotFound { - return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u) + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) } var registryErr errcode.Errors if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { - return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) } - return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", u, resp.Status, registryErr.Error()) + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) } if offset > 0 { cr := resp.Header.Get("content-range") @@ -141,30 +193,3 @@ func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int return resp.Body, nil } - -// getV2URLPaths generates the candidate urls paths for the object based on the -// set of hints and the provided object id. URLs are returned in the order of -// most to least likely succeed. 
-func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) { - var urls []string - - if len(desc.URLs) > 0 { - // handle fetch via external urls. - for _, u := range desc.URLs { - log.G(ctx).WithField("url", u).Debug("adding alternative url") - urls = append(urls, u) - } - } - - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, - images.MediaTypeDockerSchema1Manifest, - ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: - urls = append(urls, r.url(path.Join("manifests", desc.Digest.String()))) - } - - // always fallback to attempting to get the object out of the blobs store. - urls = append(urls, r.url(path.Join("blobs", desc.Digest.String()))) - - return urls, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go index af8a7742..a96fe5a9 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -21,7 +21,7 @@ import ( "io" "io/ioutil" "net/http" - "path" + "net/url" "strings" "time" @@ -37,7 +37,7 @@ import ( type dockerPusher struct { *dockerBase - tag string + object string // TODO: namespace tracker tracker StatusTracker @@ -59,31 +59,32 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Wrap(err, "failed to get status") } + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + var ( isManifest bool - existCheck string + existCheck []string + host = hosts[0] ) switch desc.MediaType { case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: isManifest = true - if p.tag == "" { - existCheck = path.Join("manifests", desc.Digest.String()) - } else { - existCheck = path.Join("manifests", p.tag) - } + existCheck = getManifestPath(p.object, desc.Digest) default: - existCheck = path.Join("blobs", desc.Digest.String()) + existCheck = []string{"blobs", desc.Digest.String()} } - req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil) - if err != nil { - return nil, err - } + req := p.request(host, http.MethodHead, existCheck...) 
+ req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) - req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", ")) - resp, err := p.doRequestWithRetries(ctx, req, nil) + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) if err != nil { if errors.Cause(err) != ErrInvalidAuthorization { return nil, err @@ -92,7 +93,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } else { if resp.StatusCode == http.StatusOK { var exists bool - if isManifest && p.tag != "" { + if isManifest && existCheck[1] != desc.Digest.String() { dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) if dgstHeader == desc.Digest { exists = true @@ -117,28 +118,16 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten } if isManifest { - var putPath string - if p.tag != "" { - putPath = path.Join("manifests", p.tag) - } else { - putPath = path.Join("manifests", desc.Digest.String()) - } - - req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil) - if err != nil { - return nil, err - } - req.Header.Add("Content-Type", desc.MediaType) + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) } else { // Start upload request - req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil) - if err != nil { - return nil, err - } + req = p.request(host, http.MethodPost, "blobs", "uploads/") var resp *http.Response if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { - req = requestWithMountFrom(req, desc.Digest.String(), fromRepo) + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) pctx := contextWithAppendPullRepositoryScope(ctx, fromRepo) // NOTE: the fromRepo might be private repo and @@ -147,7 +136,7 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten // // for the private repo, we should remove mount-from // query and send the request again. 
- resp, err = p.doRequest(pctx, req) + resp, err = preq.do(pctx) if err != nil { return nil, err } @@ -157,16 +146,11 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten resp.Body.Close() resp = nil - - req, err = removeMountFromQuery(req) - if err != nil { - return nil, err - } } } if resp == nil { - resp, err = p.doRequestWithRetries(ctx, req, nil) + resp, err = req.doWithRetries(ctx, nil) if err != nil { return nil, err } @@ -186,31 +170,41 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten return nil, errors.Errorf("unexpected response: %s", resp.Status) } - location := resp.Header.Get("Location") + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) // Support paths without host in location if strings.HasPrefix(location, "/") { - // Support location string containing path and query - qmIndex := strings.Index(location, "?") - if qmIndex > 0 { - u := p.base - u.Path = location[:qmIndex] - u.RawQuery = location[qmIndex+1:] - location = u.String() - } else { - u := p.base - u.Path = location - location = u.String() + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil } } - - req, err = http.NewRequest(http.MethodPut, location, nil) - if err != nil { - return nil, err - } - q := req.URL.Query() + q := lurl.Query() q.Add("digest", desc.Digest.String()) - req.URL.RawQuery = q.Encode() + req = p.request(lhost, http.MethodPut) + req.path = lurl.Path + "?" 
+ q.Encode() } p.tracker.SetStatus(ref, Status{ Status: content.Status{ @@ -225,13 +219,22 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten pr, pw := io.Pipe() respC := make(chan *http.Response, 1) + body := ioutil.NopCloser(pr) - req.Body = ioutil.NopCloser(pr) - req.ContentLength = desc.Size + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size go func() { defer close(respC) - resp, err = p.doRequest(ctx, req) + resp, err = req.do(ctx) if err != nil { pr.CloseWithError(err) return @@ -257,6 +260,25 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten }, nil } +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + type pushWriter struct { base *dockerBase ref string @@ -330,7 +352,7 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di } if size > 0 && size != status.Offset { - return errors.Errorf("unxpected size %d, expected %d", status.Offset, size) + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) } if expected == "" { @@ -355,24 +377,15 @@ func (pw *pushWriter) Truncate(size int64) error { return errors.New("cannot truncate remote upload") } -func requestWithMountFrom(req *http.Request, mount, from string) *http.Request { - q := req.URL.Query() +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req - q.Set("mount", mount) - q.Set("from", from) - req.URL.RawQuery = q.Encode() - return req -} - -func removeMountFromQuery(req *http.Request) (*http.Request, error) { - req, err := copyRequest(req) - if err != nil { - return nil, err + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" } - q := req.URL.Query() - q.Del("mount") - q.Del("from") - req.URL.RawQuery = q.Encode() - return req, nil + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 00000000..ae24f41e --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net/http" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. 
+// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. +// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (i.e. search, catalog, remove) +) + +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of an distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not join configurations, as soon as a non-empty +// configuration is returned from a configuration function, it will be returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. 
+func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries. For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. +// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +func MatchLocalhost(host string) (bool, error) { + for _, s := range []string{"localhost", "127.0.0.1", "[::1]"} { + if len(host) >= len(s) && host[0:len(s)] == s && (len(host) == len(s) || host[len(s)] == ':') { + return true, nil + } + } + return host == "::1", nil + +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index 74d1f421..f126449c 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -18,9 +18,10 @@ package docker import ( "context" + "fmt" "io" + "io/ioutil" "net/http" - "net/url" "path" "strings" @@ -46,6 +47,19 @@ var ( // ErrInvalidAuthorization is used when credentials are passed to a server but // those credentials are rejected. ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. 
A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 ) // Authorizer is used to authorize HTTP requests based on 401 HTTP responses. @@ -72,31 +86,38 @@ type Authorizer interface { // ResolverOptions are used to configured a new Docker register resolver type ResolverOptions struct { - // Authorizer is used to authorize registry requests - Authorizer Authorizer - - // Credentials provides username and secret given a host. - // If username is empty but a secret is given, that secret - // is interpreted as a long lived token. - // Deprecated: use Authorizer - Credentials func(string) (string, string, error) - - // Host provides the hostname given a namespace. - Host func(string) (string, error) + // Hosts returns registry host configurations for a namespace. + Hosts RegistryHosts // Headers are the HTTP request header fields sent by the resolver Headers http.Header - // PlainHTTP specifies to use plain http and not https - PlainHTTP bool - - // Client is the http client to used when making registry requests - Client *http.Client - // Tracker is used to track uploads to the registry. This is used // since the registry does not have upload tracking and the existing // mechanism for getting blob upload status is expensive. Tracker StatusTracker + + // Authorizer is used to authorize registry requests + // Deprecated: use Hosts + Authorizer Authorizer + + // Credentials provides username and secret given a host. + // If username is empty but a secret is given, that secret + // is interpreted as a long lived token. + // Deprecated: use Hosts + Credentials func(string) (string, string, error) + + // Host provides the hostname given a namespace. + // Deprecated: use Hosts + Host func(string) (string, error) + + // PlainHTTP specifies to use plain http and not https + // Deprecated: use Hosts + PlainHTTP bool + + // Client is the http client to used when making registry requests + // Deprecated: use Hosts + Client *http.Client } // DefaultHost is the default host function. @@ -108,13 +129,10 @@ func DefaultHost(ns string) (string, error) { } type dockerResolver struct { - auth Authorizer - host func(string) (string, error) - headers http.Header - uagent string - plainHTTP bool - client *http.Client - tracker StatusTracker + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker } // NewResolver returns a new resolver to a Docker registry @@ -122,39 +140,56 @@ func NewResolver(options ResolverOptions) remotes.Resolver { if options.Tracker == nil { options.Tracker = NewInMemoryTracker() } - if options.Host == nil { - options.Host = DefaultHost - } + if options.Headers == nil { options.Headers = make(http.Header) } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} if _, ok := options.Headers["Accept"]; !ok { // set headers for all the types we support for resolution. 
- options.Headers.Set("Accept", strings.Join([]string{ + resolveHeader.Set("Accept", strings.Join([]string{ images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageManifest, - ocispec.MediaTypeImageIndex, "*"}, ", ")) - } - ua := options.Headers.Get("User-Agent") - if ua != "" { - options.Headers.Del("User-Agent") + ocispec.MediaTypeImageIndex, "*/*"}, ", ")) } else { - ua = "containerd/" + version.Version + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") } - if options.Authorizer == nil { - options.Authorizer = NewAuthorizer(options.Client, options.Credentials) - options.Authorizer.(*dockerAuthorizer).ua = ua + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) } return &dockerResolver{ - auth: options.Authorizer, - host: options.Host, - headers: options.Headers, - uagent: ua, - plainHTTP: options.PlainHTTP, - client: options.Client, - tracker: options.Tracker, + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, } } @@ -201,13 +236,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp return "", ocispec.Descriptor{}, err } - fetcher := dockerFetcher{ - dockerBase: base, - } - var ( - urls []string - dgst = refspec.Digest() + lastErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull ) if dgst != "" { @@ -218,100 +251,130 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp } // turns out, we have a valid digest, make a url. - urls = append(urls, fetcher.url("manifests", dgst.String())) + paths = append(paths, []string{"manifests", dgst.String()}) // fallback to blobs on not found. - urls = append(urls, fetcher.url("blobs", dgst.String())) + paths = append(paths, []string{"blobs", dgst.String()}) } else { - urls = append(urls, fetcher.url("manifests", refspec.Object)) + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") } ctx, err = contextWithRepositoryScope(ctx, refspec, false) if err != nil { return "", ocispec.Descriptor{}, err } - for _, u := range urls { - req, err := http.NewRequest(http.MethodHead, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) - log.G(ctx).Debug("resolving") - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - if errors.Cause(err) == ErrInvalidAuthorization { - err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + req := base.request(host, http.MethodHead, u...) 
+ for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) } - return "", ocispec.Descriptor{}, err - } - resp.Body.Close() // don't care about body contents. - if resp.StatusCode > 299 { - if resp.StatusCode == http.StatusNotFound { + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Cause(err) == ErrInvalidAuthorization { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } + return "", ocispec.Descriptor{}, err + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + continue + } + return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if lastErr == nil { + lastErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } continue } - return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status) + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil } - size := resp.ContentLength - - // this is the only point at which we trust the registry. we use the - // content headers to assemble a descriptor for the name. when this becomes - // more robust, we mostly get this information from a secure trust store. 
- dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) - contentType := getManifestMediaType(resp) - - if dgstHeader != "" && size != -1 { - if err := dgstHeader.Validate(); err != nil { - return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) - } - dgst = dgstHeader - } else { - log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") - - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - req.Header = r.headers - - resp, err := fetcher.doRequestWithRetries(ctx, req, nil) - if err != nil { - return "", ocispec.Descriptor{}, err - } - defer resp.Body.Close() - - bodyReader := countingReader{reader: resp.Body} - - contentType = getManifestMediaType(resp) - if contentType == images.MediaTypeDockerSchema1Manifest { - b, err := schema1.ReadStripSignature(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - - dgst = digest.FromBytes(b) - } else { - dgst, err = digest.FromReader(&bodyReader) - if err != nil { - return "", ocispec.Descriptor{}, err - } - } - size = bodyReader.bytesRead - } - - desc := ocispec.Descriptor{ - Digest: dgst, - MediaType: contentType, - Size: size, - } - - log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") - return ref, desc, nil } - return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref) + if lastErr == nil { + lastErr = errors.Wrap(errdefs.ErrNotFound, ref) + } + + return "", ocispec.Descriptor{}, lastErr } func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { @@ -336,13 +399,6 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return nil, err } - // Manifests can be pushed by digest like any other object, but the passed in - // reference cannot take a digest without the associated content. A tag is allowed - // and will be used to tag pushed manifests. 
- if refspec.Object != "" && strings.Contains(refspec.Object, "@") { - return nil, errors.New("cannot use digest reference for push locator") - } - base, err := r.base(refspec) if err != nil { return nil, err @@ -350,62 +406,64 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher return dockerPusher{ dockerBase: base, - tag: refspec.Object, + object: refspec.Object, tracker: r.tracker, }, nil } type dockerBase struct { - refspec reference.Spec - base url.URL - uagent string - - client *http.Client - auth Authorizer + refspec reference.Spec + namespace string + hosts []RegistryHost + header http.Header } func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { - var ( - err error - base url.URL - ) - host := refspec.Hostname() - base.Host = host - if r.host != nil { - base.Host, err = r.host(host) - if err != nil { - return nil, err - } + hosts, err := r.hosts(host) + if err != nil { + return nil, err } - - base.Scheme = "https" - if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") { - base.Scheme = "http" - } - - prefix := strings.TrimPrefix(refspec.Locator, host+"/") - base.Path = path.Join("/v2", prefix) - return &dockerBase{ - refspec: refspec, - base: base, - uagent: r.uagent, - client: r.client, - auth: r.auth, + refspec: refspec, + namespace: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, }, nil } -func (r *dockerBase) url(ps ...string) string { - url := r.base - url.Path = path.Join(url.Path, path.Join(ps...)) - return url.String() +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return } -func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := http.Header{} + for key, value := range r.header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.namespace}, ps...) + p := path.Join(parts...) 
+ // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { // Check if has header for host - if r.auth != nil { - if err := r.auth.Authorize(ctx, req); err != nil { + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { return err } } @@ -413,83 +471,137 @@ func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error { return nil } -func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { - ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String())) - log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request") - req.Header.Set("User-Agent", r.uagent) +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = r.header + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") if err := r.authorize(ctx, req); err != nil { return nil, errors.Wrap(err, "failed to authorize") } - resp, err := ctxhttp.Do(ctx, r.client, req) + resp, err := ctxhttp.Do(ctx, r.host.Client, req) if err != nil { return nil, errors.Wrap(err, "failed to do request") } - log.G(ctx).WithFields(logrus.Fields{ - "status": resp.Status, - "response.headers": resp.Header, - }).Debug("fetch response received") + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") return resp, nil } -func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) { - resp, err := r.doRequest(ctx, req) +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) if err != nil { return nil, err } responses = append(responses, resp) - req, err = r.retryRequest(ctx, req, responses) + retry, err := r.retryRequest(ctx, responses) if err != nil { resp.Body.Close() return nil, err } - if req != nil { + if retry { resp.Body.Close() - return r.doRequestWithRetries(ctx, req, responses) + return r.doWithRetries(ctx, responses) } return resp, err } -func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) { +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { if len(responses) > 5 { - return nil, nil + return false, nil } last := responses[len(responses)-1] switch last.StatusCode { case http.StatusUnauthorized: log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") - if r.auth != nil { - if err := r.auth.AddResponses(ctx, responses); err == nil { - return copyRequest(req) + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return 
true, nil } else if !errdefs.IsNotImplemented(err) { - return nil, err + return false, err } } - return nil, nil + + return false, nil case http.StatusMethodNotAllowed: // Support registries which have not properly implemented the HEAD method for // manifests endpoint - if req.Method == http.MethodHead && strings.Contains(req.URL.Path, "/manifests/") { - // TODO: copy request? - req.Method = http.MethodGet - return copyRequest(req) + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil } case http.StatusRequestTimeout, http.StatusTooManyRequests: - return copyRequest(req) + return true, nil } // TODO: Handle 50x errors accounting for attempt history - return nil, nil + return false, nil } -func copyRequest(req *http.Request) (*http.Request, error) { - ireq := *req - if ireq.GetBody != nil { - var err error - ireq.Body, err = ireq.GetBody() - if err != nil { - return nil, err +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v } } - return &ireq, nil + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) } diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go index 86bd81bf..fa840143 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/scope.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -51,19 +51,25 @@ func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, pus if err != nil { return nil, err } - return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil + return WithScope(ctx, s), nil +} + +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) } // contextWithAppendPullRepositoryScope is used to append repository pull // scope into existing scopes indexed by the tokenScopesKey{}. func contextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { - var scopes []string - - if v := ctx.Value(tokenScopesKey{}); v != nil { - scopes = append(scopes, v.([]string)...) - } - scopes = append(scopes, fmt.Sprintf("repository:%s:pull", repo)) - return context.WithValue(ctx, tokenScopesKey{}, scopes) + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) } // getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. 
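A minimal sketch (not part of the vendored files) of how a caller assembles a resolver against the new RegistryHosts configuration from registry.go, using only functions introduced in this diff; the anonymous credential callback is a placeholder:

package resolverexample

import (
	"net/http"

	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
)

// newResolver wires ConfigureDefaultRegistries into ResolverOptions.Hosts,
// replacing the now-deprecated Host/Client/PlainHTTP/Authorizer fields.
func newResolver() remotes.Resolver {
	hosts := docker.ConfigureDefaultRegistries(
		docker.WithClient(http.DefaultClient),
		// Plain HTTP only for localhost, mirroring the default wiring in NewResolver.
		docker.WithPlainHTTP(docker.MatchLocalhost),
		docker.WithAuthorizer(docker.NewDockerAuthorizer(
			// Placeholder credentials: empty username and secret mean anonymous pulls.
			docker.WithAuthCreds(func(host string) (string, string, error) {
				return "", "", nil
			}),
		)),
	)
	return docker.NewResolver(docker.ResolverOptions{Hosts: hosts})
}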
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index 066794c1..671fea10 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -33,27 +33,46 @@ import ( "github.com/sirupsen/logrus" ) +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + // MakeRefKey returns a unique reference for the descriptor. This reference can be // used to lookup ongoing processes related to the descriptor. This function // may look to the context to namespace the reference appropriately. func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { - // TODO(stevvooe): Need better remote key selection here. Should be a - // product of the context, which may include information about the ongoing - // fetch process. - switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + desc.Digest.String() + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: return "manifest-" + desc.Digest.String() - case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: return "index-" + desc.Digest.String() - case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, - images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip, - ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip, - ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip: + case images.IsLayerType(mt): return "layer-" + desc.Digest.String() - case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + case images.IsKnownConfig(mt): return "config-" + desc.Digest.String() default: - log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType) + log.G(ctx).Warnf("reference for unknown type: %s", mt) return "unknown-" + desc.Digest.String() } } diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go index 3ea830f6..73d4ccca 100644 --- a/vendor/github.com/containerd/containerd/rootfs/apply.go +++ b/vendor/github.com/containerd/containerd/rootfs/apply.go @@ -48,6 +48,14 @@ type Layer struct { // Layers are applied in order they are given, making the first layer the // bottom-most layer in the layer chain. func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier) (digest.Digest, error) { + return ApplyLayersWithOpts(ctx, layers, sn, a, nil) +} + +// ApplyLayersWithOpts applies all the layers using the given snapshotter, applier, and apply opts. 
+// The returned result is a chain id digest representing all the applied layers. +// Layers are applied in order they are given, making the first layer the +// bottom-most layer in the layer chain. +func ApplyLayersWithOpts(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, a diff.Applier, applyOpts []diff.ApplyOpt) (digest.Digest, error) { chain := make([]digest.Digest, len(layers)) for i, layer := range layers { chain[i] = layer.Diff.Digest @@ -63,7 +71,7 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, return "", errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, layers, chain, sn, a); err != nil && !errdefs.IsAlreadyExists(err) { + if err := applyLayers(ctx, layers, chain, sn, a, nil, applyOpts); err != nil && !errdefs.IsAlreadyExists(err) { return "", err } } @@ -75,6 +83,13 @@ func ApplyLayers(ctx context.Context, layers []Layer, sn snapshots.Snapshotter, // using the provided snapshotter and applier. If the layer was unpacked true // is returned, if the layer already exists false is returned. func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) (bool, error) { + return ApplyLayerWithOpts(ctx, layer, chain, sn, a, opts, nil) +} + +// ApplyLayerWithOpts applies a single layer on top of the given provided layer chain, +// using the provided snapshotter, applier, and apply opts. If the layer was unpacked true +// is returned, if the layer already exists false is returned. +func ApplyLayerWithOpts(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) (bool, error) { var ( chainID = identity.ChainID(append(chain, layer.Diff.Digest)).String() applied bool @@ -84,7 +99,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap return false, errors.Wrapf(err, "failed to stat snapshot %s", chainID) } - if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts...); err != nil { + if err := applyLayers(ctx, []Layer{layer}, append(chain, layer.Diff.Digest), sn, a, opts, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return false, err } @@ -93,9 +108,10 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap } } return applied, nil + } -func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts ...snapshots.Opt) error { +func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn snapshots.Snapshotter, a diff.Applier, opts []snapshots.Opt, applyOpts []diff.ApplyOpt) error { var ( parent = identity.ChainID(chain[:len(chain)-1]) chainID = identity.ChainID(chain) @@ -113,7 +129,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn mounts, err = sn.Prepare(ctx, key, parent.String(), opts...) if err != nil { if errdefs.IsNotFound(err) && len(layers) > 1 { - if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a); err != nil { + if err := applyLayers(ctx, layers[:len(layers)-1], chain[:len(chain)-1], sn, a, nil, applyOpts); err != nil { if !errdefs.IsAlreadyExists(err) { return err } @@ -144,7 +160,7 @@ func applyLayers(ctx context.Context, layers []Layer, chain []digest.Digest, sn } }() - diff, err = a.Apply(ctx, layer.Blob, mounts) + diff, err = a.Apply(ctx, layer.Blob, mounts, applyOpts...) 
if err != nil { err = errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest) return err diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go index b3e6ba8a..f396c73a 100644 --- a/vendor/github.com/containerd/containerd/rootfs/diff.go +++ b/vendor/github.com/containerd/containerd/rootfs/diff.go @@ -22,6 +22,7 @@ import ( "github.com/containerd/containerd/diff" "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/snapshots" ocispec "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -31,6 +32,13 @@ import ( // the content creation and the provided snapshotter and mount differ are used // for calculating the diff. The descriptor for the layer diff is returned. func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter, d diff.Comparer, opts ...diff.Opt) (ocispec.Descriptor, error) { + // dctx is used to handle cleanup things just in case the param ctx + // has been canceled, which causes that the defer cleanup fails. + dctx := context.Background() + if ns, ok := namespaces.Namespace(ctx); ok { + dctx = namespaces.WithNamespace(dctx, ns) + } + info, err := sn.Stat(ctx, snapshotID) if err != nil { return ocispec.Descriptor{}, err @@ -41,7 +49,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, lowerKey) + defer sn.Remove(dctx, lowerKey) var upper []mount.Mount if info.Kind == snapshots.KindActive { @@ -55,7 +63,7 @@ func CreateDiff(ctx context.Context, snapshotID string, sn snapshots.Snapshotter if err != nil { return ocispec.Descriptor{}, err } - defer sn.Remove(ctx, upperKey) + defer sn.Remove(dctx, upperKey) } return d.Compare(ctx, lower, upper, opts...) diff --git a/vendor/github.com/containerd/containerd/unpacker.go b/vendor/github.com/containerd/containerd/unpacker.go new file mode 100644 index 00000000..f7580ce8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/unpacker.go @@ -0,0 +1,243 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package containerd + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "sync/atomic" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/rootfs" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" +) + +type layerState struct { + layer rootfs.Layer + downloaded bool + unpacked bool +} + +type unpacker struct { + updateCh chan ocispec.Descriptor + snapshotter string + config UnpackConfig + c *Client +} + +func (c *Client) newUnpacker(ctx context.Context, rCtx *RemoteContext) (*unpacker, error) { + snapshotter, err := c.resolveSnapshotterName(ctx, rCtx.Snapshotter) + if err != nil { + return nil, err + } + var config UnpackConfig + for _, o := range rCtx.UnpackOpts { + if err := o(ctx, &config); err != nil { + return nil, err + } + } + return &unpacker{ + updateCh: make(chan ocispec.Descriptor, 128), + snapshotter: snapshotter, + config: config, + c: c, + }, nil +} + +func (u *unpacker) unpack(ctx context.Context, config ocispec.Descriptor, layers []ocispec.Descriptor) error { + p, err := content.ReadBlob(ctx, u.c.ContentStore(), config) + if err != nil { + return err + } + + var i ocispec.Image + if err := json.Unmarshal(p, &i); err != nil { + return errors.Wrap(err, "unmarshal image config") + } + diffIDs := i.RootFS.DiffIDs + if len(layers) != len(diffIDs) { + return errors.Errorf("number of layers and diffIDs don't match: %d != %d", len(layers), len(diffIDs)) + } + + var ( + sn = u.c.SnapshotService(u.snapshotter) + a = u.c.DiffService() + cs = u.c.ContentStore() + + states []layerState + chain []digest.Digest + ) + for i, desc := range layers { + states = append(states, layerState{ + layer: rootfs.Layer{ + Blob: desc, + Diff: ocispec.Descriptor{ + MediaType: ocispec.MediaTypeImageLayer, + Digest: diffIDs[i], + }, + }, + }) + } + for { + var layer ocispec.Descriptor + select { + case layer = <-u.updateCh: + case <-ctx.Done(): + return ctx.Err() + } + log.G(ctx).WithField("desc", layer).Debug("layer downloaded") + for i := range states { + if states[i].layer.Blob.Digest != layer.Digest { + continue + } + // Different layers may have the same digest. When that + // happens, we should continue marking the next layer + // as downloaded. + if states[i].downloaded { + continue + } + states[i].downloaded = true + break + } + for i := range states { + if !states[i].downloaded { + break + } + if states[i].unpacked { + continue + } + + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("unpack layer") + + unpacked, err := rootfs.ApplyLayerWithOpts(ctx, states[i].layer, chain, sn, a, + u.config.SnapshotOpts, u.config.ApplyOpts) + if err != nil { + return err + } + + if unpacked { + // Set the uncompressed label after the uncompressed + // digest has been verified through apply. 
+ cinfo := content.Info{ + Digest: states[i].layer.Blob.Digest, + Labels: map[string]string{ + "containerd.io/uncompressed": states[i].layer.Diff.Digest.String(), + }, + } + if _, err := cs.Update(ctx, cinfo, "labels.containerd.io/uncompressed"); err != nil { + return err + } + } + + chain = append(chain, states[i].layer.Diff.Digest) + states[i].unpacked = true + log.G(ctx).WithFields(logrus.Fields{ + "desc": states[i].layer.Blob, + "diff": states[i].layer.Diff, + }).Debug("layer unpacked") + } + // Check whether all layers are unpacked. + if states[len(states)-1].unpacked { + break + } + } + + chainID := identity.ChainID(chain).String() + cinfo := content.Info{ + Digest: config.Digest, + Labels: map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", u.snapshotter): chainID, + }, + } + _, err = cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", u.snapshotter)) + if err != nil { + return err + } + log.G(ctx).WithFields(logrus.Fields{ + "config": config.Digest, + "chainID": chainID, + }).Debug("image unpacked") + return nil +} + +func (u *unpacker) handlerWrapper(uctx context.Context, unpacks *int32) (func(images.Handler) images.Handler, *errgroup.Group) { + eg, uctx := errgroup.WithContext(uctx) + return func(f images.Handler) images.Handler { + var ( + lock sync.Mutex + layers []ocispec.Descriptor + schema1 bool + ) + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return children, err + } + + // `Pull` only supports one platform, so there is only + // one manifest to handle, and manifest list can be + // safely skipped. + // TODO: support multi-platform unpack. + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema1Manifest: + lock.Lock() + schema1 = true + lock.Unlock() + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + lock.Lock() + for _, child := range children { + if child.MediaType == images.MediaTypeDockerSchema2Config || + child.MediaType == ocispec.MediaTypeImageConfig { + continue + } + layers = append(layers, child) + } + lock.Unlock() + case mt == images.MediaTypeDockerSchema2Config || mt == ocispec.MediaTypeImageConfig: + lock.Lock() + l := append([]ocispec.Descriptor{}, layers...) + lock.Unlock() + if len(l) > 0 { + atomic.AddInt32(unpacks, 1) + eg.Go(func() error { + return u.unpack(uctx, desc, l) + }) + } + case images.IsLayerType(mt): + lock.Lock() + update := !schema1 + lock.Unlock() + if update { + u.updateCh <- desc + } + } + return children, nil + }) + }, eg +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go index b2874bf6..04cf59c8 100644 --- a/vendor/github.com/containerd/containerd/version/version.go +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -21,7 +21,7 @@ var ( Package = "github.com/containerd/containerd" // Version holds the complete version number. Filled in at linking time. - Version = "1.2.0+unknown" + Version = "1.3.0+unknown" // Revision is filled with the VCS (e.g. git) revision being used to build // the program at linking time. 
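The remotes.WithMediaTypeKeyPrefix hook added above lets a caller override the ref-key prefix that MakeRefKey derives from a descriptor's media type. A minimal sketch of using it; the media type and prefix below are invented for illustration, not values from this change:

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/remotes"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Register an invented prefix for an invented media type; MakeRefKey
	// checks the prefixes stored in the context before falling back to its
	// built-in manifest/index/layer/config classification.
	ctx := remotes.WithMediaTypeKeyPrefix(context.Background(),
		"application/vnd.example.config.v1+json", "example-config")

	desc := ocispec.Descriptor{
		MediaType: "application/vnd.example.config.v1+json",
		Digest:    digest.FromString("placeholder"),
	}

	fmt.Println(remotes.MakeRefKey(ctx, desc)) // example-config-sha256:...
}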
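ApplyLayersWithOpts and ApplyLayerWithOpts added above only thread diff.ApplyOpt values through to the applier; behaviour is otherwise unchanged from ApplyLayers/ApplyLayer. A sketch of the single-layer variant driven through a containerd client — the socket path, namespace, snapshotter name, and digests are placeholders, not values taken from this change:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/rootfs"
	digest "github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock") // assumed socket path
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
	ctx := namespaces.WithNamespace(context.Background(), "example") // assumed namespace

	// Placeholder digests; real values come from an image manifest and config.
	layer := rootfs.Layer{
		Blob: ocispec.Descriptor{MediaType: ocispec.MediaTypeImageLayerGzip, Digest: digest.FromString("blob placeholder")},
		Diff: ocispec.Descriptor{MediaType: ocispec.MediaTypeImageLayer, Digest: digest.FromString("diff placeholder")},
	}
	var chain []digest.Digest // empty chain: apply as a base layer

	// nil opts/applyOpts keep the previous ApplyLayer behaviour; callers can
	// now pass snapshots.Opt and diff.ApplyOpt slices here instead.
	unpacked, err := rootfs.ApplyLayerWithOpts(ctx, layer, chain,
		client.SnapshotService("overlayfs"), client.DiffService(), nil, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("layer applied:", unpacked)
}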
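The new unpacker.go feeds descriptors from the fetch handlers into updateCh and applies each layer with rootfs.ApplyLayerWithOpts as soon as it and all of its parents have been downloaded. On the client side this path is exercised by Pull with unpacking enabled; a sketch under assumed values (socket path, namespace, snapshotter, and image reference are all illustrative):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock") // assumed socket path
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx := namespaces.WithNamespace(context.Background(), "example") // assumed namespace

	// With unpacking requested, v1.3.0 runs the unpacker alongside the fetch
	// handlers, so layers are applied as their downloads complete rather than
	// after the whole pull finishes.
	img, err := client.Pull(ctx, "docker.io/library/alpine:latest",
		containerd.WithPullUnpack,
		containerd.WithPullSnapshotter("overlayfs"),
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("pulled and unpacked:", img.Name())
}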
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a1..00000000 --- a/vendor/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE deleted file mode 100644 index 23a0ada2..00000000 --- a/vendor/github.com/coreos/go-systemd/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2018 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go deleted file mode 100644 index f652582e..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/dbus.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Integration with the systemd D-Bus API. See http://www.freedesktop.org/wiki/Software/systemd/dbus/ -package dbus - -import ( - "encoding/hex" - "fmt" - "os" - "strconv" - "strings" - "sync" - - "github.com/godbus/dbus" -) - -const ( - alpha = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ` - num = `0123456789` - alphanum = alpha + num - signalBuffer = 100 -) - -// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped -func needsEscape(i int, b byte) bool { - // Escape everything that is not a-z-A-Z-0-9 - // Also escape 0-9 if it's the first character - return strings.IndexByte(alphanum, b) == -1 || - (i == 0 && strings.IndexByte(num, b) != -1) -} - -// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the -// rules that systemd uses for serializing special characters. -func PathBusEscape(path string) string { - // Special case the empty string - if len(path) == 0 { - return "_" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if needsEscape(i, c) { - e := fmt.Sprintf("_%x", c) - n = append(n, []byte(e)...) - } else { - n = append(n, c) - } - } - return string(n) -} - -// pathBusUnescape is the inverse of PathBusEscape. -func pathBusUnescape(path string) string { - if path == "_" { - return "" - } - n := []byte{} - for i := 0; i < len(path); i++ { - c := path[i] - if c == '_' && i+2 < len(path) { - res, err := hex.DecodeString(path[i+1 : i+3]) - if err == nil { - n = append(n, res...) - } - i += 2 - } else { - n = append(n, c) - } - } - return string(n) -} - -// Conn is a connection to systemd's dbus endpoint. 
-type Conn struct { - // sysconn/sysobj are only used to call dbus methods - sysconn *dbus.Conn - sysobj dbus.BusObject - - // sigconn/sigobj are only used to receive dbus signals - sigconn *dbus.Conn - sigobj dbus.BusObject - - jobListener struct { - jobs map[dbus.ObjectPath]chan<- string - sync.Mutex - } - subStateSubscriber struct { - updateCh chan<- *SubStateUpdate - errCh chan<- error - sync.Mutex - ignore map[dbus.ObjectPath]int64 - cleanIgnore int64 - } - propertiesSubscriber struct { - updateCh chan<- *PropertiesUpdate - errCh chan<- error - sync.Mutex - } -} - -// New establishes a connection to any available bus and authenticates. -// Callers should call Close() when done with the connection. -func New() (*Conn, error) { - conn, err := NewSystemConnection() - if err != nil && os.Geteuid() == 0 { - return NewSystemdConnection() - } - return conn, err -} - -// NewSystemConnection establishes a connection to the system bus and authenticates. -// Callers should call Close() when done with the connection -func NewSystemConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SystemBusPrivate) - }) -} - -// NewUserConnection establishes a connection to the session bus and -// authenticates. This can be used to connect to systemd user instances. -// Callers should call Close() when done with the connection. -func NewUserConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - return dbusAuthHelloConnection(dbus.SessionBusPrivate) - }) -} - -// NewSystemdConnection establishes a private, direct connection to systemd. -// This can be used for communicating with systemd without a dbus daemon. -// Callers should call Close() when done with the connection. -func NewSystemdConnection() (*Conn, error) { - return NewConnection(func() (*dbus.Conn, error) { - // We skip Hello when talking directly to systemd. - return dbusAuthConnection(func(opts ...dbus.ConnOption) (*dbus.Conn, error) { - return dbus.Dial("unix:path=/run/systemd/private") - }) - }) -} - -// Close closes an established connection -func (c *Conn) Close() { - c.sysconn.Close() - c.sigconn.Close() -} - -// NewConnection establishes a connection to a bus using a caller-supplied function. -// This allows connecting to remote buses through a user-supplied mechanism. -// The supplied function may be called multiple times, and should return independent connections. -// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded, -// and any authentication should be handled by the function. -func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) { - sysconn, err := dialBus() - if err != nil { - return nil, err - } - - sigconn, err := dialBus() - if err != nil { - sysconn.Close() - return nil, err - } - - c := &Conn{ - sysconn: sysconn, - sysobj: systemdObject(sysconn), - sigconn: sigconn, - sigobj: systemdObject(sigconn), - } - - c.subStateSubscriber.ignore = make(map[dbus.ObjectPath]int64) - c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string) - - // Setup the listeners on jobs so that we can get completions - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'") - - c.dispatch() - return c, nil -} - -// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager -// interface. 
The value is returned in its string representation, as defined at -// https://developer.gnome.org/glib/unstable/gvariant-text.html -func (c *Conn) GetManagerProperty(prop string) (string, error) { - variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop) - if err != nil { - return "", err - } - return variant.String(), nil -} - -func dbusAuthConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := createBus() - if err != nil { - return nil, err - } - - // Only use EXTERNAL method, and hardcode the uid (not username) - // to avoid a username lookup (which requires a dynamically linked - // libc) - methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))} - - err = conn.Auth(methods) - if err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func dbusAuthHelloConnection(createBus func(opts ...dbus.ConnOption) (*dbus.Conn, error)) (*dbus.Conn, error) { - conn, err := dbusAuthConnection(createBus) - if err != nil { - return nil, err - } - - if err = conn.Hello(); err != nil { - conn.Close() - return nil, err - } - - return conn, nil -} - -func systemdObject(conn *dbus.Conn) dbus.BusObject { - return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1")) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go deleted file mode 100644 index 5859583e..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/methods.go +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright 2015, 2018 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "fmt" - "path" - "strconv" - - "github.com/godbus/dbus" -) - -func (c *Conn) jobComplete(signal *dbus.Signal) { - var id uint32 - var job dbus.ObjectPath - var unit string - var result string - dbus.Store(signal.Body, &id, &job, &unit, &result) - c.jobListener.Lock() - out, ok := c.jobListener.jobs[job] - if ok { - out <- result - delete(c.jobListener.jobs, job) - } - c.jobListener.Unlock() -} - -func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) { - if ch != nil { - c.jobListener.Lock() - defer c.jobListener.Unlock() - } - - var p dbus.ObjectPath - err := c.sysobj.Call(job, 0, args...).Store(&p) - if err != nil { - return 0, err - } - - if ch != nil { - c.jobListener.jobs[p] = ch - } - - // ignore error since 0 is fine if conversion fails - jobID, _ := strconv.Atoi(path.Base(string(p))) - - return jobID, nil -} - -// StartUnit enqueues a start job and depending jobs, if any (unless otherwise -// specified by the mode string). -// -// Takes the unit to activate, plus a mode string. The mode needs to be one of -// replace, fail, isolate, ignore-dependencies, ignore-requirements. If -// "replace" the call will start the unit and its dependencies, possibly -// replacing already queued jobs that conflict with this. 
If "fail" the call -// will start the unit and its dependencies, but will fail if this would change -// an already queued job. If "isolate" the call will start the unit in question -// and terminate all units that aren't dependencies of it. If -// "ignore-dependencies" it will start a unit but ignore all its dependencies. -// If "ignore-requirements" it will start a unit but only ignore the -// requirement dependencies. It is not recommended to make use of the latter -// two options. -// -// If the provided channel is non-nil, a result string will be sent to it upon -// job completion: one of done, canceled, timeout, failed, dependency, skipped. -// done indicates successful execution of a job. canceled indicates that a job -// has been canceled before it finished execution. timeout indicates that the -// job timeout was reached. failed indicates that the job failed. dependency -// indicates that a job this job has been depending on failed and the job hence -// has been removed too. skipped indicates that a job was skipped because it -// didn't apply to the units current state. -// -// If no error occurs, the ID of the underlying systemd job will be returned. There -// does exist the possibility for no error to be returned, but for the returned job -// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint -// should not be considered authoritative. -// -// If an error does occur, it will be returned to the user alongside a job ID of 0. -func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode) -} - -// StopUnit is similar to StartUnit but stops the specified unit rather -// than starting it. -func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode) -} - -// ReloadUnit reloads a unit. Reloading is done only if the unit is already running and fails otherwise. -func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode) -} - -// RestartUnit restarts a service. If a service is restarted that isn't -// running it will be started. -func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode) -} - -// TryRestartUnit is like RestartUnit, except that a service that isn't running -// is not affected by the restart. -func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode) -} - -// ReloadOrRestartUnit attempts a reload if the unit supports it and use a restart -// otherwise. -func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode) -} - -// ReloadOrTryRestartUnit attempts a reload if the unit supports it and use a "Try" -// flavored restart otherwise. 
-func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode) -} - -// StartTransientUnit() may be used to create and start a transient unit, which -// will be released as soon as it is not running or referenced anymore or the -// system is rebooted. name is the unit name including suffix, and must be -// unique. mode is the same as in StartUnit(), properties contains properties -// of the unit. -func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) { - return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0)) -} - -// KillUnit takes the unit name and a UNIX signal number to send. All of the unit's -// processes are killed. -func (c *Conn) KillUnit(name string, signal int32) { - c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store() -} - -// ResetFailedUnit resets the "failed" state of a specific unit. -func (c *Conn) ResetFailedUnit(name string) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store() -} - -// SystemState returns the systemd state. Equivalent to `systemctl is-system-running`. -func (c *Conn) SystemState() (*Property, error) { - var err error - var prop dbus.Variant - - obj := c.sysconn.Object("org.freedesktop.systemd1", "/org/freedesktop/systemd1") - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, "org.freedesktop.systemd1.Manager", "SystemState").Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: "SystemState", Value: prop}, nil -} - -// getProperties takes the unit path and returns all of its dbus object properties, for the given dbus interface -func (c *Conn) getProperties(path dbus.ObjectPath, dbusInterface string) (map[string]interface{}, error) { - var err error - var props map[string]dbus.Variant - - if !path.IsValid() { - return nil, fmt.Errorf("invalid unit name: %v", path) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props) - if err != nil { - return nil, err - } - - out := make(map[string]interface{}, len(props)) - for k, v := range props { - out[k] = v.Value() - } - - return out, nil -} - -// GetUnitProperties takes the (unescaped) unit name and returns all of its dbus object properties. -func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1.Unit") -} - -// GetUnitPathProperties takes the (escaped) unit path and returns all of its dbus object properties. -func (c *Conn) GetUnitPathProperties(path dbus.ObjectPath) (map[string]interface{}, error) { - return c.getProperties(path, "org.freedesktop.systemd1.Unit") -} - -// GetAllProperties takes the (unescaped) unit name and returns all of its dbus object properties. 
-func (c *Conn) GetAllProperties(unit string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "") -} - -func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) { - var err error - var prop dbus.Variant - - path := unitPath(unit) - if !path.IsValid() { - return nil, errors.New("invalid unit name: " + unit) - } - - obj := c.sysconn.Object("org.freedesktop.systemd1", path) - err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop) - if err != nil { - return nil, err - } - - return &Property{Name: propertyName, Value: prop}, nil -} - -func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName) -} - -// GetServiceProperty returns property for given service name and property name -func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) { - return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName) -} - -// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type. -// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope -// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit -func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) { - path := unitPath(unit) - return c.getProperties(path, "org.freedesktop.systemd1."+unitType) -} - -// SetUnitProperties() may be used to modify certain unit properties at runtime. -// Not all properties may be changed at runtime, but many resource management -// settings (primarily those in systemd.cgroup(5)) may. The changes are applied -// instantly, and stored on disk for future boots, unless runtime is true, in which -// case the settings only apply until the next reboot. name is the name of the unit -// to modify. properties are the settings to set, encoded as an array of property -// name and value pairs. -func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store() -} - -func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) { - return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName) -} - -type UnitStatus struct { - Name string // The primary unit name as string - Description string // The human readable description string - LoadState string // The load state (i.e. whether the unit file has been loaded successfully) - ActiveState string // The active state (i.e. whether the unit is currently started or not) - SubState string // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not) - Followed string // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string. 
- Path dbus.ObjectPath // The unit object path - JobId uint32 // If there is a job queued for the job unit the numeric job id, 0 otherwise - JobType string // The job type as string - JobPath dbus.ObjectPath // The job object path -} - -type storeFunc func(retvalues ...interface{}) error - -func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - status := make([]UnitStatus, len(result)) - statusInterface := make([]interface{}, len(status)) - for i := range status { - statusInterface[i] = &status[i] - } - - err = dbus.Store(resultInterface, statusInterface...) - if err != nil { - return nil, err - } - - return status, nil -} - -// ListUnits returns an array with all currently loaded units. Note that -// units may be known by multiple names at the same time, and hence there might -// be more unit names loaded than actual units behind them. -// Also note that a unit is only loaded if it is active and/or enabled. -// Units that are both disabled and inactive will thus not be returned. -func (c *Conn) ListUnits() ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store) -} - -// ListUnitsFiltered returns an array with units filtered by state. -// It takes a list of units' statuses to filter. -func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store) -} - -// ListUnitsByPatterns returns an array with units. -// It takes a list of units' statuses and names to filter. -// Note that units may be known by multiple names at the same time, -// and hence there might be more unit names loaded than actual units behind them. -func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store) -} - -// ListUnitsByNames returns an array with units. It takes a list of units' -// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns -// method, this method returns statuses even for inactive or non-existing -// units. Input array should contain exact unit names, but not patterns. -// Note: Requires systemd v230 or higher -func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) { - return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store) -} - -type UnitFile struct { - Path string - Type string -} - -func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) { - result := make([][]interface{}, 0) - err := f(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - files := make([]UnitFile, len(result)) - fileInterface := make([]interface{}, len(files)) - for i := range files { - fileInterface[i] = &files[i] - } - - err = dbus.Store(resultInterface, fileInterface...) - if err != nil { - return nil, err - } - - return files, nil -} - -// ListUnitFiles returns an array of all available units on disk. 
-func (c *Conn) ListUnitFiles() ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store) -} - -// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns. -func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) { - return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store) -} - -type LinkUnitFileChange EnableUnitFileChange - -// LinkUnitFiles() links unit files (that are located outside of the -// usual unit search paths) into the unit search path. -// -// It takes a list of absolute paths to unit files to link and two -// booleans. The first boolean controls whether the unit shall be -// enabled for runtime only (true, /run), or persistently (false, -// /etc). -// The second controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns a list of the changes made. The list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]LinkUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -// EnableUnitFiles() may be used to enable one or more units in the system (by -// creating symlinks to them in /etc or /run). -// -// It takes a list of unit files to enable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and two booleans: the first controls whether the unit shall -// be enabled for runtime only (true, /run), or persistently (false, /etc). -// The second one controls whether symlinks pointing to other units shall -// be replaced if necessary. -// -// This call returns one boolean and an array with the changes made. The -// boolean signals whether the unit files contained any enablement -// information (i.e. an [Install]) section. The changes list consists of -// structures with three strings: the type of the change (one of symlink -// or unlink), the file name of the symlink and the destination of the -// symlink. 
-func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) { - var carries_install_info bool - - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result) - if err != nil { - return false, nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]EnableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return false, nil, err - } - - return carries_install_info, changes, nil -} - -type EnableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// DisableUnitFiles() may be used to disable one or more units in the system (by -// removing symlinks to them from /etc or /run). -// -// It takes a list of unit files to disable (either just file names or full -// absolute paths if the unit files are residing outside the usual unit -// search paths), and one boolean: whether the unit was enabled for runtime -// only (true, /run), or persistently (false, /etc). -// -// This call returns an array with the changes made. The changes list -// consists of structures with three strings: the type of the change (one of -// symlink or unlink), the file name of the symlink and the destination of the -// symlink. -func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]DisableUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type DisableUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// MaskUnitFiles masks one or more units in the system -// -// It takes three arguments: -// * list of units to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) 
-// * force flag -func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]MaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type MaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// UnmaskUnitFiles unmasks one or more units in the system -// -// It takes two arguments: -// * list of unit files to mask (either just file names or full -// absolute paths if the unit files are residing outside -// the usual unit search paths) -// * runtime to specify whether the unit was enabled for runtime -// only (true, /run/systemd/..), or persistently (false, /etc/systemd/..) -func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) { - result := make([][]interface{}, 0) - err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result) - if err != nil { - return nil, err - } - - resultInterface := make([]interface{}, len(result)) - for i := range result { - resultInterface[i] = result[i] - } - - changes := make([]UnmaskUnitFileChange, len(result)) - changesInterface := make([]interface{}, len(changes)) - for i := range changes { - changesInterface[i] = &changes[i] - } - - err = dbus.Store(resultInterface, changesInterface...) - if err != nil { - return nil, err - } - - return changes, nil -} - -type UnmaskUnitFileChange struct { - Type string // Type of the change (one of symlink or unlink) - Filename string // File name of the symlink - Destination string // Destination of the symlink -} - -// Reload instructs systemd to scan for and reload unit files. This is -// equivalent to a 'systemctl daemon-reload'. -func (c *Conn) Reload() error { - return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store() -} - -func unitPath(name string) dbus.ObjectPath { - return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name)) -} - -// unitName returns the unescaped base element of the supplied escaped path -func unitName(dpath dbus.ObjectPath) string { - return pathBusUnescape(path.Base(string(dpath))) -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go deleted file mode 100644 index 6c818958..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/properties.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "github.com/godbus/dbus" -) - -// From the systemd docs: -// -// The properties array of StartTransientUnit() may take many of the settings -// that may also be configured in unit files. Not all parameters are currently -// accepted though, but we plan to cover more properties with future release. -// Currently you may set the Description, Slice and all dependency types of -// units, as well as RemainAfterExit, ExecStart for service units, -// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares, -// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth, -// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit, -// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map -// directly to their counterparts in unit files and as normal D-Bus object -// properties. The exception here is the PIDs field of scope units which is -// used for construction of the scope only and specifies the initial PIDs to -// add to the scope object. - -type Property struct { - Name string - Value dbus.Variant -} - -type PropertyCollection struct { - Name string - Properties []Property -} - -type execStart struct { - Path string // the binary path to execute - Args []string // an array with all arguments to pass to the executed command, starting with argument 0 - UncleanIsFailure bool // a boolean whether it should be considered a failure if the process exits uncleanly -} - -// PropExecStart sets the ExecStart service property. The first argument is a -// slice with the binary path to execute followed by the arguments to pass to -// the executed command. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= -func PropExecStart(command []string, uncleanIsFailure bool) Property { - execStarts := []execStart{ - execStart{ - Path: command[0], - Args: command, - UncleanIsFailure: uncleanIsFailure, - }, - } - - return Property{ - Name: "ExecStart", - Value: dbus.MakeVariant(execStarts), - } -} - -// PropRemainAfterExit sets the RemainAfterExit service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit= -func PropRemainAfterExit(b bool) Property { - return Property{ - Name: "RemainAfterExit", - Value: dbus.MakeVariant(b), - } -} - -// PropType sets the Type service property. See -// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type= -func PropType(t string) Property { - return Property{ - Name: "Type", - Value: dbus.MakeVariant(t), - } -} - -// PropDescription sets the Description unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit#Description= -func PropDescription(desc string) Property { - return Property{ - Name: "Description", - Value: dbus.MakeVariant(desc), - } -} - -func propDependency(name string, units []string) Property { - return Property{ - Name: name, - Value: dbus.MakeVariant(units), - } -} - -// PropRequires sets the Requires unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires= -func PropRequires(units ...string) Property { - return propDependency("Requires", units) -} - -// PropRequiresOverridable sets the RequiresOverridable unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable= -func PropRequiresOverridable(units ...string) Property { - return propDependency("RequiresOverridable", units) -} - -// PropRequisite sets the Requisite unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite= -func PropRequisite(units ...string) Property { - return propDependency("Requisite", units) -} - -// PropRequisiteOverridable sets the RequisiteOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable= -func PropRequisiteOverridable(units ...string) Property { - return propDependency("RequisiteOverridable", units) -} - -// PropWants sets the Wants unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants= -func PropWants(units ...string) Property { - return propDependency("Wants", units) -} - -// PropBindsTo sets the BindsTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo= -func PropBindsTo(units ...string) Property { - return propDependency("BindsTo", units) -} - -// PropRequiredBy sets the RequiredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy= -func PropRequiredBy(units ...string) Property { - return propDependency("RequiredBy", units) -} - -// PropRequiredByOverridable sets the RequiredByOverridable unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable= -func PropRequiredByOverridable(units ...string) Property { - return propDependency("RequiredByOverridable", units) -} - -// PropWantedBy sets the WantedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy= -func PropWantedBy(units ...string) Property { - return propDependency("WantedBy", units) -} - -// PropBoundBy sets the BoundBy unit property. See -// http://www.freedesktop.org/software/systemd/main/systemd.unit.html#BoundBy= -func PropBoundBy(units ...string) Property { - return propDependency("BoundBy", units) -} - -// PropConflicts sets the Conflicts unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts= -func PropConflicts(units ...string) Property { - return propDependency("Conflicts", units) -} - -// PropConflictedBy sets the ConflictedBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy= -func PropConflictedBy(units ...string) Property { - return propDependency("ConflictedBy", units) -} - -// PropBefore sets the Before unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before= -func PropBefore(units ...string) Property { - return propDependency("Before", units) -} - -// PropAfter sets the After unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After= -func PropAfter(units ...string) Property { - return propDependency("After", units) -} - -// PropOnFailure sets the OnFailure unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure= -func PropOnFailure(units ...string) Property { - return propDependency("OnFailure", units) -} - -// PropTriggers sets the Triggers unit property. 
See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers= -func PropTriggers(units ...string) Property { - return propDependency("Triggers", units) -} - -// PropTriggeredBy sets the TriggeredBy unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy= -func PropTriggeredBy(units ...string) Property { - return propDependency("TriggeredBy", units) -} - -// PropPropagatesReloadTo sets the PropagatesReloadTo unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo= -func PropPropagatesReloadTo(units ...string) Property { - return propDependency("PropagatesReloadTo", units) -} - -// PropRequiresMountsFor sets the RequiresMountsFor unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor= -func PropRequiresMountsFor(units ...string) Property { - return propDependency("RequiresMountsFor", units) -} - -// PropSlice sets the Slice unit property. See -// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice= -func PropSlice(slice string) Property { - return Property{ - Name: "Slice", - Value: dbus.MakeVariant(slice), - } -} - -// PropPids sets the PIDs field of scope units used in the initial construction -// of the scope only and specifies the initial PIDs to add to the scope object. -// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties -func PropPids(pids ...uint32) Property { - return Property{ - Name: "PIDs", - Value: dbus.MakeVariant(pids), - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go deleted file mode 100644 index 17c5d485..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/set.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -type set struct { - data map[string]bool -} - -func (s *set) Add(value string) { - s.data[value] = true -} - -func (s *set) Remove(value string) { - delete(s.data, value) -} - -func (s *set) Contains(value string) (exists bool) { - _, exists = s.data[value] - return -} - -func (s *set) Length() int { - return len(s.data) -} - -func (s *set) Values() (values []string) { - for val := range s.data { - values = append(values, val) - } - return -} - -func newSet() *set { - return &set{make(map[string]bool)} -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go deleted file mode 100644 index f6d7a08a..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "errors" - "log" - "time" - - "github.com/godbus/dbus" -) - -const ( - cleanIgnoreInterval = int64(10 * time.Second) - ignoreInterval = int64(30 * time.Millisecond) -) - -// Subscribe sets up this connection to subscribe to all systemd dbus events. -// This is required before calling SubscribeUnits. When the connection closes -// systemd will automatically stop sending signals so there is no need to -// explicitly call Unsubscribe(). -func (c *Conn) Subscribe() error { - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'") - c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, - "type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'") - - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store() -} - -// Unsubscribe this connection from systemd dbus events. -func (c *Conn) Unsubscribe() error { - return c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store() -} - -func (c *Conn) dispatch() { - ch := make(chan *dbus.Signal, signalBuffer) - - c.sigconn.Signal(ch) - - go func() { - for { - signal, ok := <-ch - if !ok { - return - } - - if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" { - c.jobComplete(signal) - } - - if c.subStateSubscriber.updateCh == nil && - c.propertiesSubscriber.updateCh == nil { - continue - } - - var unitPath dbus.ObjectPath - switch signal.Name { - case "org.freedesktop.systemd1.Manager.JobRemoved": - unitName := signal.Body[2].(string) - c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath) - case "org.freedesktop.systemd1.Manager.UnitNew": - unitPath = signal.Body[1].(dbus.ObjectPath) - case "org.freedesktop.DBus.Properties.PropertiesChanged": - if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" { - unitPath = signal.Path - - if len(signal.Body) >= 2 { - if changed, ok := signal.Body[1].(map[string]dbus.Variant); ok { - c.sendPropertiesUpdate(unitPath, changed) - } - } - } - } - - if unitPath == dbus.ObjectPath("") { - continue - } - - c.sendSubStateUpdate(unitPath) - } - }() -} - -// SubscribeUnits returns two unbuffered channels which will receive all changed units every -// interval. Deleted units are sent as nil. -func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) { - return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil) -} - -// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer -// size of the channels, the comparison function for detecting changes and a filter -// function for cutting down on the noise that your channel receives. 
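For context on the subscription API being dropped from vendor in this hunk: SubscribeUnits polls ListUnits on the given interval and delivers changed units (nil for deletions) on the returned channel, and Subscribe must be called first, per the doc comments above. A minimal consumer might look like the sketch below; only the Subscribe/SubscribeUnits signatures are taken from the code shown here, while the New constructor and the log output are assumptions.

package main

import (
	"log"
	"time"

	sd "github.com/coreos/go-systemd/dbus"
)

func main() {
	conn, err := sd.New() // assumed: the package's default connection constructor
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Required before SubscribeUnits, per the Subscribe doc comment above.
	if err := conn.Subscribe(); err != nil {
		log.Fatal(err)
	}

	statusCh, errCh := conn.SubscribeUnits(time.Second)
	for {
		select {
		case changed := <-statusCh:
			for name, status := range changed {
				if status == nil {
					log.Printf("unit %s was removed", name)
					continue
				}
				log.Printf("unit %s: %s/%s", name, status.ActiveState, status.SubState)
			}
		case err := <-errCh:
			log.Printf("subscription error: %v", err)
		}
	}
}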
-func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) { - old := make(map[string]*UnitStatus) - statusChan := make(chan map[string]*UnitStatus, buffer) - errChan := make(chan error, buffer) - - go func() { - for { - timerChan := time.After(interval) - - units, err := c.ListUnits() - if err == nil { - cur := make(map[string]*UnitStatus) - for i := range units { - if filterUnit != nil && filterUnit(units[i].Name) { - continue - } - cur[units[i].Name] = &units[i] - } - - // add all new or changed units - changed := make(map[string]*UnitStatus) - for n, u := range cur { - if oldU, ok := old[n]; !ok || isChanged(oldU, u) { - changed[n] = u - } - delete(old, n) - } - - // add all deleted units - for oldN := range old { - changed[oldN] = nil - } - - old = cur - - if len(changed) != 0 { - statusChan <- changed - } - } else { - errChan <- err - } - - <-timerChan - } - }() - - return statusChan, errChan -} - -type SubStateUpdate struct { - UnitName string - SubState string -} - -// SetSubStateSubscriber writes to updateCh when any unit's substate changes. -// Although this writes to updateCh on every state change, the reported state -// may be more recent than the change that generated it (due to an unavoidable -// race in the systemd dbus interface). That is, this method provides a good -// way to keep a current view of all units' states, but is not guaranteed to -// show every state transition they go through. Furthermore, state changes -// will only be written to the channel with non-blocking writes. If updateCh -// is full, it attempts to write an error to errCh; if errCh is full, the error -// passes silently. -func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) { - if c == nil { - msg := "nil receiver" - select { - case errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - c.subStateSubscriber.updateCh = updateCh - c.subStateSubscriber.errCh = errCh -} - -func (c *Conn) sendSubStateUpdate(unitPath dbus.ObjectPath) { - c.subStateSubscriber.Lock() - defer c.subStateSubscriber.Unlock() - - if c.subStateSubscriber.updateCh == nil { - return - } - - isIgnored := c.shouldIgnore(unitPath) - defer c.cleanIgnore() - if isIgnored { - return - } - - info, err := c.GetUnitPathProperties(unitPath) - if err != nil { - select { - case c.subStateSubscriber.errCh <- err: - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - defer c.updateIgnore(unitPath, info) - - name, ok := info["Id"].(string) - if !ok { - msg := "failed to cast info.Id" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", err) - } - return - } - substate, ok := info["SubState"].(string) - if !ok { - msg := "failed to cast info.SubState" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } - - update := &SubStateUpdate{name, substate} - select { - case c.subStateSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.subStateSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} - -// The ignore functions 
work around a wart in the systemd dbus interface. -// Requesting the properties of an unloaded unit will cause systemd to send a -// pair of UnitNew/UnitRemoved signals. Because we need to get a unit's -// properties on UnitNew (as that's the only indication of a new unit coming up -// for the first time), we would enter an infinite loop if we did not attempt -// to detect and ignore these spurious signals. The signal themselves are -// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an -// unloaded unit's signals for a short time after requesting its properties. -// This means that we will miss e.g. a transient unit being restarted -// *immediately* upon failure and also a transient unit being started -// immediately after requesting its status (with systemctl status, for example, -// because this causes a UnitNew signal to be sent which then causes us to fetch -// the properties). - -func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool { - t, ok := c.subStateSubscriber.ignore[path] - return ok && t >= time.Now().UnixNano() -} - -func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) { - loadState, ok := info["LoadState"].(string) - if !ok { - return - } - - // unit is unloaded - it will trigger bad systemd dbus behavior - if loadState == "not-found" { - c.subStateSubscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval - } -} - -// without this, ignore would grow unboundedly over time -func (c *Conn) cleanIgnore() { - now := time.Now().UnixNano() - if c.subStateSubscriber.cleanIgnore < now { - c.subStateSubscriber.cleanIgnore = now + cleanIgnoreInterval - - for p, t := range c.subStateSubscriber.ignore { - if t < now { - delete(c.subStateSubscriber.ignore, p) - } - } - } -} - -// PropertiesUpdate holds a map of a unit's changed properties -type PropertiesUpdate struct { - UnitName string - Changed map[string]dbus.Variant -} - -// SetPropertiesSubscriber writes to updateCh when any unit's properties -// change. Every property change reported by systemd will be sent; that is, no -// transitions will be "missed" (as they might be with SetSubStateSubscriber). -// However, state changes will only be written to the channel with non-blocking -// writes. If updateCh is full, it attempts to write an error to errCh; if -// errCh is full, the error passes silently. 
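The non-blocking write behaviour described in the SetSubStateSubscriber and SetPropertiesSubscriber comments above means callers should buffer both channels generously. A rough sketch, reusing the sd alias and log import from the previous sketch and assuming conn has already had Subscribe called on it:

func watchProperties(conn *sd.Conn) {
	updates := make(chan *sd.PropertiesUpdate, 256) // buffered: the library only does non-blocking sends
	errs := make(chan error, 16)
	conn.SetPropertiesSubscriber(updates, errs)

	go func() {
		for u := range updates {
			log.Printf("%s changed: %d properties", u.UnitName, len(u.Changed))
		}
	}()
	go func() {
		for err := range errs {
			log.Printf("dropped or failed update: %v", err)
		}
	}()
}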
-func (c *Conn) SetPropertiesSubscriber(updateCh chan<- *PropertiesUpdate, errCh chan<- error) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - c.propertiesSubscriber.updateCh = updateCh - c.propertiesSubscriber.errCh = errCh -} - -// we don't need to worry about shouldIgnore() here because -// sendPropertiesUpdate doesn't call GetProperties() -func (c *Conn) sendPropertiesUpdate(unitPath dbus.ObjectPath, changedProps map[string]dbus.Variant) { - c.propertiesSubscriber.Lock() - defer c.propertiesSubscriber.Unlock() - - if c.propertiesSubscriber.updateCh == nil { - return - } - - update := &PropertiesUpdate{unitName(unitPath), changedProps} - - select { - case c.propertiesSubscriber.updateCh <- update: - default: - msg := "update channel is full" - select { - case c.propertiesSubscriber.errCh <- errors.New(msg): - default: - log.Printf("full error channel while reporting: %s\n", msg) - } - return - } -} diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go deleted file mode 100644 index 5b408d58..00000000 --- a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package dbus - -import ( - "time" -) - -// SubscriptionSet returns a subscription set which is like conn.Subscribe but -// can filter to only return events for a set of units. -type SubscriptionSet struct { - *set - conn *Conn -} - -func (s *SubscriptionSet) filter(unit string) bool { - return !s.Contains(unit) -} - -// Subscribe starts listening for dbus events for all of the units in the set. -// Returns channels identical to conn.SubscribeUnits. -func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) { - // TODO: Make fully evented by using systemd 209 with properties changed values - return s.conn.SubscribeUnitsCustom(time.Second, 0, - mismatchUnitStatus, - func(unit string) bool { return s.filter(unit) }, - ) -} - -// NewSubscriptionSet returns a new subscription set. -func (conn *Conn) NewSubscriptionSet() *SubscriptionSet { - return &SubscriptionSet{newSet(), conn} -} - -// mismatchUnitStatus returns true if the provided UnitStatus objects -// are not equivalent. false is returned if the objects are equivalent. -// Only the Name, Description and state-related fields are used in -// the comparison. 
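SubscriptionSet, defined in this deleted file, narrows the same polling machinery to an explicit set of unit names via the embedded set type; roughly (the unit name is illustrative):

func watchOneUnit(conn *sd.Conn, unit string) (<-chan map[string]*sd.UnitStatus, <-chan error) {
	s := conn.NewSubscriptionSet()
	s.Add(unit) // e.g. "nginx.service"; Add comes from the embedded set type shown earlier
	return s.Subscribe()
}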
-func mismatchUnitStatus(u1, u2 *UnitStatus) bool { - return u1.Name != u2.Name || - u1.Description != u2.Description || - u1.LoadState != u2.LoadState || - u1.ActiveState != u2.ActiveState || - u1.SubState != u2.SubState -} diff --git a/vendor/github.com/deislabs/cnab-go/action/action.go b/vendor/github.com/deislabs/cnab-go/action/action.go index 04b823e4..1fb919bb 100644 --- a/vendor/github.com/deislabs/cnab-go/action/action.go +++ b/vendor/github.com/deislabs/cnab-go/action/action.go @@ -4,7 +4,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "math" "strings" @@ -28,7 +27,7 @@ const stateful = false // - status type Action interface { // Run an action, and record the status in the given claim - Run(*claim.Claim, credentials.Set, io.Writer) error + Run(*claim.Claim, credentials.Set, ...OperationConfigFunc) error } func golangTypeToJSONType(value interface{}) (string, error) { @@ -176,19 +175,7 @@ func getImageMap(b *bundle.Bundle) ([]byte, error) { return json.Marshal(imgs) } -func appliesToAction(action string, parameter bundle.Parameter) bool { - if len(parameter.ApplyTo) == 0 { - return true - } - for _, act := range parameter.ApplyTo { - if action == act { - return true - } - } - return false -} - -func opFromClaim(action string, stateless bool, c *claim.Claim, ii bundle.InvocationImage, creds credentials.Set, w io.Writer) (*driver.Operation, error) { +func opFromClaim(action string, stateless bool, c *claim.Claim, ii bundle.InvocationImage, creds credentials.Set) (*driver.Operation, error) { env, files, err := creds.Expand(c.Bundle, stateless) if err != nil { return nil, err @@ -205,6 +192,13 @@ func opFromClaim(action string, stateless bool, c *claim.Claim, ii bundle.Invoca return nil, err } + bundleBytes, err := json.Marshal(c.Bundle) + if err != nil { + return nil, fmt.Errorf("failed to marshal bundle contents: %s", err) + } + + files["/cnab/bundle.json"] = string(bundleBytes) + imgMap, err := getImageMap(c.Bundle) if err != nil { return nil, fmt.Errorf("unable to generate image map: %s", err) @@ -227,13 +221,12 @@ func opFromClaim(action string, stateless bool, c *claim.Claim, ii bundle.Invoca Action: action, Installation: c.Name, Parameters: c.Parameters, - Image: ii.Image, - ImageType: ii.ImageType, + Image: ii, Revision: c.Revision, Environment: env, Files: files, Outputs: outputs, - Out: w, + Bundle: c.Bundle, }, nil } @@ -241,12 +234,27 @@ func injectParameters(action string, c *claim.Claim, env, files map[string]strin for k, param := range c.Bundle.Parameters { rawval, ok := c.Parameters[k] if !ok { - if param.Required && appliesToAction(action, param) { + if param.Required && param.AppliesTo(action) { return fmt.Errorf("missing required parameter %q for action %q", k, action) } continue } - value := fmt.Sprintf("%v", rawval) + + contents, err := json.Marshal(rawval) + if err != nil { + return err + } + + // In order to preserve the exact string value the user provided + // we don't marshal string parameters + value := string(contents) + if value[0] == '"' { + value, ok = rawval.(string) + if !ok { + return fmt.Errorf("failed to parse parameter %q as string", k) + } + } + if param.Destination == nil { // env is a CNAB_P_ env[fmt.Sprintf("CNAB_P_%s", strings.ToUpper(k))] = value diff --git a/vendor/github.com/deislabs/cnab-go/action/install.go b/vendor/github.com/deislabs/cnab-go/action/install.go index ddee71fe..b89648a5 100644 --- a/vendor/github.com/deislabs/cnab-go/action/install.go +++ b/vendor/github.com/deislabs/cnab-go/action/install.go @@ -1,8 +1,6 @@ package 
action import ( - "io" - "github.com/deislabs/cnab-go/claim" "github.com/deislabs/cnab-go/credentials" "github.com/deislabs/cnab-go/driver" @@ -14,13 +12,18 @@ type Install struct { } // Run performs an installation and updates the Claim accordingly -func (i *Install) Run(c *claim.Claim, creds credentials.Set, w io.Writer) error { +func (i *Install) Run(c *claim.Claim, creds credentials.Set, opCfgs ...OperationConfigFunc) error { invocImage, err := selectInvocationImage(i.Driver, c) if err != nil { return err } - op, err := opFromClaim(claim.ActionInstall, stateful, c, invocImage, creds, w) + op, err := opFromClaim(claim.ActionInstall, stateful, c, invocImage, creds) + if err != nil { + return err + } + + err = OperationConfigs(opCfgs).ApplyConfig(op) if err != nil { return err } diff --git a/vendor/github.com/deislabs/cnab-go/action/operation_configs.go b/vendor/github.com/deislabs/cnab-go/action/operation_configs.go new file mode 100644 index 00000000..e4babcad --- /dev/null +++ b/vendor/github.com/deislabs/cnab-go/action/operation_configs.go @@ -0,0 +1,26 @@ +package action + +import ( + "github.com/deislabs/cnab-go/driver" +) + +// OperationConfigFunc is a configuration function that can be applied to an +// operation. +type OperationConfigFunc func(op *driver.Operation) error + +// OperationConfigs is a set of configuration functions that can be applied as a +// unit to an operation. +type OperationConfigs []OperationConfigFunc + +// ApplyConfig safely applies the configuration function to the operation, if +// defined, and stops immediately upon the first error. +func (cfgs OperationConfigs) ApplyConfig(op *driver.Operation) error { + var err error + for _, cfg := range cfgs { + err = cfg(op) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/deislabs/cnab-go/action/run_custom.go b/vendor/github.com/deislabs/cnab-go/action/run_custom.go index b55f7df0..ccd554f4 100644 --- a/vendor/github.com/deislabs/cnab-go/action/run_custom.go +++ b/vendor/github.com/deislabs/cnab-go/action/run_custom.go @@ -2,7 +2,6 @@ package action import ( "errors" - "io" "github.com/deislabs/cnab-go/claim" "github.com/deislabs/cnab-go/credentials" @@ -28,7 +27,7 @@ type RunCustom struct { var blockedActions = map[string]struct{}{"install": {}, "uninstall": {}, "upgrade": {}} // Run executes a status action in an image -func (i *RunCustom) Run(c *claim.Claim, creds credentials.Set, w io.Writer) error { +func (i *RunCustom) Run(c *claim.Claim, creds credentials.Set, opCfgs ...OperationConfigFunc) error { if _, ok := blockedActions[i.Action]; ok { return ErrBlockedAction } @@ -43,7 +42,12 @@ func (i *RunCustom) Run(c *claim.Claim, creds credentials.Set, w io.Writer) erro return err } - op, err := opFromClaim(i.Action, actionDef.Stateless, c, invocImage, creds, w) + op, err := opFromClaim(i.Action, actionDef.Stateless, c, invocImage, creds) + if err != nil { + return err + } + + err = OperationConfigs(opCfgs).ApplyConfig(op) if err != nil { return err } diff --git a/vendor/github.com/deislabs/cnab-go/action/status.go b/vendor/github.com/deislabs/cnab-go/action/status.go index ba420cc0..d05829f3 100644 --- a/vendor/github.com/deislabs/cnab-go/action/status.go +++ b/vendor/github.com/deislabs/cnab-go/action/status.go @@ -1,8 +1,6 @@ package action import ( - "io" - "github.com/deislabs/cnab-go/claim" "github.com/deislabs/cnab-go/credentials" "github.com/deislabs/cnab-go/driver" @@ -14,13 +12,18 @@ type Status struct { } // Run executes a status action in an image -func (i *Status) 
Run(c *claim.Claim, creds credentials.Set, w io.Writer) error { +func (i *Status) Run(c *claim.Claim, creds credentials.Set, opCfgs ...OperationConfigFunc) error { invocImage, err := selectInvocationImage(i.Driver, c) if err != nil { return err } - op, err := opFromClaim(claim.ActionStatus, stateful, c, invocImage, creds, w) + op, err := opFromClaim(claim.ActionStatus, stateful, c, invocImage, creds) + if err != nil { + return err + } + + err = OperationConfigs(opCfgs).ApplyConfig(op) if err != nil { return err } diff --git a/vendor/github.com/deislabs/cnab-go/action/uninstall.go b/vendor/github.com/deislabs/cnab-go/action/uninstall.go index db96d690..4744b623 100644 --- a/vendor/github.com/deislabs/cnab-go/action/uninstall.go +++ b/vendor/github.com/deislabs/cnab-go/action/uninstall.go @@ -1,8 +1,6 @@ package action import ( - "io" - "github.com/deislabs/cnab-go/claim" "github.com/deislabs/cnab-go/credentials" "github.com/deislabs/cnab-go/driver" @@ -14,13 +12,18 @@ type Uninstall struct { } // Run performs the uninstall steps and updates the Claim -func (u *Uninstall) Run(c *claim.Claim, creds credentials.Set, w io.Writer) error { +func (u *Uninstall) Run(c *claim.Claim, creds credentials.Set, opCfgs ...OperationConfigFunc) error { invocImage, err := selectInvocationImage(u.Driver, c) if err != nil { return err } - op, err := opFromClaim(claim.ActionUninstall, stateful, c, invocImage, creds, w) + op, err := opFromClaim(claim.ActionUninstall, stateful, c, invocImage, creds) + if err != nil { + return err + } + + err = OperationConfigs(opCfgs).ApplyConfig(op) if err != nil { return err } diff --git a/vendor/github.com/deislabs/cnab-go/action/upgrade.go b/vendor/github.com/deislabs/cnab-go/action/upgrade.go index 0655d4d9..c04d88ca 100644 --- a/vendor/github.com/deislabs/cnab-go/action/upgrade.go +++ b/vendor/github.com/deislabs/cnab-go/action/upgrade.go @@ -1,8 +1,6 @@ package action import ( - "io" - "github.com/deislabs/cnab-go/claim" "github.com/deislabs/cnab-go/credentials" "github.com/deislabs/cnab-go/driver" @@ -14,13 +12,18 @@ type Upgrade struct { } // Run performs the upgrade steps and updates the Claim -func (u *Upgrade) Run(c *claim.Claim, creds credentials.Set, w io.Writer) error { +func (u *Upgrade) Run(c *claim.Claim, creds credentials.Set, opCfgs ...OperationConfigFunc) error { invocImage, err := selectInvocationImage(u.Driver, c) if err != nil { return err } - op, err := opFromClaim(claim.ActionUpgrade, stateful, c, invocImage, creds, w) + op, err := opFromClaim(claim.ActionUpgrade, stateful, c, invocImage, creds) + if err != nil { + return err + } + + err = OperationConfigs(opCfgs).ApplyConfig(op) if err != nil { return err } diff --git a/vendor/github.com/deislabs/cnab-go/bundle/bundle.go b/vendor/github.com/deislabs/cnab-go/bundle/bundle.go index 3c344992..043cab14 100644 --- a/vendor/github.com/deislabs/cnab-go/bundle/bundle.go +++ b/vendor/github.com/deislabs/cnab-go/bundle/bundle.go @@ -87,17 +87,38 @@ type BaseImage struct { MediaType string `json:"mediaType,omitempty" yaml:"mediaType,omitempty"` } +func (i *BaseImage) DeepCopy() *BaseImage { + i2 := *i + i2.Labels = make(map[string]string, len(i.Labels)) + for key, value := range i.Labels { + i2.Labels[key] = value + } + return &i2 +} + // Image describes a container image in the bundle type Image struct { BaseImage `yaml:",inline"` Description string `json:"description" yaml:"description"` //TODO: change? see where it's being used? change to description? 
} +func (i *Image) DeepCopy() *Image { + i2 := *i + i2.BaseImage = *i.BaseImage.DeepCopy() + return &i2 +} + // InvocationImage contains the image type and location for the installation of a bundle type InvocationImage struct { BaseImage `yaml:",inline"` } +func (img *InvocationImage) DeepCopy() *InvocationImage { + img2 := *img + img2.BaseImage = *img.BaseImage.DeepCopy() + return &img2 +} + // Location provides the location where a value should be written in // the invocation image. // diff --git a/vendor/github.com/deislabs/cnab-go/bundle/definition/schema.go b/vendor/github.com/deislabs/cnab-go/bundle/definition/schema.go index 6b3e7dfb..25269f7c 100644 --- a/vendor/github.com/deislabs/cnab-go/bundle/definition/schema.go +++ b/vendor/github.com/deislabs/cnab-go/bundle/definition/schema.go @@ -6,7 +6,6 @@ import ( "strings" "github.com/pkg/errors" - "github.com/qri-io/jsonschema" ) type Definitions map[string]*Schema @@ -31,19 +30,19 @@ type Schema struct { Else *Schema `json:"else,omitempty" yaml:"else,omitempty"` Enum []interface{} `json:"enum,omitempty" yaml:"enum,omitempty"` Examples []interface{} `json:"examples,omitempty" yaml:"examples,omitempty"` - ExclusiveMaximum *float64 `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"` - ExclusiveMinimum *float64 `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"` + ExclusiveMaximum *int `json:"exclusiveMaximum,omitempty" yaml:"exclusiveMaximum,omitempty"` + ExclusiveMinimum *int `json:"exclusiveMinimum,omitempty" yaml:"exclusiveMinimum,omitempty"` Format string `json:"format,omitempty" yaml:"format,omitempty"` If *Schema `json:"if,omitempty" yaml:"if,omitempty"` //Items can be a Schema or an Array of Schema :( Items interface{} `json:"items,omitempty" yaml:"items,omitempty"` - Maximum *float64 `json:"maximum,omitempty" yaml:"maximum,omitempty"` - MaxLength *float64 `json:"maxLength,omitempty" yaml:"maxLength,omitempty"` - MinItems *float64 `json:"minItems,omitempty" yaml:"minItems,omitempty"` - MinLength *float64 `json:"minLength,omitempty" yaml:"minLength,omitempty"` - MinProperties *float64 `json:"minProperties,omitempty" yaml:"minProperties,omitempty"` - Minimum *float64 `json:"minimum,omitempty" yaml:"minimum,omitempty"` - MultipleOf *float64 `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"` + Maximum *int `json:"maximum,omitempty" yaml:"maximum,omitempty"` + MaxLength *int `json:"maxLength,omitempty" yaml:"maxLength,omitempty"` + MinItems *int `json:"minItems,omitempty" yaml:"minItems,omitempty"` + MinLength *int `json:"minLength,omitempty" yaml:"minLength,omitempty"` + MinProperties *int `json:"minProperties,omitempty" yaml:"minProperties,omitempty"` + Minimum *int `json:"minimum,omitempty" yaml:"minimum,omitempty"` + MultipleOf *int `json:"multipleOf,omitempty" yaml:"multipleOf,omitempty"` Not *Schema `json:"not,omitempty" yaml:"not,omitempty"` OneOf *Schema `json:"oneOf,omitempty" yaml:"oneOf,omitempty"` @@ -96,7 +95,7 @@ func (s *Schema) UnmarshalJSON(data []byte) error { // Before we unmarshal into the cnab-go bundle/definition/Schema type, unmarshal into // the library struct so we can handle any validation errors in the schema. If there // are any errors, return those. 
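Returning to the cnab-go API change earlier in this patch: Action.Run no longer accepts an io.Writer; callers pass OperationConfigFunc values instead, and the writer (along with anything else operation-scoped) is set directly on the driver.Operation, whose Out field appears in the driver.go hunk further down. A hedged sketch of the new calling convention, assuming the action, claim, credentials and driver packages from cnab-go plus os, and assuming Install still exposes its Driver field as in earlier releases:

func runInstall(d driver.Driver, c *claim.Claim, creds credentials.Set) error {
	inst := &action.Install{Driver: d}
	return inst.Run(c, creds, func(op *driver.Operation) error {
		op.Out = os.Stdout // stands in for the io.Writer formerly passed to Run
		return nil
	})
}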
- js := new(jsonschema.RootSchema) + js := NewRootSchema() if err := js.UnmarshalJSON(data); err != nil { return err } @@ -121,12 +120,6 @@ func (s *Schema) ConvertValue(val string) (interface{}, error) { switch dataType { case "string": return val, nil - case "number": - num, err := strconv.ParseFloat(val, 64) - if err != nil { - return nil, errors.Wrapf(err, "unable to convert %s to number", val) - } - return num, nil case "integer": return strconv.Atoi(val) case "boolean": diff --git a/vendor/github.com/deislabs/cnab-go/bundle/definition/validation.go b/vendor/github.com/deislabs/cnab-go/bundle/definition/validation.go index 4a1511e2..058d9f92 100644 --- a/vendor/github.com/deislabs/cnab-go/bundle/definition/validation.go +++ b/vendor/github.com/deislabs/cnab-go/bundle/definition/validation.go @@ -4,7 +4,6 @@ import ( "encoding/json" "github.com/pkg/errors" - "github.com/qri-io/jsonschema" ) // ValidationError error represents a validation error @@ -24,7 +23,7 @@ func (s *Schema) Validate(data interface{}) ([]ValidationError, error) { if err != nil { return nil, errors.Wrap(err, "unable to load schema") } - def := new(jsonschema.RootSchema) + def := NewRootSchema() err = json.Unmarshal([]byte(b), def) if err != nil { return nil, errors.Wrap(err, "unable to build schema") diff --git a/vendor/github.com/deislabs/cnab-go/bundle/definition/validators.go b/vendor/github.com/deislabs/cnab-go/bundle/definition/validators.go new file mode 100644 index 00000000..ddc56a81 --- /dev/null +++ b/vendor/github.com/deislabs/cnab-go/bundle/definition/validators.go @@ -0,0 +1,45 @@ +package definition + +import ( + "encoding/base64" + "fmt" + + "github.com/qri-io/jsonschema" +) + +// ContentEncoding represents a "custom" Schema property +type ContentEncoding string + +// NewContentEncoding allocates a new ContentEncoding validator +func NewContentEncoding() jsonschema.Validator { + return new(ContentEncoding) +} + +// Validate implements the Validator interface for ContentEncoding +// which, as of writing, isn't included by default in the jsonschema library we consume +func (c ContentEncoding) Validate(propPath string, data interface{}, errs *[]jsonschema.ValError) { + if obj, ok := data.(string); ok { + switch c { + case "base64": + _, err := base64.StdEncoding.DecodeString(obj) + if err != nil { + jsonschema.AddError(errs, propPath, data, fmt.Sprintf("invalid %s value: %s", c, obj)) + } + // Add validation support for other encodings as needed + // See https://json-schema.org/latest/json-schema-validation.html#rfc.section.8.3 + default: + jsonschema.AddError(errs, propPath, data, fmt.Sprintf("unsupported or invalid contentEncoding type of %s", c)) + } + } +} + +// NewRootSchema returns a jsonschema.RootSchema with any needed custom +// jsonschema.Validators pre-registered +func NewRootSchema() *jsonschema.RootSchema { + // Register custom validators here + // Note: as of writing, jsonschema doesn't have a stock validator for instances of type `contentEncoding` + // There may be others missing in the library that exist in http://json-schema.org/draft-07/schema# + // and thus, we'd need to create/register them here (if not included upstream) + jsonschema.RegisterValidator("contentEncoding", NewContentEncoding) + return new(jsonschema.RootSchema) +} diff --git a/vendor/github.com/deislabs/cnab-go/bundle/outputs.go b/vendor/github.com/deislabs/cnab-go/bundle/outputs.go index a17cfa80..c5aa5ded 100644 --- a/vendor/github.com/deislabs/cnab-go/bundle/outputs.go +++ 
b/vendor/github.com/deislabs/cnab-go/bundle/outputs.go @@ -6,3 +6,17 @@ type Output struct { Description string `json:"description,omitempty" yaml:"description,omitempty"` Path string `json:"path" yaml:"path"` } + +// AppliesTo returns a boolean value specifying whether or not +// the Output applies to the provided action +func (output *Output) AppliesTo(action string) bool { + if len(output.ApplyTo) == 0 { + return true + } + for _, act := range output.ApplyTo { + if action == act { + return true + } + } + return false +} diff --git a/vendor/github.com/deislabs/cnab-go/bundle/parameters.go b/vendor/github.com/deislabs/cnab-go/bundle/parameters.go index c461c577..de3992f7 100644 --- a/vendor/github.com/deislabs/cnab-go/bundle/parameters.go +++ b/vendor/github.com/deislabs/cnab-go/bundle/parameters.go @@ -8,3 +8,17 @@ type Parameter struct { Destination *Location `json:"destination,omitemtpty" yaml:"destination,omitempty"` Required bool `json:"required,omitempty" yaml:"required,omitempty"` } + +// AppliesTo returns a boolean value specifying whether or not +// the Parameter applies to the provided action +func (parameter *Parameter) AppliesTo(action string) bool { + if len(parameter.ApplyTo) == 0 { + return true + } + for _, act := range parameter.ApplyTo { + if action == act { + return true + } + } + return false +} diff --git a/vendor/github.com/deislabs/cnab-go/claim/claim.go b/vendor/github.com/deislabs/cnab-go/claim/claim.go index 1355c512..95c83202 100644 --- a/vendor/github.com/deislabs/cnab-go/claim/claim.go +++ b/vendor/github.com/deislabs/cnab-go/claim/claim.go @@ -48,7 +48,7 @@ type Claim struct { } // ValidName is a regular expression that indicates whether a name is a valid claim name. -var ValidName = regexp.MustCompile("^[a-zA-Z0-9_-]+$") +var ValidName = regexp.MustCompile("^[a-zA-Z0-9._-]+$") // New creates a new Claim initialized for an installation operation. func New(name string) (*Claim, error) { diff --git a/vendor/github.com/deislabs/cnab-go/driver/docker/docker.go b/vendor/github.com/deislabs/cnab-go/driver/docker/docker.go index 181c9851..5d96830c 100644 --- a/vendor/github.com/deislabs/cnab-go/driver/docker/docker.go +++ b/vendor/github.com/deislabs/cnab-go/driver/docker/docker.go @@ -55,11 +55,20 @@ func (d *Driver) Config() map[string]string { "PULL_ALWAYS": "Always pull image, even if locally available (0|1)", "DOCKER_DRIVER_QUIET": "Make the Docker driver quiet (only print container stdout/stderr)", "OUTPUTS_MOUNT_PATH": "Absolute path to where Docker driver can create temporary directories to bundle outputs. Defaults to temp dir.", + "CLEANUP_CONTAINERS": "If true, the docker container will be destroyed when it finishes running. If false, it will not be destroyed. The supported values are true and false. Defaults to true.", } } // SetConfig sets Docker driver configuration func (d *Driver) SetConfig(settings map[string]string) { + // Set default and provide feedback on acceptable input values. + value, ok := settings["CLEANUP_CONTAINERS"] + if !ok { + settings["CLEANUP_CONTAINERS"] = "true" + } else if value != "true" && value != "false" { + fmt.Printf("CLEANUP_CONTAINERS environment variable has unexpected value %q. 
Supported values are 'true', 'false', or unset.", value) + } + d.config = settings } @@ -137,7 +146,7 @@ func (d *Driver) exec(op *driver.Operation) (driver.OperationResult, error) { return driver.OperationResult{}, nil } if d.config["PULL_ALWAYS"] == "1" { - if err := pullImage(ctx, cli, op.Image); err != nil { + if err := pullImage(ctx, cli, op.Image.Image); err != nil { return driver.OperationResult{}, err } } @@ -147,7 +156,7 @@ func (d *Driver) exec(op *driver.Operation) (driver.OperationResult, error) { } cfg := &container.Config{ - Image: op.Image, + Image: op.Image.Image, Env: env, Entrypoint: strslice.StrSlice{"/cnab/app/run"}, AttachStderr: true, @@ -164,8 +173,8 @@ func (d *Driver) exec(op *driver.Operation) (driver.OperationResult, error) { resp, err := cli.Client().ContainerCreate(ctx, cfg, hostCfg, nil, "") switch { case client.IsErrNotFound(err): - fmt.Fprintf(cli.Err(), "Unable to find image '%s' locally\n", op.Image) - if err := pullImage(ctx, cli, op.Image); err != nil { + fmt.Fprintf(cli.Err(), "Unable to find image '%s' locally\n", op.Image.Image) + if err := pullImage(ctx, cli, op.Image.Image); err != nil { return driver.OperationResult{}, err } if resp, err = cli.Client().ContainerCreate(ctx, cfg, hostCfg, nil, ""); err != nil { @@ -175,7 +184,9 @@ func (d *Driver) exec(op *driver.Operation) (driver.OperationResult, error) { return driver.OperationResult{}, fmt.Errorf("cannot create container: %v", err) } - defer cli.Client().ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{}) + if d.config["CLEANUP_CONTAINERS"] == "true" { + defer cli.Client().ContainerRemove(ctx, resp.ID, types.ContainerRemoveOptions{}) + } tarContent, err := generateTar(op.Files) if err != nil { @@ -301,9 +312,36 @@ func (d *Driver) fetchOutputs(ctx context.Context, container string, op *driver. return opResult, err } + // if an applicable output is expected but does not exist and it has a + // non-empty default value, create an entry in the map with the + // default value as its contents + for name, output := range op.Bundle.Outputs { + filepath := unix_path.Join("/cnab", "app", "outputs", name) + if !existsInOutputsMap(opResult.Outputs, filepath) && output.AppliesTo(op.Action) { + if outputDefinition, exists := op.Bundle.Definitions[output.Definition]; exists { + outputDefault := outputDefinition.Default + if outputDefault != nil { + contents := fmt.Sprintf("%v", outputDefault) + opResult.Outputs[filepath] = contents + } else { + return opResult, fmt.Errorf("required output %s is missing and has no default", name) + } + } + } + } + return opResult, nil } +func existsInOutputsMap(outputsMap map[string]string, path string) bool { + for outputPath := range outputsMap { + if outputPath == path { + return true + } + } + return false +} + func generateTar(files map[string]string) (io.Reader, error) { r, w := io.Pipe() tw := tar.NewWriter(w) diff --git a/vendor/github.com/deislabs/cnab-go/driver/driver.go b/vendor/github.com/deislabs/cnab-go/driver/driver.go index 3039b416..7c93e2df 100644 --- a/vendor/github.com/deislabs/cnab-go/driver/driver.go +++ b/vendor/github.com/deislabs/cnab-go/driver/driver.go @@ -4,6 +4,7 @@ import ( "fmt" "io" + "github.com/deislabs/cnab-go/bundle" "github.com/docker/go/canonical/json" ) @@ -26,9 +27,7 @@ type Operation struct { // Parameters are the parameters to be injected into the container Parameters map[string]interface{} `json:"parameters"` // Image is the invocation image - Image string `json:"image"` - // ImageType is the type of image. 
- ImageType string `json:"image_type"` + Image bundle.InvocationImage `json:"image"` // Environment contains environment variables that should be injected into the invocation image Environment map[string]string `json:"environment"` // Files contains files that should be injected into the invocation image. @@ -37,6 +36,8 @@ type Operation struct { Outputs []string `json:"outputs"` // Output stream for log messages from the driver Out io.Writer `json:"-"` + // Bundle represents the bundle information for use by the operation + Bundle *bundle.Bundle } // ResolvedCred is a credential that has been resolved and is ready for injection into the runtime. diff --git a/vendor/github.com/godbus/dbus/LICENSE b/vendor/github.com/godbus/dbus/LICENSE deleted file mode 100644 index 670d88fc..00000000 --- a/vendor/github.com/godbus/dbus/LICENSE +++ /dev/null @@ -1,25 +0,0 @@ -Copyright (c) 2013, Georg Reinke (), Google -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/godbus/dbus/auth.go b/vendor/github.com/godbus/dbus/auth.go deleted file mode 100644 index b0dcb54e..00000000 --- a/vendor/github.com/godbus/dbus/auth.go +++ /dev/null @@ -1,252 +0,0 @@ -package dbus - -import ( - "bufio" - "bytes" - "errors" - "io" - "os" - "strconv" -) - -// AuthStatus represents the Status of an authentication mechanism. -type AuthStatus byte - -const ( - // AuthOk signals that authentication is finished; the next command - // from the server should be an OK. - AuthOk AuthStatus = iota - - // AuthContinue signals that additional data is needed; the next command - // from the server should be a DATA. - AuthContinue - - // AuthError signals an error; the server sent invalid data or some - // other unexpected thing happened and the current authentication - // process should be aborted. - AuthError -) - -type authState byte - -const ( - waitingForData authState = iota - waitingForOk - waitingForReject -) - -// Auth defines the behaviour of an authentication mechanism. -type Auth interface { - // Return the name of the mechnism, the argument to the first AUTH command - // and the next status. - FirstData() (name, resp []byte, status AuthStatus) - - // Process the given DATA command, and return the argument to the DATA - // command and the next status. 
If len(resp) == 0, no DATA command is sent. - HandleData(data []byte) (resp []byte, status AuthStatus) -} - -// Auth authenticates the connection, trying the given list of authentication -// mechanisms (in that order). If nil is passed, the EXTERNAL and -// DBUS_COOKIE_SHA1 mechanisms are tried for the current user. For private -// connections, this method must be called before sending any messages to the -// bus. Auth must not be called on shared connections. -func (conn *Conn) Auth(methods []Auth) error { - if methods == nil { - uid := strconv.Itoa(os.Getuid()) - methods = []Auth{AuthExternal(uid), AuthCookieSha1(uid, getHomeDir())} - } - in := bufio.NewReader(conn.transport) - err := conn.transport.SendNullByte() - if err != nil { - return err - } - err = authWriteLine(conn.transport, []byte("AUTH")) - if err != nil { - return err - } - s, err := authReadLine(in) - if err != nil { - return err - } - if len(s) < 2 || !bytes.Equal(s[0], []byte("REJECTED")) { - return errors.New("dbus: authentication protocol error") - } - s = s[1:] - for _, v := range s { - for _, m := range methods { - if name, data, status := m.FirstData(); bytes.Equal(v, name) { - var ok bool - err = authWriteLine(conn.transport, []byte("AUTH"), []byte(v), data) - if err != nil { - return err - } - switch status { - case AuthOk: - err, ok = conn.tryAuth(m, waitingForOk, in) - case AuthContinue: - err, ok = conn.tryAuth(m, waitingForData, in) - default: - panic("dbus: invalid authentication status") - } - if err != nil { - return err - } - if ok { - if conn.transport.SupportsUnixFDs() { - err = authWriteLine(conn, []byte("NEGOTIATE_UNIX_FD")) - if err != nil { - return err - } - line, err := authReadLine(in) - if err != nil { - return err - } - switch { - case bytes.Equal(line[0], []byte("AGREE_UNIX_FD")): - conn.EnableUnixFDs() - conn.unixFD = true - case bytes.Equal(line[0], []byte("ERROR")): - default: - return errors.New("dbus: authentication protocol error") - } - } - err = authWriteLine(conn.transport, []byte("BEGIN")) - if err != nil { - return err - } - go conn.inWorker() - return nil - } - } - } - } - return errors.New("dbus: authentication failed") -} - -// tryAuth tries to authenticate with m as the mechanism, using state as the -// initial authState and in for reading input. It returns (nil, true) on -// success, (nil, false) on a REJECTED and (someErr, false) if some other -// error occured. 
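As the Auth doc comment above says, private connections must authenticate (and, per the Hello doc further down, call Hello) before any other traffic; passing nil tries EXTERNAL and then DBUS_COOKIE_SHA1 for the current user. A typical private-connection setup with this package (import path github.com/godbus/dbus, as in the files being removed) looks roughly like:

func privateSessionConn() (*dbus.Conn, error) {
	conn, err := dbus.SessionBusPrivate() // private: Auth and Hello are the caller's job
	if err != nil {
		return nil, err
	}
	if err := conn.Auth(nil); err != nil { // nil => EXTERNAL, then DBUS_COOKIE_SHA1
		conn.Close()
		return nil, err
	}
	if err := conn.Hello(); err != nil {
		conn.Close()
		return nil, err
	}
	return conn, nil
}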
-func (conn *Conn) tryAuth(m Auth, state authState, in *bufio.Reader) (error, bool) { - for { - s, err := authReadLine(in) - if err != nil { - return err, false - } - switch { - case state == waitingForData && string(s[0]) == "DATA": - if len(s) != 2 { - err = authWriteLine(conn.transport, []byte("ERROR")) - if err != nil { - return err, false - } - continue - } - data, status := m.HandleData(s[1]) - switch status { - case AuthOk, AuthContinue: - if len(data) != 0 { - err = authWriteLine(conn.transport, []byte("DATA"), data) - if err != nil { - return err, false - } - } - if status == AuthOk { - state = waitingForOk - } - case AuthError: - err = authWriteLine(conn.transport, []byte("ERROR")) - if err != nil { - return err, false - } - } - case state == waitingForData && string(s[0]) == "REJECTED": - return nil, false - case state == waitingForData && string(s[0]) == "ERROR": - err = authWriteLine(conn.transport, []byte("CANCEL")) - if err != nil { - return err, false - } - state = waitingForReject - case state == waitingForData && string(s[0]) == "OK": - if len(s) != 2 { - err = authWriteLine(conn.transport, []byte("CANCEL")) - if err != nil { - return err, false - } - state = waitingForReject - } - conn.uuid = string(s[1]) - return nil, true - case state == waitingForData: - err = authWriteLine(conn.transport, []byte("ERROR")) - if err != nil { - return err, false - } - case state == waitingForOk && string(s[0]) == "OK": - if len(s) != 2 { - err = authWriteLine(conn.transport, []byte("CANCEL")) - if err != nil { - return err, false - } - state = waitingForReject - } - conn.uuid = string(s[1]) - return nil, true - case state == waitingForOk && string(s[0]) == "REJECTED": - return nil, false - case state == waitingForOk && (string(s[0]) == "DATA" || - string(s[0]) == "ERROR"): - - err = authWriteLine(conn.transport, []byte("CANCEL")) - if err != nil { - return err, false - } - state = waitingForReject - case state == waitingForOk: - err = authWriteLine(conn.transport, []byte("ERROR")) - if err != nil { - return err, false - } - case state == waitingForReject && string(s[0]) == "REJECTED": - return nil, false - case state == waitingForReject: - return errors.New("dbus: authentication protocol error"), false - default: - panic("dbus: invalid auth state") - } - } -} - -// authReadLine reads a line and separates it into its fields. -func authReadLine(in *bufio.Reader) ([][]byte, error) { - data, err := in.ReadBytes('\n') - if err != nil { - return nil, err - } - data = bytes.TrimSuffix(data, []byte("\r\n")) - return bytes.Split(data, []byte{' '}), nil -} - -// authWriteLine writes the given line in the authentication protocol format -// (elements of data separated by a " " and terminated by "\r\n"). -func authWriteLine(out io.Writer, data ...[]byte) error { - buf := make([]byte, 0) - for i, v := range data { - buf = append(buf, v...) - if i != len(data)-1 { - buf = append(buf, ' ') - } - } - buf = append(buf, '\r') - buf = append(buf, '\n') - n, err := out.Write(buf) - if err != nil { - return err - } - if n != len(buf) { - return io.ErrUnexpectedEOF - } - return nil -} diff --git a/vendor/github.com/godbus/dbus/auth_anonymous.go b/vendor/github.com/godbus/dbus/auth_anonymous.go deleted file mode 100644 index 75f3ad34..00000000 --- a/vendor/github.com/godbus/dbus/auth_anonymous.go +++ /dev/null @@ -1,16 +0,0 @@ -package dbus - -// AuthAnonymous returns an Auth that uses the ANONYMOUS mechanism. 
-func AuthAnonymous() Auth { - return &authAnonymous{} -} - -type authAnonymous struct{} - -func (a *authAnonymous) FirstData() (name, resp []byte, status AuthStatus) { - return []byte("ANONYMOUS"), nil, AuthOk -} - -func (a *authAnonymous) HandleData(data []byte) (resp []byte, status AuthStatus) { - return nil, AuthError -} diff --git a/vendor/github.com/godbus/dbus/auth_external.go b/vendor/github.com/godbus/dbus/auth_external.go deleted file mode 100644 index 7e376d3e..00000000 --- a/vendor/github.com/godbus/dbus/auth_external.go +++ /dev/null @@ -1,26 +0,0 @@ -package dbus - -import ( - "encoding/hex" -) - -// AuthExternal returns an Auth that authenticates as the given user with the -// EXTERNAL mechanism. -func AuthExternal(user string) Auth { - return authExternal{user} -} - -// AuthExternal implements the EXTERNAL authentication mechanism. -type authExternal struct { - user string -} - -func (a authExternal) FirstData() ([]byte, []byte, AuthStatus) { - b := make([]byte, 2*len(a.user)) - hex.Encode(b, []byte(a.user)) - return []byte("EXTERNAL"), b, AuthOk -} - -func (a authExternal) HandleData(b []byte) ([]byte, AuthStatus) { - return nil, AuthError -} diff --git a/vendor/github.com/godbus/dbus/auth_sha1.go b/vendor/github.com/godbus/dbus/auth_sha1.go deleted file mode 100644 index df15b461..00000000 --- a/vendor/github.com/godbus/dbus/auth_sha1.go +++ /dev/null @@ -1,102 +0,0 @@ -package dbus - -import ( - "bufio" - "bytes" - "crypto/rand" - "crypto/sha1" - "encoding/hex" - "os" -) - -// AuthCookieSha1 returns an Auth that authenticates as the given user with the -// DBUS_COOKIE_SHA1 mechanism. The home parameter should specify the home -// directory of the user. -func AuthCookieSha1(user, home string) Auth { - return authCookieSha1{user, home} -} - -type authCookieSha1 struct { - user, home string -} - -func (a authCookieSha1) FirstData() ([]byte, []byte, AuthStatus) { - b := make([]byte, 2*len(a.user)) - hex.Encode(b, []byte(a.user)) - return []byte("DBUS_COOKIE_SHA1"), b, AuthContinue -} - -func (a authCookieSha1) HandleData(data []byte) ([]byte, AuthStatus) { - challenge := make([]byte, len(data)/2) - _, err := hex.Decode(challenge, data) - if err != nil { - return nil, AuthError - } - b := bytes.Split(challenge, []byte{' '}) - if len(b) != 3 { - return nil, AuthError - } - context := b[0] - id := b[1] - svchallenge := b[2] - cookie := a.getCookie(context, id) - if cookie == nil { - return nil, AuthError - } - clchallenge := a.generateChallenge() - if clchallenge == nil { - return nil, AuthError - } - hash := sha1.New() - hash.Write(bytes.Join([][]byte{svchallenge, clchallenge, cookie}, []byte{':'})) - hexhash := make([]byte, 2*hash.Size()) - hex.Encode(hexhash, hash.Sum(nil)) - data = append(clchallenge, ' ') - data = append(data, hexhash...) - resp := make([]byte, 2*len(data)) - hex.Encode(resp, data) - return resp, AuthOk -} - -// getCookie searches for the cookie identified by id in context and returns -// the cookie content or nil. (Since HandleData can't return a specific error, -// but only whether an error occured, this function also doesn't bother to -// return an error.) 
-func (a authCookieSha1) getCookie(context, id []byte) []byte { - file, err := os.Open(a.home + "/.dbus-keyrings/" + string(context)) - if err != nil { - return nil - } - defer file.Close() - rd := bufio.NewReader(file) - for { - line, err := rd.ReadBytes('\n') - if err != nil { - return nil - } - line = line[:len(line)-1] - b := bytes.Split(line, []byte{' '}) - if len(b) != 3 { - return nil - } - if bytes.Equal(b[0], id) { - return b[2] - } - } -} - -// generateChallenge returns a random, hex-encoded challenge, or nil on error -// (see above). -func (a authCookieSha1) generateChallenge() []byte { - b := make([]byte, 16) - n, err := rand.Read(b) - if err != nil { - return nil - } - if n != 16 { - return nil - } - enc := make([]byte, 32) - hex.Encode(enc, b) - return enc -} diff --git a/vendor/github.com/godbus/dbus/call.go b/vendor/github.com/godbus/dbus/call.go deleted file mode 100644 index 2cb18901..00000000 --- a/vendor/github.com/godbus/dbus/call.go +++ /dev/null @@ -1,60 +0,0 @@ -package dbus - -import ( - "context" - "errors" -) - -var errSignature = errors.New("dbus: mismatched signature") - -// Call represents a pending or completed method call. -type Call struct { - Destination string - Path ObjectPath - Method string - Args []interface{} - - // Strobes when the call is complete. - Done chan *Call - - // After completion, the error status. If this is non-nil, it may be an - // error message from the peer (with Error as its type) or some other error. - Err error - - // Holds the response once the call is done. - Body []interface{} - - // tracks context and canceler - ctx context.Context - ctxCanceler context.CancelFunc -} - -func (c *Call) Context() context.Context { - if c.ctx == nil { - return context.Background() - } - - return c.ctx -} - -func (c *Call) ContextCancel() { - if c.ctxCanceler != nil { - c.ctxCanceler() - } -} - -// Store stores the body of the reply into the provided pointers. It returns -// an error if the signatures of the body and retvalues don't match, or if -// the error status is not nil. -func (c *Call) Store(retvalues ...interface{}) error { - if c.Err != nil { - return c.Err - } - - return Store(c.Body, retvalues...) -} - -func (c *Call) done() { - c.Done <- c - c.ContextCancel() -} diff --git a/vendor/github.com/godbus/dbus/conn.go b/vendor/github.com/godbus/dbus/conn.go deleted file mode 100644 index b38920ba..00000000 --- a/vendor/github.com/godbus/dbus/conn.go +++ /dev/null @@ -1,847 +0,0 @@ -package dbus - -import ( - "context" - "errors" - "io" - "os" - "reflect" - "strings" - "sync" -) - -var ( - systemBus *Conn - systemBusLck sync.Mutex - sessionBus *Conn - sessionBusLck sync.Mutex -) - -// ErrClosed is the error returned by calls on a closed connection. -var ErrClosed = errors.New("dbus: connection closed by user") - -// Conn represents a connection to a message bus (usually, the system or -// session bus). -// -// Connections are either shared or private. Shared connections -// are shared between calls to the functions that return them. As a result, -// the methods Close, Auth and Hello must not be called on them. -// -// Multiple goroutines may invoke methods on a connection simultaneously. 
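The shared-versus-private distinction in the Conn doc comment above is the main usage rule: on the shared SystemBus/SessionBus connections you never call Close, Auth or Hello yourself. A small sketch of a method call over the shared system bus (ListNames is a standard org.freedesktop.DBus method; the 0 argument means no call flags):

func listBusNames() ([]string, error) {
	conn, err := dbus.SystemBus() // shared connection: do not Close/Auth/Hello it
	if err != nil {
		return nil, err
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")
	var names []string
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		return nil, err
	}
	return names, nil
}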
-type Conn struct { - transport - - busObj BusObject - unixFD bool - uuid string - - handler Handler - signalHandler SignalHandler - serialGen SerialGenerator - - names *nameTracker - calls *callTracker - outHandler *outputHandler - - eavesdropped chan<- *Message - eavesdroppedLck sync.Mutex -} - -// SessionBus returns a shared connection to the session bus, connecting to it -// if not already done. -func SessionBus() (conn *Conn, err error) { - sessionBusLck.Lock() - defer sessionBusLck.Unlock() - if sessionBus != nil { - return sessionBus, nil - } - defer func() { - if conn != nil { - sessionBus = conn - } - }() - conn, err = SessionBusPrivate() - if err != nil { - return - } - if err = conn.Auth(nil); err != nil { - conn.Close() - conn = nil - return - } - if err = conn.Hello(); err != nil { - conn.Close() - conn = nil - } - return -} - -func getSessionBusAddress() (string, error) { - if address := os.Getenv("DBUS_SESSION_BUS_ADDRESS"); address != "" && address != "autolaunch:" { - return address, nil - - } else if address := tryDiscoverDbusSessionBusAddress(); address != "" { - os.Setenv("DBUS_SESSION_BUS_ADDRESS", address) - return address, nil - } - return getSessionBusPlatformAddress() -} - -// SessionBusPrivate returns a new private connection to the session bus. -func SessionBusPrivate(opts ...ConnOption) (*Conn, error) { - address, err := getSessionBusAddress() - if err != nil { - return nil, err - } - - return Dial(address, opts...) -} - -// SessionBusPrivate returns a new private connection to the session bus. -// -// Deprecated: use SessionBusPrivate with options instead. -func SessionBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { - return SessionBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler)) -} - -// SystemBus returns a shared connection to the system bus, connecting to it if -// not already done. -func SystemBus() (conn *Conn, err error) { - systemBusLck.Lock() - defer systemBusLck.Unlock() - if systemBus != nil { - return systemBus, nil - } - defer func() { - if conn != nil { - systemBus = conn - } - }() - conn, err = SystemBusPrivate() - if err != nil { - return - } - if err = conn.Auth(nil); err != nil { - conn.Close() - conn = nil - return - } - if err = conn.Hello(); err != nil { - conn.Close() - conn = nil - } - return -} - -// SystemBusPrivate returns a new private connection to the system bus. -func SystemBusPrivate(opts ...ConnOption) (*Conn, error) { - return Dial(getSystemBusPlatformAddress(), opts...) -} - -// SystemBusPrivateHandler returns a new private connection to the system bus, using the provided handlers. -// -// Deprecated: use SystemBusPrivate with options instead. -func SystemBusPrivateHandler(handler Handler, signalHandler SignalHandler) (*Conn, error) { - return SystemBusPrivate(WithHandler(handler), WithSignalHandler(signalHandler)) -} - -// Dial establishes a new private connection to the message bus specified by address. -func Dial(address string, opts ...ConnOption) (*Conn, error) { - tr, err := getTransport(address) - if err != nil { - return nil, err - } - return newConn(tr, opts...) -} - -// DialHandler establishes a new private connection to the message bus specified by address, using the supplied handlers. -// -// Deprecated: use Dial with options instead. -func DialHandler(address string, handler Handler, signalHandler SignalHandler) (*Conn, error) { - return Dial(address, WithSignalHandler(signalHandler)) -} - -// ConnOption is a connection option. 
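ConnOption is the functional-options hook taken by Dial and the private-bus constructors above; WithSignalHandler (below) is one of the stock options. A sketch of plugging in a custom handler, assuming the standard log package; the DeliverSignal signature matches the call made in handleSignal further down, while the handler type and address are illustrative:

type logSignals struct{}

func (logSignals) DeliverSignal(iface, member string, sig *dbus.Signal) {
	log.Printf("signal %s.%s from %s", iface, member, sig.Sender)
}

func dialWithHandler(addr string) (*dbus.Conn, error) {
	return dbus.Dial(addr, dbus.WithSignalHandler(logSignals{}))
}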
-type ConnOption func(conn *Conn) error - -// WithHandler overrides the default handler. -func WithHandler(handler Handler) ConnOption { - return func(conn *Conn) error { - conn.handler = handler - return nil - } -} - -// WithSignalHandler overrides the default signal handler. -func WithSignalHandler(handler SignalHandler) ConnOption { - return func(conn *Conn) error { - conn.signalHandler = handler - return nil - } -} - -// WithSerialGenerator overrides the default signals generator. -func WithSerialGenerator(gen SerialGenerator) ConnOption { - return func(conn *Conn) error { - conn.serialGen = gen - return nil - } -} - -// NewConn creates a new private *Conn from an already established connection. -func NewConn(conn io.ReadWriteCloser, opts ...ConnOption) (*Conn, error) { - return newConn(genericTransport{conn}, opts...) -} - -// NewConnHandler creates a new private *Conn from an already established connection, using the supplied handlers. -// -// Deprecated: use NewConn with options instead. -func NewConnHandler(conn io.ReadWriteCloser, handler Handler, signalHandler SignalHandler) (*Conn, error) { - return NewConn(genericTransport{conn}, WithHandler(handler), WithSignalHandler(signalHandler)) -} - -// newConn creates a new *Conn from a transport. -func newConn(tr transport, opts ...ConnOption) (*Conn, error) { - conn := new(Conn) - conn.transport = tr - for _, opt := range opts { - if err := opt(conn); err != nil { - return nil, err - } - } - conn.calls = newCallTracker() - if conn.handler == nil { - conn.handler = NewDefaultHandler() - } - if conn.signalHandler == nil { - conn.signalHandler = NewDefaultSignalHandler() - } - if conn.serialGen == nil { - conn.serialGen = newSerialGenerator() - } - conn.outHandler = &outputHandler{conn: conn} - conn.names = newNameTracker() - conn.busObj = conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus") - return conn, nil -} - -// BusObject returns the object owned by the bus daemon which handles -// administrative requests. -func (conn *Conn) BusObject() BusObject { - return conn.busObj -} - -// Close closes the connection. Any blocked operations will return with errors -// and the channels passed to Eavesdrop and Signal are closed. This method must -// not be called on shared connections. -func (conn *Conn) Close() error { - conn.outHandler.close() - if term, ok := conn.signalHandler.(Terminator); ok { - term.Terminate() - } - - if term, ok := conn.handler.(Terminator); ok { - term.Terminate() - } - - conn.eavesdroppedLck.Lock() - if conn.eavesdropped != nil { - close(conn.eavesdropped) - } - conn.eavesdroppedLck.Unlock() - - return conn.transport.Close() -} - -// Eavesdrop causes conn to send all incoming messages to the given channel -// without further processing. Method replies, errors and signals will not be -// sent to the appropiate channels and method calls will not be handled. If nil -// is passed, the normal behaviour is restored. -// -// The caller has to make sure that ch is sufficiently buffered; -// if a message arrives when a write to ch is not possible, the message is -// discarded. -func (conn *Conn) Eavesdrop(ch chan<- *Message) { - conn.eavesdroppedLck.Lock() - conn.eavesdropped = ch - conn.eavesdroppedLck.Unlock() -} - -// GetSerial returns an unused serial. -func (conn *Conn) getSerial() uint32 { - return conn.serialGen.GetSerial() -} - -// Hello sends the initial org.freedesktop.DBus.Hello call. This method must be -// called after authentication, but before sending any other messages to the -// bus. 
Hello must not be called for shared connections. -func (conn *Conn) Hello() error { - var s string - err := conn.busObj.Call("org.freedesktop.DBus.Hello", 0).Store(&s) - if err != nil { - return err - } - conn.names.acquireUniqueConnectionName(s) - return nil -} - -// inWorker runs in an own goroutine, reading incoming messages from the -// transport and dispatching them appropiately. -func (conn *Conn) inWorker() { - for { - msg, err := conn.ReadMessage() - if err != nil { - if _, ok := err.(InvalidMessageError); !ok { - // Some read error occured (usually EOF); we can't really do - // anything but to shut down all stuff and returns errors to all - // pending replies. - conn.Close() - conn.calls.finalizeAllWithError(err) - return - } - // invalid messages are ignored - continue - } - conn.eavesdroppedLck.Lock() - if conn.eavesdropped != nil { - select { - case conn.eavesdropped <- msg: - default: - } - conn.eavesdroppedLck.Unlock() - continue - } - conn.eavesdroppedLck.Unlock() - dest, _ := msg.Headers[FieldDestination].value.(string) - found := dest == "" || - !conn.names.uniqueNameIsKnown() || - conn.names.isKnownName(dest) - if !found { - // Eavesdropped a message, but no channel for it is registered. - // Ignore it. - continue - } - switch msg.Type { - case TypeError: - conn.serialGen.RetireSerial(conn.calls.handleDBusError(msg)) - case TypeMethodReply: - conn.serialGen.RetireSerial(conn.calls.handleReply(msg)) - case TypeSignal: - conn.handleSignal(msg) - case TypeMethodCall: - go conn.handleCall(msg) - } - - } -} - -func (conn *Conn) handleSignal(msg *Message) { - iface := msg.Headers[FieldInterface].value.(string) - member := msg.Headers[FieldMember].value.(string) - // as per http://dbus.freedesktop.org/doc/dbus-specification.html , - // sender is optional for signals. - sender, _ := msg.Headers[FieldSender].value.(string) - if iface == "org.freedesktop.DBus" && sender == "org.freedesktop.DBus" { - if member == "NameLost" { - // If we lost the name on the bus, remove it from our - // tracking list. - name, ok := msg.Body[0].(string) - if !ok { - panic("Unable to read the lost name") - } - conn.names.loseName(name) - } else if member == "NameAcquired" { - // If we acquired the name on the bus, add it to our - // tracking list. - name, ok := msg.Body[0].(string) - if !ok { - panic("Unable to read the acquired name") - } - conn.names.acquireName(name) - } - } - signal := &Signal{ - Sender: sender, - Path: msg.Headers[FieldPath].value.(ObjectPath), - Name: iface + "." + member, - Body: msg.Body, - } - conn.signalHandler.DeliverSignal(iface, member, signal) -} - -// Names returns the list of all names that are currently owned by this -// connection. The slice is always at least one element long, the first element -// being the unique name of the connection. -func (conn *Conn) Names() []string { - return conn.names.listKnownNames() -} - -// Object returns the object identified by the given destination name and path. -func (conn *Conn) Object(dest string, path ObjectPath) BusObject { - return &Object{conn, dest, path} -} - -// outWorker runs in an own goroutine, encoding and sending messages that are -// sent to conn.out. 
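(Editorial aside, not part of the diff.) handleSignal above feeds matching messages to the registered signal handler; from the caller's side, a typical pattern is to add a match rule via the bus object and register a buffered channel with conn.Signal. A sketch, assuming the standard org.freedesktop.DBus.AddMatch bus-daemon method:

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}

	// Ask the bus daemon to route NameOwnerChanged signals to this
	// connection, then register a buffered channel with the default
	// signal handler shown above.
	rule := "type='signal',interface='org.freedesktop.DBus',member='NameOwnerChanged'"
	if call := conn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0, rule); call.Err != nil {
		panic(call.Err)
	}

	ch := make(chan *dbus.Signal, 16)
	conn.Signal(ch)

	for sig := range ch {
		fmt.Println(sig.Name, sig.Body)
	}
}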
-func (conn *Conn) sendMessage(msg *Message) { - conn.sendMessageAndIfClosed(msg, func() {}) -} - -func (conn *Conn) sendMessageAndIfClosed(msg *Message, ifClosed func()) { - err := conn.outHandler.sendAndIfClosed(msg, ifClosed) - conn.calls.handleSendError(msg, err) - if err != nil { - conn.serialGen.RetireSerial(msg.serial) - } else if msg.Type != TypeMethodCall { - conn.serialGen.RetireSerial(msg.serial) - } -} - -// Send sends the given message to the message bus. You usually don't need to -// use this; use the higher-level equivalents (Call / Go, Emit and Export) -// instead. If msg is a method call and NoReplyExpected is not set, a non-nil -// call is returned and the same value is sent to ch (which must be buffered) -// once the call is complete. Otherwise, ch is ignored and a Call structure is -// returned of which only the Err member is valid. -func (conn *Conn) Send(msg *Message, ch chan *Call) *Call { - return conn.send(context.Background(), msg, ch) -} - -// SendWithContext acts like Send but takes a context -func (conn *Conn) SendWithContext(ctx context.Context, msg *Message, ch chan *Call) *Call { - return conn.send(ctx, msg, ch) -} - -func (conn *Conn) send(ctx context.Context, msg *Message, ch chan *Call) *Call { - if ctx == nil { - panic("nil context") - } - - var call *Call - ctx, canceler := context.WithCancel(ctx) - msg.serial = conn.getSerial() - if msg.Type == TypeMethodCall && msg.Flags&FlagNoReplyExpected == 0 { - if ch == nil { - ch = make(chan *Call, 5) - } else if cap(ch) == 0 { - panic("dbus: unbuffered channel passed to (*Conn).Send") - } - call = new(Call) - call.Destination, _ = msg.Headers[FieldDestination].value.(string) - call.Path, _ = msg.Headers[FieldPath].value.(ObjectPath) - iface, _ := msg.Headers[FieldInterface].value.(string) - member, _ := msg.Headers[FieldMember].value.(string) - call.Method = iface + "." + member - call.Args = msg.Body - call.Done = ch - call.ctx = ctx - call.ctxCanceler = canceler - conn.calls.track(msg.serial, call) - go func() { - <-ctx.Done() - conn.calls.handleSendError(msg, ctx.Err()) - }() - conn.sendMessageAndIfClosed(msg, func() { - conn.calls.handleSendError(msg, ErrClosed) - canceler() - }) - } else { - canceler() - call = &Call{Err: nil} - conn.sendMessageAndIfClosed(msg, func() { - call = &Call{Err: ErrClosed} - }) - } - return call -} - -// sendError creates an error message corresponding to the parameters and sends -// it to conn.out. -func (conn *Conn) sendError(err error, dest string, serial uint32) { - var e *Error - switch em := err.(type) { - case Error: - e = &em - case *Error: - e = em - case DBusError: - name, body := em.DBusError() - e = NewError(name, body) - default: - e = MakeFailedError(err) - } - msg := new(Message) - msg.Type = TypeError - msg.serial = conn.getSerial() - msg.Headers = make(map[HeaderField]Variant) - if dest != "" { - msg.Headers[FieldDestination] = MakeVariant(dest) - } - msg.Headers[FieldErrorName] = MakeVariant(e.Name) - msg.Headers[FieldReplySerial] = MakeVariant(serial) - msg.Body = e.Body - if len(e.Body) > 0 { - msg.Headers[FieldSignature] = MakeVariant(SignatureOf(e.Body...)) - } - conn.sendMessage(msg) -} - -// sendReply creates a method reply message corresponding to the parameters and -// sends it to conn.out. 
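(Editorial aside, not part of the diff.) The Send machinery above is usually driven through the higher-level BusObject API: Call for a blocking round trip, Go plus a buffered Done channel for an asynchronous one, and Call.Store to unpack the reply body. A short sketch using two standard bus-daemon methods (ListNames, GetId):

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	busObj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// Synchronous: Call blocks until the reply arrives; Store copies the
	// reply body into the supplied pointers.
	var names []string
	if err := busObj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		panic(err)
	}
	fmt.Println("names on the bus:", len(names))

	// Asynchronous: Go returns immediately and the *Call is delivered on
	// the buffered Done channel once the reply (or an error) is in.
	ch := make(chan *dbus.Call, 1)
	busObj.Go("org.freedesktop.DBus.GetId", 0, ch)
	call := <-ch
	if call.Err != nil {
		panic(call.Err)
	}
	var id string
	if err := call.Store(&id); err != nil {
		panic(err)
	}
	fmt.Println("bus id:", id)
}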
-func (conn *Conn) sendReply(dest string, serial uint32, values ...interface{}) { - msg := new(Message) - msg.Type = TypeMethodReply - msg.serial = conn.getSerial() - msg.Headers = make(map[HeaderField]Variant) - if dest != "" { - msg.Headers[FieldDestination] = MakeVariant(dest) - } - msg.Headers[FieldReplySerial] = MakeVariant(serial) - msg.Body = values - if len(values) > 0 { - msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) - } - conn.sendMessage(msg) -} - -func (conn *Conn) defaultSignalAction(fn func(h *defaultSignalHandler, ch chan<- *Signal), ch chan<- *Signal) { - if !isDefaultSignalHandler(conn.signalHandler) { - return - } - handler := conn.signalHandler.(*defaultSignalHandler) - fn(handler, ch) -} - -// Signal registers the given channel to be passed all received signal messages. -// The caller has to make sure that ch is sufficiently buffered; if a message -// arrives when a write to c is not possible, it is discarded. -// -// Multiple of these channels can be registered at the same time. -// -// These channels are "overwritten" by Eavesdrop; i.e., if there currently is a -// channel for eavesdropped messages, this channel receives all signals, and -// none of the channels passed to Signal will receive any signals. -func (conn *Conn) Signal(ch chan<- *Signal) { - conn.defaultSignalAction((*defaultSignalHandler).addSignal, ch) -} - -// RemoveSignal removes the given channel from the list of the registered channels. -func (conn *Conn) RemoveSignal(ch chan<- *Signal) { - conn.defaultSignalAction((*defaultSignalHandler).removeSignal, ch) -} - -// SupportsUnixFDs returns whether the underlying transport supports passing of -// unix file descriptors. If this is false, method calls containing unix file -// descriptors will return an error and emitted signals containing them will -// not be sent. -func (conn *Conn) SupportsUnixFDs() bool { - return conn.unixFD -} - -// Error represents a D-Bus message of type Error. -type Error struct { - Name string - Body []interface{} -} - -func NewError(name string, body []interface{}) *Error { - return &Error{name, body} -} - -func (e Error) Error() string { - if len(e.Body) >= 1 { - s, ok := e.Body[0].(string) - if ok { - return s - } - } - return e.Name -} - -// Signal represents a D-Bus message of type Signal. The name member is given in -// "interface.member" notation, e.g. org.freedesktop.D-Bus.NameLost. -type Signal struct { - Sender string - Path ObjectPath - Name string - Body []interface{} -} - -// transport is a D-Bus transport. -type transport interface { - // Read and Write raw data (for example, for the authentication protocol). - io.ReadWriteCloser - - // Send the initial null byte used for the EXTERNAL mechanism. - SendNullByte() error - - // Returns whether this transport supports passing Unix FDs. - SupportsUnixFDs() bool - - // Signal the transport that Unix FD passing is enabled for this connection. - EnableUnixFDs() - - // Read / send a message, handling things like Unix FDs. 
- ReadMessage() (*Message, error) - SendMessage(*Message) error -} - -var ( - transports = make(map[string]func(string) (transport, error)) -) - -func getTransport(address string) (transport, error) { - var err error - var t transport - - addresses := strings.Split(address, ";") - for _, v := range addresses { - i := strings.IndexRune(v, ':') - if i == -1 { - err = errors.New("dbus: invalid bus address (no transport)") - continue - } - f := transports[v[:i]] - if f == nil { - err = errors.New("dbus: invalid bus address (invalid or unsupported transport)") - continue - } - t, err = f(v[i+1:]) - if err == nil { - return t, nil - } - } - return nil, err -} - -// dereferenceAll returns a slice that, assuming that vs is a slice of pointers -// of arbitrary types, containes the values that are obtained from dereferencing -// all elements in vs. -func dereferenceAll(vs []interface{}) []interface{} { - for i := range vs { - v := reflect.ValueOf(vs[i]) - v = v.Elem() - vs[i] = v.Interface() - } - return vs -} - -// getKey gets a key from a the list of keys. Returns "" on error / not found... -func getKey(s, key string) string { - for _, keyEqualsValue := range strings.Split(s, ",") { - keyValue := strings.SplitN(keyEqualsValue, "=", 2) - if len(keyValue) == 2 && keyValue[0] == key { - return keyValue[1] - } - } - return "" -} - -type outputHandler struct { - conn *Conn - sendLck sync.Mutex - closed struct { - isClosed bool - lck sync.RWMutex - } -} - -func (h *outputHandler) sendAndIfClosed(msg *Message, ifClosed func()) error { - h.closed.lck.RLock() - defer h.closed.lck.RUnlock() - if h.closed.isClosed { - ifClosed() - return nil - } - h.sendLck.Lock() - defer h.sendLck.Unlock() - return h.conn.SendMessage(msg) -} - -func (h *outputHandler) close() { - h.closed.lck.Lock() - defer h.closed.lck.Unlock() - h.closed.isClosed = true -} - -type serialGenerator struct { - lck sync.Mutex - nextSerial uint32 - serialUsed map[uint32]bool -} - -func newSerialGenerator() *serialGenerator { - return &serialGenerator{ - serialUsed: map[uint32]bool{0: true}, - nextSerial: 1, - } -} - -func (gen *serialGenerator) GetSerial() uint32 { - gen.lck.Lock() - defer gen.lck.Unlock() - n := gen.nextSerial - for gen.serialUsed[n] { - n++ - } - gen.serialUsed[n] = true - gen.nextSerial = n + 1 - return n -} - -func (gen *serialGenerator) RetireSerial(serial uint32) { - gen.lck.Lock() - defer gen.lck.Unlock() - delete(gen.serialUsed, serial) -} - -type nameTracker struct { - lck sync.RWMutex - unique string - names map[string]struct{} -} - -func newNameTracker() *nameTracker { - return &nameTracker{names: map[string]struct{}{}} -} -func (tracker *nameTracker) acquireUniqueConnectionName(name string) { - tracker.lck.Lock() - defer tracker.lck.Unlock() - tracker.unique = name -} -func (tracker *nameTracker) acquireName(name string) { - tracker.lck.Lock() - defer tracker.lck.Unlock() - tracker.names[name] = struct{}{} -} -func (tracker *nameTracker) loseName(name string) { - tracker.lck.Lock() - defer tracker.lck.Unlock() - delete(tracker.names, name) -} - -func (tracker *nameTracker) uniqueNameIsKnown() bool { - tracker.lck.RLock() - defer tracker.lck.RUnlock() - return tracker.unique != "" -} -func (tracker *nameTracker) isKnownName(name string) bool { - tracker.lck.RLock() - defer tracker.lck.RUnlock() - _, ok := tracker.names[name] - return ok || name == tracker.unique -} -func (tracker *nameTracker) listKnownNames() []string { - tracker.lck.RLock() - defer tracker.lck.RUnlock() - out := make([]string, 0, 
len(tracker.names)+1) - out = append(out, tracker.unique) - for k := range tracker.names { - out = append(out, k) - } - return out -} - -type callTracker struct { - calls map[uint32]*Call - lck sync.RWMutex -} - -func newCallTracker() *callTracker { - return &callTracker{calls: map[uint32]*Call{}} -} - -func (tracker *callTracker) track(sn uint32, call *Call) { - tracker.lck.Lock() - tracker.calls[sn] = call - tracker.lck.Unlock() -} - -func (tracker *callTracker) handleReply(msg *Message) uint32 { - serial := msg.Headers[FieldReplySerial].value.(uint32) - tracker.lck.RLock() - _, ok := tracker.calls[serial] - tracker.lck.RUnlock() - if ok { - tracker.finalizeWithBody(serial, msg.Body) - } - return serial -} - -func (tracker *callTracker) handleDBusError(msg *Message) uint32 { - serial := msg.Headers[FieldReplySerial].value.(uint32) - tracker.lck.RLock() - _, ok := tracker.calls[serial] - tracker.lck.RUnlock() - if ok { - name, _ := msg.Headers[FieldErrorName].value.(string) - tracker.finalizeWithError(serial, Error{name, msg.Body}) - } - return serial -} - -func (tracker *callTracker) handleSendError(msg *Message, err error) { - if err == nil { - return - } - tracker.lck.RLock() - _, ok := tracker.calls[msg.serial] - tracker.lck.RUnlock() - if ok { - tracker.finalizeWithError(msg.serial, err) - } -} - -// finalize was the only func that did not strobe Done -func (tracker *callTracker) finalize(sn uint32) { - tracker.lck.Lock() - defer tracker.lck.Unlock() - c, ok := tracker.calls[sn] - if ok { - delete(tracker.calls, sn) - c.ContextCancel() - } - return -} - -func (tracker *callTracker) finalizeWithBody(sn uint32, body []interface{}) { - tracker.lck.Lock() - c, ok := tracker.calls[sn] - if ok { - delete(tracker.calls, sn) - } - tracker.lck.Unlock() - if ok { - c.Body = body - c.done() - } - return -} - -func (tracker *callTracker) finalizeWithError(sn uint32, err error) { - tracker.lck.Lock() - c, ok := tracker.calls[sn] - if ok { - delete(tracker.calls, sn) - } - tracker.lck.Unlock() - if ok { - c.Err = err - c.done() - } - return -} - -func (tracker *callTracker) finalizeAllWithError(err error) { - tracker.lck.Lock() - closedCalls := make([]*Call, 0, len(tracker.calls)) - for sn := range tracker.calls { - closedCalls = append(closedCalls, tracker.calls[sn]) - } - tracker.calls = map[uint32]*Call{} - tracker.lck.Unlock() - for _, call := range closedCalls { - call.Err = err - call.done() - } -} diff --git a/vendor/github.com/godbus/dbus/conn_darwin.go b/vendor/github.com/godbus/dbus/conn_darwin.go deleted file mode 100644 index 6e2e4020..00000000 --- a/vendor/github.com/godbus/dbus/conn_darwin.go +++ /dev/null @@ -1,37 +0,0 @@ -package dbus - -import ( - "errors" - "fmt" - "os" - "os/exec" -) - -const defaultSystemBusAddress = "unix:path=/opt/local/var/run/dbus/system_bus_socket" - -func getSessionBusPlatformAddress() (string, error) { - cmd := exec.Command("launchctl", "getenv", "DBUS_LAUNCHD_SESSION_BUS_SOCKET") - b, err := cmd.CombinedOutput() - - if err != nil { - return "", err - } - - if len(b) == 0 { - return "", errors.New("dbus: couldn't determine address of session bus") - } - - return "unix:path=" + string(b[:len(b)-1]), nil -} - -func getSystemBusPlatformAddress() string { - address := os.Getenv("DBUS_LAUNCHD_SESSION_BUS_SOCKET") - if address != "" { - return fmt.Sprintf("unix:path=%s", address) - } - return defaultSystemBusAddress -} - -func tryDiscoverDbusSessionBusAddress() string { - return "" -} diff --git a/vendor/github.com/godbus/dbus/conn_other.go 
b/vendor/github.com/godbus/dbus/conn_other.go deleted file mode 100644 index 289044a4..00000000 --- a/vendor/github.com/godbus/dbus/conn_other.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !darwin - -package dbus - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "os/user" - "path" - "strings" -) - -func getSessionBusPlatformAddress() (string, error) { - cmd := exec.Command("dbus-launch") - b, err := cmd.CombinedOutput() - - if err != nil { - return "", err - } - - i := bytes.IndexByte(b, '=') - j := bytes.IndexByte(b, '\n') - - if i == -1 || j == -1 { - return "", errors.New("dbus: couldn't determine address of session bus") - } - - env, addr := string(b[0:i]), string(b[i+1:j]) - os.Setenv(env, addr) - - return addr, nil -} - -// tryDiscoverDbusSessionBusAddress tries to discover an existing dbus session -// and return the value of its DBUS_SESSION_BUS_ADDRESS. -// It tries different techniques employed by different operating systems, -// returning the first valid address it finds, or an empty string. -// -// * /run/user//bus if this exists, it *is* the bus socket. present on -// Ubuntu 18.04 -// * /run/user//dbus-session: if this exists, it can be parsed for the bus -// address. present on Ubuntu 16.04 -// -// See https://dbus.freedesktop.org/doc/dbus-launch.1.html -func tryDiscoverDbusSessionBusAddress() string { - if runtimeDirectory, err := getRuntimeDirectory(); err == nil { - - if runUserBusFile := path.Join(runtimeDirectory, "bus"); fileExists(runUserBusFile) { - // if /run/user//bus exists, that file itself - // *is* the unix socket, so return its path - return fmt.Sprintf("unix:path=%s", runUserBusFile) - } - if runUserSessionDbusFile := path.Join(runtimeDirectory, "dbus-session"); fileExists(runUserSessionDbusFile) { - // if /run/user//dbus-session exists, it's a - // text file // containing the address of the socket, e.g.: - // DBUS_SESSION_BUS_ADDRESS=unix:abstract=/tmp/dbus-E1c73yNqrG - - if f, err := ioutil.ReadFile(runUserSessionDbusFile); err == nil { - fileContent := string(f) - - prefix := "DBUS_SESSION_BUS_ADDRESS=" - - if strings.HasPrefix(fileContent, prefix) { - address := strings.TrimRight(strings.TrimPrefix(fileContent, prefix), "\n\r") - return address - } - } - } - } - return "" -} - -func getRuntimeDirectory() (string, error) { - if currentUser, err := user.Current(); err != nil { - return "", err - } else { - return fmt.Sprintf("/run/user/%s", currentUser.Uid), nil - } -} - -func fileExists(filename string) bool { - if _, err := os.Stat(filename); !os.IsNotExist(err) { - return true - } else { - return false - } -} diff --git a/vendor/github.com/godbus/dbus/conn_unix.go b/vendor/github.com/godbus/dbus/conn_unix.go deleted file mode 100644 index 4cba8ae8..00000000 --- a/vendor/github.com/godbus/dbus/conn_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -//+build !windows,!solaris,!darwin - -package dbus - -import ( - "os" - "fmt" -) - -const defaultSystemBusAddress = "unix:path=/var/run/dbus/system_bus_socket" - -func getSystemBusPlatformAddress() string { - address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") - if address != "" { - return fmt.Sprintf("unix:path=%s", address) - } - return defaultSystemBusAddress -} \ No newline at end of file diff --git a/vendor/github.com/godbus/dbus/conn_windows.go b/vendor/github.com/godbus/dbus/conn_windows.go deleted file mode 100644 index 4291e451..00000000 --- a/vendor/github.com/godbus/dbus/conn_windows.go +++ /dev/null @@ -1,15 +0,0 @@ -//+build windows - -package dbus - -import "os" - -const 
defaultSystemBusAddress = "tcp:host=127.0.0.1,port=12434" - -func getSystemBusPlatformAddress() string { - address := os.Getenv("DBUS_SYSTEM_BUS_ADDRESS") - if address != "" { - return address - } - return defaultSystemBusAddress -} diff --git a/vendor/github.com/godbus/dbus/dbus.go b/vendor/github.com/godbus/dbus/dbus.go deleted file mode 100644 index c6d0d3ce..00000000 --- a/vendor/github.com/godbus/dbus/dbus.go +++ /dev/null @@ -1,427 +0,0 @@ -package dbus - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -var ( - byteType = reflect.TypeOf(byte(0)) - boolType = reflect.TypeOf(false) - uint8Type = reflect.TypeOf(uint8(0)) - int16Type = reflect.TypeOf(int16(0)) - uint16Type = reflect.TypeOf(uint16(0)) - intType = reflect.TypeOf(int(0)) - uintType = reflect.TypeOf(uint(0)) - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) - stringType = reflect.TypeOf("") - signatureType = reflect.TypeOf(Signature{""}) - objectPathType = reflect.TypeOf(ObjectPath("")) - variantType = reflect.TypeOf(Variant{Signature{""}, nil}) - interfacesType = reflect.TypeOf([]interface{}{}) - interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() - unixFDType = reflect.TypeOf(UnixFD(0)) - unixFDIndexType = reflect.TypeOf(UnixFDIndex(0)) -) - -// An InvalidTypeError signals that a value which cannot be represented in the -// D-Bus wire format was passed to a function. -type InvalidTypeError struct { - Type reflect.Type -} - -func (e InvalidTypeError) Error() string { - return "dbus: invalid type " + e.Type.String() -} - -// Store copies the values contained in src to dest, which must be a slice of -// pointers. It converts slices of interfaces from src to corresponding structs -// in dest. An error is returned if the lengths of src and dest or the types of -// their elements don't match. 
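(Editorial aside, not part of the diff.) As the Store documentation above says, incoming D-Bus STRUCTs are decoded as slices of empty interfaces; Store maps them back onto Go structs field by field. A sketch with hypothetical values:

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

// version is an illustrative struct matching a (suu) reply body.
type version struct {
	Name  string
	Major uint32
	Minor uint32
}

func main() {
	// body stands in for a decoded message body with signature (suu);
	// the values are made up for this example.
	body := []interface{}{
		[]interface{}{"dbus", uint32(1), uint32(12)},
	}

	var v version
	if err := dbus.Store(body, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", v) // {Name:dbus Major:1 Minor:12}
}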
-func Store(src []interface{}, dest ...interface{}) error { - if len(src) != len(dest) { - return errors.New("dbus.Store: length mismatch") - } - - for i := range src { - if err := storeInterfaces(src[i], dest[i]); err != nil { - return err - } - } - return nil -} - -func storeInterfaces(src, dest interface{}) error { - return store(reflect.ValueOf(dest), reflect.ValueOf(src)) -} - -func store(dest, src reflect.Value) error { - if dest.Kind() == reflect.Ptr { - return store(dest.Elem(), src) - } - switch src.Kind() { - case reflect.Slice: - return storeSlice(dest, src) - case reflect.Map: - return storeMap(dest, src) - default: - return storeBase(dest, src) - } -} - -func storeBase(dest, src reflect.Value) error { - return setDest(dest, src) -} - -func setDest(dest, src reflect.Value) error { - if !isVariant(src.Type()) && isVariant(dest.Type()) { - //special conversion for dbus.Variant - dest.Set(reflect.ValueOf(MakeVariant(src.Interface()))) - return nil - } - if isVariant(src.Type()) && !isVariant(dest.Type()) { - src = getVariantValue(src) - } - if !src.Type().ConvertibleTo(dest.Type()) { - return fmt.Errorf( - "dbus.Store: type mismatch: cannot convert %s to %s", - src.Type(), dest.Type()) - } - dest.Set(src.Convert(dest.Type())) - return nil -} - -func kindsAreCompatible(dest, src reflect.Type) bool { - switch { - case isVariant(dest): - return true - case dest.Kind() == reflect.Interface: - return true - default: - return dest.Kind() == src.Kind() - } -} - -func isConvertibleTo(dest, src reflect.Type) bool { - switch { - case isVariant(dest): - return true - case dest.Kind() == reflect.Interface: - return true - case dest.Kind() == reflect.Slice: - return src.Kind() == reflect.Slice && - isConvertibleTo(dest.Elem(), src.Elem()) - case dest.Kind() == reflect.Struct: - return src == interfacesType - default: - return src.ConvertibleTo(dest) - } -} - -func storeMap(dest, src reflect.Value) error { - switch { - case !kindsAreCompatible(dest.Type(), src.Type()): - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "map: cannot store a value of %s into %s", - src.Type(), dest.Type()) - case isVariant(dest.Type()): - return storeMapIntoVariant(dest, src) - case dest.Kind() == reflect.Interface: - return storeMapIntoInterface(dest, src) - case isConvertibleTo(dest.Type().Key(), src.Type().Key()) && - isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): - return storeMapIntoMap(dest, src) - default: - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "map: cannot convert a value of %s into %s", - src.Type(), dest.Type()) - } -} - -func storeMapIntoVariant(dest, src reflect.Value) error { - dv := reflect.MakeMap(src.Type()) - err := store(dv, src) - if err != nil { - return err - } - return storeBase(dest, dv) -} - -func storeMapIntoInterface(dest, src reflect.Value) error { - var dv reflect.Value - if isVariant(src.Type().Elem()) { - //Convert variants to interface{} recursively when converting - //to interface{} - dv = reflect.MakeMap( - reflect.MapOf(src.Type().Key(), interfaceType)) - } else { - dv = reflect.MakeMap(src.Type()) - } - err := store(dv, src) - if err != nil { - return err - } - return storeBase(dest, dv) -} - -func storeMapIntoMap(dest, src reflect.Value) error { - if dest.IsNil() { - dest.Set(reflect.MakeMap(dest.Type())) - } - keys := src.MapKeys() - for _, key := range keys { - dkey := key.Convert(dest.Type().Key()) - dval := reflect.New(dest.Type().Elem()).Elem() - err := store(dval, getVariantValue(src.MapIndex(key))) - if err != nil { - return err - } - 
dest.SetMapIndex(dkey, dval) - } - return nil -} - -func storeSlice(dest, src reflect.Value) error { - switch { - case src.Type() == interfacesType && dest.Kind() == reflect.Struct: - //The decoder always decodes structs as slices of interface{} - return storeStruct(dest, src) - case !kindsAreCompatible(dest.Type(), src.Type()): - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "slice: cannot store a value of %s into %s", - src.Type(), dest.Type()) - case isVariant(dest.Type()): - return storeSliceIntoVariant(dest, src) - case dest.Kind() == reflect.Interface: - return storeSliceIntoInterface(dest, src) - case isConvertibleTo(dest.Type().Elem(), src.Type().Elem()): - return storeSliceIntoSlice(dest, src) - default: - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "slice: cannot convert a value of %s into %s", - src.Type(), dest.Type()) - } -} - -func storeStruct(dest, src reflect.Value) error { - if isVariant(dest.Type()) { - return storeBase(dest, src) - } - dval := make([]interface{}, 0, dest.NumField()) - dtype := dest.Type() - for i := 0; i < dest.NumField(); i++ { - field := dest.Field(i) - ftype := dtype.Field(i) - if ftype.PkgPath != "" { - continue - } - if ftype.Tag.Get("dbus") == "-" { - continue - } - dval = append(dval, field.Addr().Interface()) - } - if src.Len() != len(dval) { - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "destination struct does not have "+ - "enough fields need: %d have: %d", - src.Len(), len(dval)) - } - return Store(src.Interface().([]interface{}), dval...) -} - -func storeSliceIntoVariant(dest, src reflect.Value) error { - dv := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - err := store(dv, src) - if err != nil { - return err - } - return storeBase(dest, dv) -} - -func storeSliceIntoInterface(dest, src reflect.Value) error { - var dv reflect.Value - if isVariant(src.Type().Elem()) { - //Convert variants to interface{} recursively when converting - //to interface{} - dv = reflect.MakeSlice(reflect.SliceOf(interfaceType), - src.Len(), src.Cap()) - } else { - dv = reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - } - err := store(dv, src) - if err != nil { - return err - } - return storeBase(dest, dv) -} - -func storeSliceIntoSlice(dest, src reflect.Value) error { - if dest.IsNil() || dest.Len() < src.Len() { - dest.Set(reflect.MakeSlice(dest.Type(), src.Len(), src.Cap())) - } - if dest.Len() != src.Len() { - return fmt.Errorf( - "dbus.Store: type mismatch: "+ - "slices are different lengths "+ - "need: %d have: %d", - src.Len(), dest.Len()) - } - for i := 0; i < src.Len(); i++ { - err := store(dest.Index(i), getVariantValue(src.Index(i))) - if err != nil { - return err - } - } - return nil -} - -func getVariantValue(in reflect.Value) reflect.Value { - if isVariant(in.Type()) { - return reflect.ValueOf(in.Interface().(Variant).Value()) - } - return in -} - -func isVariant(t reflect.Type) bool { - return t == variantType -} - -// An ObjectPath is an object path as defined by the D-Bus spec. -type ObjectPath string - -// IsValid returns whether the object path is valid. 
-func (o ObjectPath) IsValid() bool { - s := string(o) - if len(s) == 0 { - return false - } - if s[0] != '/' { - return false - } - if s[len(s)-1] == '/' && len(s) != 1 { - return false - } - // probably not used, but technically possible - if s == "/" { - return true - } - split := strings.Split(s[1:], "/") - for _, v := range split { - if len(v) == 0 { - return false - } - for _, c := range v { - if !isMemberChar(c) { - return false - } - } - } - return true -} - -// A UnixFD is a Unix file descriptor sent over the wire. See the package-level -// documentation for more information about Unix file descriptor passsing. -type UnixFD int32 - -// A UnixFDIndex is the representation of a Unix file descriptor in a message. -type UnixFDIndex uint32 - -// alignment returns the alignment of values of type t. -func alignment(t reflect.Type) int { - switch t { - case variantType: - return 1 - case objectPathType: - return 4 - case signatureType: - return 1 - case interfacesType: - return 4 - } - switch t.Kind() { - case reflect.Uint8: - return 1 - case reflect.Uint16, reflect.Int16: - return 2 - case reflect.Uint, reflect.Int, reflect.Uint32, reflect.Int32, reflect.String, reflect.Array, reflect.Slice, reflect.Map: - return 4 - case reflect.Uint64, reflect.Int64, reflect.Float64, reflect.Struct: - return 8 - case reflect.Ptr: - return alignment(t.Elem()) - } - return 1 -} - -// isKeyType returns whether t is a valid type for a D-Bus dict. -func isKeyType(t reflect.Type) bool { - switch t.Kind() { - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float64, - reflect.String, reflect.Uint, reflect.Int: - - return true - } - return false -} - -// isValidInterface returns whether s is a valid name for an interface. -func isValidInterface(s string) bool { - if len(s) == 0 || len(s) > 255 || s[0] == '.' { - return false - } - elem := strings.Split(s, ".") - if len(elem) < 2 { - return false - } - for _, v := range elem { - if len(v) == 0 { - return false - } - if v[0] >= '0' && v[0] <= '9' { - return false - } - for _, c := range v { - if !isMemberChar(c) { - return false - } - } - } - return true -} - -// isValidMember returns whether s is a valid name for a member. -func isValidMember(s string) bool { - if len(s) == 0 || len(s) > 255 { - return false - } - i := strings.Index(s, ".") - if i != -1 { - return false - } - if s[0] >= '0' && s[0] <= '9' { - return false - } - for _, c := range s { - if !isMemberChar(c) { - return false - } - } - return true -} - -func isMemberChar(c rune) bool { - return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z') || - (c >= 'a' && c <= 'z') || c == '_' -} diff --git a/vendor/github.com/godbus/dbus/decoder.go b/vendor/github.com/godbus/dbus/decoder.go deleted file mode 100644 index 5c27d3b5..00000000 --- a/vendor/github.com/godbus/dbus/decoder.go +++ /dev/null @@ -1,235 +0,0 @@ -package dbus - -import ( - "encoding/binary" - "io" - "reflect" -) - -type decoder struct { - in io.Reader - order binary.ByteOrder - pos int -} - -// newDecoder returns a new decoder that reads values from in. The input is -// expected to be in the given byte order. -func newDecoder(in io.Reader, order binary.ByteOrder) *decoder { - dec := new(decoder) - dec.in = in - dec.order = order - return dec -} - -// align aligns the input to the given boundary and panics on error. 
-func (dec *decoder) align(n int) { - if dec.pos%n != 0 { - newpos := (dec.pos + n - 1) & ^(n - 1) - empty := make([]byte, newpos-dec.pos) - if _, err := io.ReadFull(dec.in, empty); err != nil { - panic(err) - } - dec.pos = newpos - } -} - -// Calls binary.Read(dec.in, dec.order, v) and panics on read errors. -func (dec *decoder) binread(v interface{}) { - if err := binary.Read(dec.in, dec.order, v); err != nil { - panic(err) - } -} - -func (dec *decoder) Decode(sig Signature) (vs []interface{}, err error) { - defer func() { - var ok bool - v := recover() - if err, ok = v.(error); ok { - if err == io.EOF || err == io.ErrUnexpectedEOF { - err = FormatError("unexpected EOF") - } - } - }() - vs = make([]interface{}, 0) - s := sig.str - for s != "" { - err, rem := validSingle(s, 0) - if err != nil { - return nil, err - } - v := dec.decode(s[:len(s)-len(rem)], 0) - vs = append(vs, v) - s = rem - } - return vs, nil -} - -func (dec *decoder) decode(s string, depth int) interface{} { - dec.align(alignment(typeFor(s))) - switch s[0] { - case 'y': - var b [1]byte - if _, err := dec.in.Read(b[:]); err != nil { - panic(err) - } - dec.pos++ - return b[0] - case 'b': - i := dec.decode("u", depth).(uint32) - switch { - case i == 0: - return false - case i == 1: - return true - default: - panic(FormatError("invalid value for boolean")) - } - case 'n': - var i int16 - dec.binread(&i) - dec.pos += 2 - return i - case 'i': - var i int32 - dec.binread(&i) - dec.pos += 4 - return i - case 'x': - var i int64 - dec.binread(&i) - dec.pos += 8 - return i - case 'q': - var i uint16 - dec.binread(&i) - dec.pos += 2 - return i - case 'u': - var i uint32 - dec.binread(&i) - dec.pos += 4 - return i - case 't': - var i uint64 - dec.binread(&i) - dec.pos += 8 - return i - case 'd': - var f float64 - dec.binread(&f) - dec.pos += 8 - return f - case 's': - length := dec.decode("u", depth).(uint32) - b := make([]byte, int(length)+1) - if _, err := io.ReadFull(dec.in, b); err != nil { - panic(err) - } - dec.pos += int(length) + 1 - return string(b[:len(b)-1]) - case 'o': - return ObjectPath(dec.decode("s", depth).(string)) - case 'g': - length := dec.decode("y", depth).(byte) - b := make([]byte, int(length)+1) - if _, err := io.ReadFull(dec.in, b); err != nil { - panic(err) - } - dec.pos += int(length) + 1 - sig, err := ParseSignature(string(b[:len(b)-1])) - if err != nil { - panic(err) - } - return sig - case 'v': - if depth >= 64 { - panic(FormatError("input exceeds container depth limit")) - } - var variant Variant - sig := dec.decode("g", depth).(Signature) - if len(sig.str) == 0 { - panic(FormatError("variant signature is empty")) - } - err, rem := validSingle(sig.str, 0) - if err != nil { - panic(err) - } - if rem != "" { - panic(FormatError("variant signature has multiple types")) - } - variant.sig = sig - variant.value = dec.decode(sig.str, depth+1) - return variant - case 'h': - return UnixFDIndex(dec.decode("u", depth).(uint32)) - case 'a': - if len(s) > 1 && s[1] == '{' { - ksig := s[2:3] - vsig := s[3 : len(s)-1] - v := reflect.MakeMap(reflect.MapOf(typeFor(ksig), typeFor(vsig))) - if depth >= 63 { - panic(FormatError("input exceeds container depth limit")) - } - length := dec.decode("u", depth).(uint32) - // Even for empty maps, the correct padding must be included - dec.align(8) - spos := dec.pos - for dec.pos < spos+int(length) { - dec.align(8) - if !isKeyType(v.Type().Key()) { - panic(InvalidTypeError{v.Type()}) - } - kv := dec.decode(ksig, depth+2) - vv := dec.decode(vsig, depth+2) - 
v.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) - } - return v.Interface() - } - if depth >= 64 { - panic(FormatError("input exceeds container depth limit")) - } - length := dec.decode("u", depth).(uint32) - v := reflect.MakeSlice(reflect.SliceOf(typeFor(s[1:])), 0, int(length)) - // Even for empty arrays, the correct padding must be included - align := alignment(typeFor(s[1:])) - if len(s) > 1 && s[1] == '(' { - //Special case for arrays of structs - //structs decode as a slice of interface{} values - //but the dbus alignment does not match this - align = 8 - } - dec.align(align) - spos := dec.pos - for dec.pos < spos+int(length) { - ev := dec.decode(s[1:], depth+1) - v = reflect.Append(v, reflect.ValueOf(ev)) - } - return v.Interface() - case '(': - if depth >= 64 { - panic(FormatError("input exceeds container depth limit")) - } - dec.align(8) - v := make([]interface{}, 0) - s = s[1 : len(s)-1] - for s != "" { - err, rem := validSingle(s, 0) - if err != nil { - panic(err) - } - ev := dec.decode(s[:len(s)-len(rem)], depth+1) - v = append(v, ev) - s = rem - } - return v - default: - panic(SignatureError{Sig: s}) - } -} - -// A FormatError is an error in the wire format. -type FormatError string - -func (e FormatError) Error() string { - return "dbus: wire format error: " + string(e) -} diff --git a/vendor/github.com/godbus/dbus/default_handler.go b/vendor/github.com/godbus/dbus/default_handler.go deleted file mode 100644 index 81dbcc7e..00000000 --- a/vendor/github.com/godbus/dbus/default_handler.go +++ /dev/null @@ -1,321 +0,0 @@ -package dbus - -import ( - "bytes" - "reflect" - "strings" - "sync" -) - -func newIntrospectIntf(h *defaultHandler) *exportedIntf { - methods := make(map[string]Method) - methods["Introspect"] = exportedMethod{ - reflect.ValueOf(func(msg Message) (string, *Error) { - path := msg.Headers[FieldPath].value.(ObjectPath) - return h.introspectPath(path), nil - }), - } - return newExportedIntf(methods, true) -} - -//NewDefaultHandler returns an instance of the default -//call handler. This is useful if you want to implement only -//one of the two handlers but not both. -// -// Deprecated: this is the default value, don't use it, it will be unexported. 
-func NewDefaultHandler() *defaultHandler { - h := &defaultHandler{ - objects: make(map[ObjectPath]*exportedObj), - defaultIntf: make(map[string]*exportedIntf), - } - h.defaultIntf["org.freedesktop.DBus.Introspectable"] = newIntrospectIntf(h) - return h -} - -type defaultHandler struct { - sync.RWMutex - objects map[ObjectPath]*exportedObj - defaultIntf map[string]*exportedIntf -} - -func (h *defaultHandler) PathExists(path ObjectPath) bool { - _, ok := h.objects[path] - return ok -} - -func (h *defaultHandler) introspectPath(path ObjectPath) string { - subpath := make(map[string]struct{}) - var xml bytes.Buffer - xml.WriteString("") - for obj, _ := range h.objects { - p := string(path) - if p != "/" { - p += "/" - } - if strings.HasPrefix(string(obj), p) { - node_name := strings.Split(string(obj[len(p):]), "/")[0] - subpath[node_name] = struct{}{} - } - } - for s, _ := range subpath { - xml.WriteString("\n\t") - } - xml.WriteString("\n") - return xml.String() -} - -func (h *defaultHandler) LookupObject(path ObjectPath) (ServerObject, bool) { - h.RLock() - defer h.RUnlock() - object, ok := h.objects[path] - if ok { - return object, ok - } - - // If an object wasn't found for this exact path, - // look for a matching subtree registration - subtreeObject := newExportedObject() - path = path[:strings.LastIndex(string(path), "/")] - for len(path) > 0 { - object, ok = h.objects[path] - if ok { - for name, iface := range object.interfaces { - // Only include this handler if it registered for the subtree - if iface.isFallbackInterface() { - subtreeObject.interfaces[name] = iface - } - } - break - } - - path = path[:strings.LastIndex(string(path), "/")] - } - - for name, intf := range h.defaultIntf { - if _, exists := subtreeObject.interfaces[name]; exists { - continue - } - subtreeObject.interfaces[name] = intf - } - - return subtreeObject, true -} - -func (h *defaultHandler) AddObject(path ObjectPath, object *exportedObj) { - h.Lock() - h.objects[path] = object - h.Unlock() -} - -func (h *defaultHandler) DeleteObject(path ObjectPath) { - h.Lock() - delete(h.objects, path) - h.Unlock() -} - -type exportedMethod struct { - reflect.Value -} - -func (m exportedMethod) Call(args ...interface{}) ([]interface{}, error) { - t := m.Type() - - params := make([]reflect.Value, len(args)) - for i := 0; i < len(args); i++ { - params[i] = reflect.ValueOf(args[i]).Elem() - } - - ret := m.Value.Call(params) - - err := ret[t.NumOut()-1].Interface().(*Error) - ret = ret[:t.NumOut()-1] - out := make([]interface{}, len(ret)) - for i, val := range ret { - out[i] = val.Interface() - } - if err == nil { - //concrete type to interface nil is a special case - return out, nil - } - return out, err -} - -func (m exportedMethod) NumArguments() int { - return m.Value.Type().NumIn() -} - -func (m exportedMethod) ArgumentValue(i int) interface{} { - return reflect.Zero(m.Type().In(i)).Interface() -} - -func (m exportedMethod) NumReturns() int { - return m.Value.Type().NumOut() -} - -func (m exportedMethod) ReturnValue(i int) interface{} { - return reflect.Zero(m.Type().Out(i)).Interface() -} - -func newExportedObject() *exportedObj { - return &exportedObj{ - interfaces: make(map[string]*exportedIntf), - } -} - -type exportedObj struct { - mu sync.RWMutex - interfaces map[string]*exportedIntf -} - -func (obj *exportedObj) LookupInterface(name string) (Interface, bool) { - if name == "" { - return obj, true - } - obj.mu.RLock() - defer obj.mu.RUnlock() - intf, exists := obj.interfaces[name] - return intf, exists -} - -func (obj 
*exportedObj) AddInterface(name string, iface *exportedIntf) { - obj.mu.Lock() - defer obj.mu.Unlock() - obj.interfaces[name] = iface -} - -func (obj *exportedObj) DeleteInterface(name string) { - obj.mu.Lock() - defer obj.mu.Unlock() - delete(obj.interfaces, name) -} - -func (obj *exportedObj) LookupMethod(name string) (Method, bool) { - obj.mu.RLock() - defer obj.mu.RUnlock() - for _, intf := range obj.interfaces { - method, exists := intf.LookupMethod(name) - if exists { - return method, exists - } - } - return nil, false -} - -func (obj *exportedObj) isFallbackInterface() bool { - return false -} - -func newExportedIntf(methods map[string]Method, includeSubtree bool) *exportedIntf { - return &exportedIntf{ - methods: methods, - includeSubtree: includeSubtree, - } -} - -type exportedIntf struct { - methods map[string]Method - - // Whether or not this export is for the entire subtree - includeSubtree bool -} - -func (obj *exportedIntf) LookupMethod(name string) (Method, bool) { - out, exists := obj.methods[name] - return out, exists -} - -func (obj *exportedIntf) isFallbackInterface() bool { - return obj.includeSubtree -} - -//NewDefaultSignalHandler returns an instance of the default -//signal handler. This is useful if you want to implement only -//one of the two handlers but not both. -// -// Deprecated: this is the default value, don't use it, it will be unexported. -func NewDefaultSignalHandler() *defaultSignalHandler { - return &defaultSignalHandler{ - closeChan: make(chan struct{}), - } -} - -func isDefaultSignalHandler(handler SignalHandler) bool { - _, ok := handler.(*defaultSignalHandler) - return ok -} - -type defaultSignalHandler struct { - sync.RWMutex - closed bool - signals []chan<- *Signal - closeChan chan struct{} -} - -func (sh *defaultSignalHandler) DeliverSignal(intf, name string, signal *Signal) { - sh.RLock() - defer sh.RUnlock() - if sh.closed { - return - } - for _, ch := range sh.signals { - select { - case ch <- signal: - case <-sh.closeChan: - return - default: - go func() { - select { - case ch <- signal: - case <-sh.closeChan: - return - } - }() - } - } -} - -func (sh *defaultSignalHandler) Init() error { - sh.Lock() - sh.signals = make([]chan<- *Signal, 0) - sh.closeChan = make(chan struct{}) - sh.Unlock() - return nil -} - -func (sh *defaultSignalHandler) Terminate() { - sh.Lock() - if !sh.closed { - close(sh.closeChan) - } - sh.closed = true - for _, ch := range sh.signals { - close(ch) - } - sh.signals = nil - sh.Unlock() -} - -func (sh *defaultSignalHandler) addSignal(ch chan<- *Signal) { - sh.Lock() - defer sh.Unlock() - if sh.closed { - return - } - sh.signals = append(sh.signals, ch) - -} - -func (sh *defaultSignalHandler) removeSignal(ch chan<- *Signal) { - sh.Lock() - defer sh.Unlock() - if sh.closed { - return - } - for i := len(sh.signals) - 1; i >= 0; i-- { - if ch == sh.signals[i] { - copy(sh.signals[i:], sh.signals[i+1:]) - sh.signals[len(sh.signals)-1] = nil - sh.signals = sh.signals[:len(sh.signals)-1] - } - } -} diff --git a/vendor/github.com/godbus/dbus/doc.go b/vendor/github.com/godbus/dbus/doc.go deleted file mode 100644 index 895036a8..00000000 --- a/vendor/github.com/godbus/dbus/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Package dbus implements bindings to the D-Bus message bus system. - -To use the message bus API, you first need to connect to a bus (usually the -session or system bus). The acquired connection then can be used to call methods -on remote objects and emit or receive signals. 
Using the Export method, you can -arrange D-Bus methods calls to be directly translated to method calls on a Go -value. - -Conversion Rules - -For outgoing messages, Go types are automatically converted to the -corresponding D-Bus types. The following types are directly encoded as their -respective D-Bus equivalents: - - Go type | D-Bus type - ------------+----------- - byte | BYTE - bool | BOOLEAN - int16 | INT16 - uint16 | UINT16 - int | INT32 - uint | UINT32 - int32 | INT32 - uint32 | UINT32 - int64 | INT64 - uint64 | UINT64 - float64 | DOUBLE - string | STRING - ObjectPath | OBJECT_PATH - Signature | SIGNATURE - Variant | VARIANT - interface{} | VARIANT - UnixFDIndex | UNIX_FD - -Slices and arrays encode as ARRAYs of their element type. - -Maps encode as DICTs, provided that their key type can be used as a key for -a DICT. - -Structs other than Variant and Signature encode as a STRUCT containing their -exported fields. Fields whose tags contain `dbus:"-"` and unexported fields will -be skipped. - -Pointers encode as the value they're pointed to. - -Types convertible to one of the base types above will be mapped as the -base type. - -Trying to encode any other type or a slice, map or struct containing an -unsupported type will result in an InvalidTypeError. - -For incoming messages, the inverse of these rules are used, with the exception -of STRUCTs. Incoming STRUCTS are represented as a slice of empty interfaces -containing the struct fields in the correct order. The Store function can be -used to convert such values to Go structs. - -Unix FD passing - -Handling Unix file descriptors deserves special mention. To use them, you should -first check that they are supported on a connection by calling SupportsUnixFDs. -If it returns true, all method of Connection will translate messages containing -UnixFD's to messages that are accompanied by the given file descriptors with the -UnixFD values being substituted by the correct indices. Similarily, the indices -of incoming messages are automatically resolved. It shouldn't be necessary to use -UnixFDIndex. - -*/ -package dbus diff --git a/vendor/github.com/godbus/dbus/encoder.go b/vendor/github.com/godbus/dbus/encoder.go deleted file mode 100644 index 8bb71776..00000000 --- a/vendor/github.com/godbus/dbus/encoder.go +++ /dev/null @@ -1,210 +0,0 @@ -package dbus - -import ( - "bytes" - "encoding/binary" - "io" - "reflect" -) - -// An encoder encodes values to the D-Bus wire format. -type encoder struct { - out io.Writer - order binary.ByteOrder - pos int -} - -// NewEncoder returns a new encoder that writes to out in the given byte order. -func newEncoder(out io.Writer, order binary.ByteOrder) *encoder { - return newEncoderAtOffset(out, 0, order) -} - -// newEncoderAtOffset returns a new encoder that writes to out in the given -// byte order. Specify the offset to initialize pos for proper alignment -// computation. -func newEncoderAtOffset(out io.Writer, offset int, order binary.ByteOrder) *encoder { - enc := new(encoder) - enc.out = out - enc.order = order - enc.pos = offset - return enc -} - -// Aligns the next output to be on a multiple of n. Panics on write errors. -func (enc *encoder) align(n int) { - pad := enc.padding(0, n) - if pad > 0 { - empty := make([]byte, pad) - if _, err := enc.out.Write(empty); err != nil { - panic(err) - } - enc.pos += pad - } -} - -// pad returns the number of bytes of padding, based on current position and additional offset. -// and alignment. 
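(Editorial aside, not part of the diff.) The conversion rules documented in doc.go above can be observed directly with SignatureOf and MakeVariant; the point type below is invented purely for illustration.

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

type point struct {
	X     int32
	Y     int32
	Label string `dbus:"-"` // tagged fields are skipped, per the rules above
}

func main() {
	fmt.Println(dbus.SignatureOf(point{}))                   // (ii)
	fmt.Println(dbus.SignatureOf(map[string]dbus.Variant{})) // a{sv}
	fmt.Println(dbus.SignatureOf(dbus.MakeVariant("hello"))) // v
}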
-func (enc *encoder) padding(offset, algn int) int { - abs := enc.pos + offset - if abs%algn != 0 { - newabs := (abs + algn - 1) & ^(algn - 1) - return newabs - abs - } - return 0 -} - -// Calls binary.Write(enc.out, enc.order, v) and panics on write errors. -func (enc *encoder) binwrite(v interface{}) { - if err := binary.Write(enc.out, enc.order, v); err != nil { - panic(err) - } -} - -// Encode encodes the given values to the underyling reader. All written values -// are aligned properly as required by the D-Bus spec. -func (enc *encoder) Encode(vs ...interface{}) (err error) { - defer func() { - err, _ = recover().(error) - }() - for _, v := range vs { - enc.encode(reflect.ValueOf(v), 0) - } - return nil -} - -// encode encodes the given value to the writer and panics on error. depth holds -// the depth of the container nesting. -func (enc *encoder) encode(v reflect.Value, depth int) { - enc.align(alignment(v.Type())) - switch v.Kind() { - case reflect.Uint8: - var b [1]byte - b[0] = byte(v.Uint()) - if _, err := enc.out.Write(b[:]); err != nil { - panic(err) - } - enc.pos++ - case reflect.Bool: - if v.Bool() { - enc.encode(reflect.ValueOf(uint32(1)), depth) - } else { - enc.encode(reflect.ValueOf(uint32(0)), depth) - } - case reflect.Int16: - enc.binwrite(int16(v.Int())) - enc.pos += 2 - case reflect.Uint16: - enc.binwrite(uint16(v.Uint())) - enc.pos += 2 - case reflect.Int, reflect.Int32: - enc.binwrite(int32(v.Int())) - enc.pos += 4 - case reflect.Uint, reflect.Uint32: - enc.binwrite(uint32(v.Uint())) - enc.pos += 4 - case reflect.Int64: - enc.binwrite(v.Int()) - enc.pos += 8 - case reflect.Uint64: - enc.binwrite(v.Uint()) - enc.pos += 8 - case reflect.Float64: - enc.binwrite(v.Float()) - enc.pos += 8 - case reflect.String: - enc.encode(reflect.ValueOf(uint32(len(v.String()))), depth) - b := make([]byte, v.Len()+1) - copy(b, v.String()) - b[len(b)-1] = 0 - n, err := enc.out.Write(b) - if err != nil { - panic(err) - } - enc.pos += n - case reflect.Ptr: - enc.encode(v.Elem(), depth) - case reflect.Slice, reflect.Array: - if depth >= 64 { - panic(FormatError("input exceeds container depth limit")) - } - // Lookahead offset: 4 bytes for uint32 length (with alignment), - // plus alignment for elements. 
- n := enc.padding(0, 4) + 4 - offset := enc.pos + n + enc.padding(n, alignment(v.Type().Elem())) - - var buf bytes.Buffer - bufenc := newEncoderAtOffset(&buf, offset, enc.order) - - for i := 0; i < v.Len(); i++ { - bufenc.encode(v.Index(i), depth+1) - } - enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) - length := buf.Len() - enc.align(alignment(v.Type().Elem())) - if _, err := buf.WriteTo(enc.out); err != nil { - panic(err) - } - enc.pos += length - case reflect.Struct: - if depth >= 64 && v.Type() != signatureType { - panic(FormatError("input exceeds container depth limit")) - } - switch t := v.Type(); t { - case signatureType: - str := v.Field(0) - enc.encode(reflect.ValueOf(byte(str.Len())), depth+1) - b := make([]byte, str.Len()+1) - copy(b, str.String()) - b[len(b)-1] = 0 - n, err := enc.out.Write(b) - if err != nil { - panic(err) - } - enc.pos += n - case variantType: - variant := v.Interface().(Variant) - enc.encode(reflect.ValueOf(variant.sig), depth+1) - enc.encode(reflect.ValueOf(variant.value), depth+1) - default: - for i := 0; i < v.Type().NumField(); i++ { - field := t.Field(i) - if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { - enc.encode(v.Field(i), depth+1) - } - } - } - case reflect.Map: - // Maps are arrays of structures, so they actually increase the depth by - // 2. - if depth >= 63 { - panic(FormatError("input exceeds container depth limit")) - } - if !isKeyType(v.Type().Key()) { - panic(InvalidTypeError{v.Type()}) - } - keys := v.MapKeys() - // Lookahead offset: 4 bytes for uint32 length (with alignment), - // plus 8-byte alignment - n := enc.padding(0, 4) + 4 - offset := enc.pos + n + enc.padding(n, 8) - - var buf bytes.Buffer - bufenc := newEncoderAtOffset(&buf, offset, enc.order) - for _, k := range keys { - bufenc.align(8) - bufenc.encode(k, depth+2) - bufenc.encode(v.MapIndex(k), depth+2) - } - enc.encode(reflect.ValueOf(uint32(buf.Len())), depth) - length := buf.Len() - enc.align(8) - if _, err := buf.WriteTo(enc.out); err != nil { - panic(err) - } - enc.pos += length - case reflect.Interface: - enc.encode(reflect.ValueOf(MakeVariant(v.Interface())), depth) - default: - panic(InvalidTypeError{v.Type()}) - } -} diff --git a/vendor/github.com/godbus/dbus/export.go b/vendor/github.com/godbus/dbus/export.go deleted file mode 100644 index 95d0e295..00000000 --- a/vendor/github.com/godbus/dbus/export.go +++ /dev/null @@ -1,412 +0,0 @@ -package dbus - -import ( - "errors" - "fmt" - "reflect" - "strings" -) - -var ( - ErrMsgInvalidArg = Error{ - "org.freedesktop.DBus.Error.InvalidArgs", - []interface{}{"Invalid type / number of args"}, - } - ErrMsgNoObject = Error{ - "org.freedesktop.DBus.Error.NoSuchObject", - []interface{}{"No such object"}, - } - ErrMsgUnknownMethod = Error{ - "org.freedesktop.DBus.Error.UnknownMethod", - []interface{}{"Unknown / invalid method"}, - } - ErrMsgUnknownInterface = Error{ - "org.freedesktop.DBus.Error.UnknownInterface", - []interface{}{"Object does not implement the interface"}, - } -) - -func MakeFailedError(err error) *Error { - return &Error{ - "org.freedesktop.DBus.Error.Failed", - []interface{}{err.Error()}, - } -} - -// Sender is a type which can be used in exported methods to receive the message -// sender. 
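(Editorial aside, not part of the diff.) Export, Sender and the *Error return convention defined in export.go are easiest to see in a tiny service sketch; the interface name and object path below are made up, and a real service would typically also request a well-known name on the bus.

package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

type greeter struct{}

// Greet is callable over the bus: the Sender argument is filled in with the
// caller's unique name and does not appear in the D-Bus signature, and the
// trailing *dbus.Error becomes the error reply.
func (greeter) Greet(sender dbus.Sender, name string) (string, *dbus.Error) {
	if name == "" {
		return "", dbus.MakeFailedError(fmt.Errorf("empty name"))
	}
	return fmt.Sprintf("hello %s (from %s)", name, sender), nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	if err := conn.Export(greeter{}, "/com/example/Greeter", "com.example.Greeter"); err != nil {
		panic(err)
	}
	select {} // keep serving calls
}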
-type Sender string - -func computeMethodName(name string, mapping map[string]string) string { - newname, ok := mapping[name] - if ok { - name = newname - } - return name -} - -func getMethods(in interface{}, mapping map[string]string) map[string]reflect.Value { - if in == nil { - return nil - } - methods := make(map[string]reflect.Value) - val := reflect.ValueOf(in) - typ := val.Type() - for i := 0; i < typ.NumMethod(); i++ { - methtype := typ.Method(i) - method := val.Method(i) - t := method.Type() - // only track valid methods must return *Error as last arg - // and must be exported - if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) || - methtype.PkgPath != "" { - continue - } - // map names while building table - methods[computeMethodName(methtype.Name, mapping)] = method - } - return methods -} - -func standardMethodArgumentDecode(m Method, sender string, msg *Message, body []interface{}) ([]interface{}, error) { - pointers := make([]interface{}, m.NumArguments()) - decode := make([]interface{}, 0, len(body)) - - for i := 0; i < m.NumArguments(); i++ { - tp := reflect.TypeOf(m.ArgumentValue(i)) - val := reflect.New(tp) - pointers[i] = val.Interface() - if tp == reflect.TypeOf((*Sender)(nil)).Elem() { - val.Elem().SetString(sender) - } else if tp == reflect.TypeOf((*Message)(nil)).Elem() { - val.Elem().Set(reflect.ValueOf(*msg)) - } else { - decode = append(decode, pointers[i]) - } - } - - if len(decode) != len(body) { - return nil, ErrMsgInvalidArg - } - - if err := Store(body, decode...); err != nil { - return nil, ErrMsgInvalidArg - } - - return pointers, nil -} - -func (conn *Conn) decodeArguments(m Method, sender string, msg *Message) ([]interface{}, error) { - if decoder, ok := m.(ArgumentDecoder); ok { - return decoder.DecodeArguments(conn, sender, msg, msg.Body) - } - return standardMethodArgumentDecode(m, sender, msg, msg.Body) -} - -// handleCall handles the given method call (i.e. looks if it's one of the -// pre-implemented ones and searches for a corresponding handler if not). -func (conn *Conn) handleCall(msg *Message) { - name := msg.Headers[FieldMember].value.(string) - path := msg.Headers[FieldPath].value.(ObjectPath) - ifaceName, _ := msg.Headers[FieldInterface].value.(string) - sender, hasSender := msg.Headers[FieldSender].value.(string) - serial := msg.serial - if ifaceName == "org.freedesktop.DBus.Peer" { - switch name { - case "Ping": - conn.sendReply(sender, serial) - case "GetMachineId": - conn.sendReply(sender, serial, conn.uuid) - default: - conn.sendError(ErrMsgUnknownMethod, sender, serial) - } - return - } - if len(name) == 0 { - conn.sendError(ErrMsgUnknownMethod, sender, serial) - } - - object, ok := conn.handler.LookupObject(path) - if !ok { - conn.sendError(ErrMsgNoObject, sender, serial) - return - } - - iface, exists := object.LookupInterface(ifaceName) - if !exists { - conn.sendError(ErrMsgUnknownInterface, sender, serial) - return - } - - m, exists := iface.LookupMethod(name) - if !exists { - conn.sendError(ErrMsgUnknownMethod, sender, serial) - return - } - args, err := conn.decodeArguments(m, sender, msg) - if err != nil { - conn.sendError(err, sender, serial) - return - } - - ret, err := m.Call(args...) 
- if err != nil { - conn.sendError(err, sender, serial) - return - } - - if msg.Flags&FlagNoReplyExpected == 0 { - reply := new(Message) - reply.Type = TypeMethodReply - reply.serial = conn.getSerial() - reply.Headers = make(map[HeaderField]Variant) - if hasSender { - reply.Headers[FieldDestination] = msg.Headers[FieldSender] - } - reply.Headers[FieldReplySerial] = MakeVariant(msg.serial) - reply.Body = make([]interface{}, len(ret)) - for i := 0; i < len(ret); i++ { - reply.Body[i] = ret[i] - } - reply.Headers[FieldSignature] = MakeVariant(SignatureOf(reply.Body...)) - - conn.sendMessage(reply) - } -} - -// Emit emits the given signal on the message bus. The name parameter must be -// formatted as "interface.member", e.g., "org.freedesktop.DBus.NameLost". -func (conn *Conn) Emit(path ObjectPath, name string, values ...interface{}) error { - if !path.IsValid() { - return errors.New("dbus: invalid object path") - } - i := strings.LastIndex(name, ".") - if i == -1 { - return errors.New("dbus: invalid method name") - } - iface := name[:i] - member := name[i+1:] - if !isValidMember(member) { - return errors.New("dbus: invalid method name") - } - if !isValidInterface(iface) { - return errors.New("dbus: invalid interface name") - } - msg := new(Message) - msg.Type = TypeSignal - msg.serial = conn.getSerial() - msg.Headers = make(map[HeaderField]Variant) - msg.Headers[FieldInterface] = MakeVariant(iface) - msg.Headers[FieldMember] = MakeVariant(member) - msg.Headers[FieldPath] = MakeVariant(path) - msg.Body = values - if len(values) > 0 { - msg.Headers[FieldSignature] = MakeVariant(SignatureOf(values...)) - } - - var closed bool - conn.sendMessageAndIfClosed(msg, func() { - closed = true - }) - if closed { - return ErrClosed - } - return nil -} - -// Export registers the given value to be exported as an object on the -// message bus. -// -// If a method call on the given path and interface is received, an exported -// method with the same name is called with v as the receiver if the -// parameters match and the last return value is of type *Error. If this -// *Error is not nil, it is sent back to the caller as an error. -// Otherwise, a method reply is sent with the other return values as its body. -// -// Any parameters with the special type Sender are set to the sender of the -// dbus message when the method is called. Parameters of this type do not -// contribute to the dbus signature of the method (i.e. the method is exposed -// as if the parameters of type Sender were not there). -// -// Similarly, any parameters with the type Message are set to the raw message -// received on the bus. Again, parameters of this type do not contribute to the -// dbus signature of the method. -// -// Every method call is executed in a new goroutine, so the method may be called -// in multiple goroutines at once. -// -// Method calls on the interface org.freedesktop.DBus.Peer will be automatically -// handled for every object. -// -// Passing nil as the first parameter will cause conn to cease handling calls on -// the given combination of path and interface. -// -// Export returns an error if path is not a valid path name. -func (conn *Conn) Export(v interface{}, path ObjectPath, iface string) error { - return conn.ExportWithMap(v, nil, path, iface) -} - -// ExportWithMap works exactly like Export but provides the ability to remap -// method names (e.g. export a lower-case method). 
-// -// The keys in the map are the real method names (exported on the struct), and -// the values are the method names to be exported on DBus. -func (conn *Conn) ExportWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { - return conn.export(getMethods(v, mapping), path, iface, false) -} - -// ExportSubtree works exactly like Export but registers the given value for -// an entire subtree rather under the root path provided. -// -// In order to make this useful, one parameter in each of the value's exported -// methods should be a Message, in which case it will contain the raw message -// (allowing one to get access to the path that caused the method to be called). -// -// Note that more specific export paths take precedence over less specific. For -// example, a method call using the ObjectPath /foo/bar/baz will call a method -// exported on /foo/bar before a method exported on /foo. -func (conn *Conn) ExportSubtree(v interface{}, path ObjectPath, iface string) error { - return conn.ExportSubtreeWithMap(v, nil, path, iface) -} - -// ExportSubtreeWithMap works exactly like ExportSubtree but provides the -// ability to remap method names (e.g. export a lower-case method). -// -// The keys in the map are the real method names (exported on the struct), and -// the values are the method names to be exported on DBus. -func (conn *Conn) ExportSubtreeWithMap(v interface{}, mapping map[string]string, path ObjectPath, iface string) error { - return conn.export(getMethods(v, mapping), path, iface, true) -} - -// ExportMethodTable like Export registers the given methods as an object -// on the message bus. Unlike Export the it uses a method table to define -// the object instead of a native go object. -// -// The method table is a map from method name to function closure -// representing the method. This allows an object exported on the bus to not -// necessarily be a native go object. It can be useful for generating exposed -// methods on the fly. -// -// Any non-function objects in the method table are ignored. -func (conn *Conn) ExportMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { - return conn.exportMethodTable(methods, path, iface, false) -} - -// Like ExportSubtree, but with the same caveats as ExportMethodTable. -func (conn *Conn) ExportSubtreeMethodTable(methods map[string]interface{}, path ObjectPath, iface string) error { - return conn.exportMethodTable(methods, path, iface, true) -} - -func (conn *Conn) exportMethodTable(methods map[string]interface{}, path ObjectPath, iface string, includeSubtree bool) error { - out := make(map[string]reflect.Value) - for name, method := range methods { - rval := reflect.ValueOf(method) - if rval.Kind() != reflect.Func { - continue - } - t := rval.Type() - // only track valid methods must return *Error as last arg - if t.NumOut() == 0 || - t.Out(t.NumOut()-1) != reflect.TypeOf(&ErrMsgInvalidArg) { - continue - } - out[name] = rval - } - return conn.export(out, path, iface, includeSubtree) -} - -func (conn *Conn) unexport(h *defaultHandler, path ObjectPath, iface string) error { - if h.PathExists(path) { - obj := h.objects[path] - obj.DeleteInterface(iface) - if len(obj.interfaces) == 0 { - h.DeleteObject(path) - } - } - return nil -} - -// exportWithMap is the worker function for all exports/registrations. 
-func (conn *Conn) export(methods map[string]reflect.Value, path ObjectPath, iface string, includeSubtree bool) error { - h, ok := conn.handler.(*defaultHandler) - if !ok { - return fmt.Errorf( - `dbus: export only allowed on the default hander handler have %T"`, - conn.handler) - } - - if !path.IsValid() { - return fmt.Errorf(`dbus: Invalid path name: "%s"`, path) - } - - // Remove a previous export if the interface is nil - if methods == nil { - return conn.unexport(h, path, iface) - } - - // If this is the first handler for this path, make a new map to hold all - // handlers for this path. - if !h.PathExists(path) { - h.AddObject(path, newExportedObject()) - } - - exportedMethods := make(map[string]Method) - for name, method := range methods { - exportedMethods[name] = exportedMethod{method} - } - - // Finally, save this handler - obj := h.objects[path] - obj.AddInterface(iface, newExportedIntf(exportedMethods, includeSubtree)) - - return nil -} - -// ReleaseName calls org.freedesktop.DBus.ReleaseName and awaits a response. -func (conn *Conn) ReleaseName(name string) (ReleaseNameReply, error) { - var r uint32 - err := conn.busObj.Call("org.freedesktop.DBus.ReleaseName", 0, name).Store(&r) - if err != nil { - return 0, err - } - return ReleaseNameReply(r), nil -} - -// RequestName calls org.freedesktop.DBus.RequestName and awaits a response. -func (conn *Conn) RequestName(name string, flags RequestNameFlags) (RequestNameReply, error) { - var r uint32 - err := conn.busObj.Call("org.freedesktop.DBus.RequestName", 0, name, flags).Store(&r) - if err != nil { - return 0, err - } - return RequestNameReply(r), nil -} - -// ReleaseNameReply is the reply to a ReleaseName call. -type ReleaseNameReply uint32 - -const ( - ReleaseNameReplyReleased ReleaseNameReply = 1 + iota - ReleaseNameReplyNonExistent - ReleaseNameReplyNotOwner -) - -// RequestNameFlags represents the possible flags for a RequestName call. -type RequestNameFlags uint32 - -const ( - NameFlagAllowReplacement RequestNameFlags = 1 << iota - NameFlagReplaceExisting - NameFlagDoNotQueue -) - -// RequestNameReply is the reply to a RequestName call. 
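For orientation, the Export/RequestName surface documented in the deleted export.go above is typically driven as in the sketch below. The object path, interface, and well-known name are made up for illustration, and dbus.SessionBus comes from a part of the package not shown in this diff, so treat the wiring as an assumption rather than a verbatim excerpt:

```go
package main

import "github.com/godbus/dbus"

type demo struct{}

// Exported methods must be exported identifiers and return *dbus.Error as the
// last value; a nil *dbus.Error means success and the remaining return values
// become the reply body.
func (d demo) Hello(name string) (string, *dbus.Error) {
	return "hello " + name, nil
}

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	// Hypothetical path, interface and well-known name, for illustration only.
	if err := conn.Export(demo{}, "/com/example/Demo", "com.example.Demo"); err != nil {
		panic(err)
	}
	reply, err := conn.RequestName("com.example.Demo", dbus.NameFlagDoNotQueue)
	if err != nil {
		panic(err)
	}
	if reply != dbus.RequestNameReplyPrimaryOwner {
		panic("name already taken")
	}
	select {} // keep serving method calls
}
```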
-type RequestNameReply uint32 - -const ( - RequestNameReplyPrimaryOwner RequestNameReply = 1 + iota - RequestNameReplyInQueue - RequestNameReplyExists - RequestNameReplyAlreadyOwner -) diff --git a/vendor/github.com/godbus/dbus/homedir.go b/vendor/github.com/godbus/dbus/homedir.go deleted file mode 100644 index 0b745f93..00000000 --- a/vendor/github.com/godbus/dbus/homedir.go +++ /dev/null @@ -1,28 +0,0 @@ -package dbus - -import ( - "os" - "sync" -) - -var ( - homeDir string - homeDirLock sync.Mutex -) - -func getHomeDir() string { - homeDirLock.Lock() - defer homeDirLock.Unlock() - - if homeDir != "" { - return homeDir - } - - homeDir = os.Getenv("HOME") - if homeDir != "" { - return homeDir - } - - homeDir = lookupHomeDir() - return homeDir -} diff --git a/vendor/github.com/godbus/dbus/homedir_dynamic.go b/vendor/github.com/godbus/dbus/homedir_dynamic.go deleted file mode 100644 index 2732081e..00000000 --- a/vendor/github.com/godbus/dbus/homedir_dynamic.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !static_build - -package dbus - -import ( - "os/user" -) - -func lookupHomeDir() string { - u, err := user.Current() - if err != nil { - return "/" - } - return u.HomeDir -} diff --git a/vendor/github.com/godbus/dbus/homedir_static.go b/vendor/github.com/godbus/dbus/homedir_static.go deleted file mode 100644 index b9d9cb55..00000000 --- a/vendor/github.com/godbus/dbus/homedir_static.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build static_build - -package dbus - -import ( - "bufio" - "os" - "strconv" - "strings" -) - -func lookupHomeDir() string { - myUid := os.Getuid() - - f, err := os.Open("/etc/passwd") - if err != nil { - return "/" - } - defer f.Close() - - s := bufio.NewScanner(f) - - for s.Scan() { - if err := s.Err(); err != nil { - break - } - - line := strings.TrimSpace(s.Text()) - if line == "" { - continue - } - - parts := strings.Split(line, ":") - - if len(parts) >= 6 { - uid, err := strconv.Atoi(parts[2]) - if err == nil && uid == myUid { - return parts[5] - } - } - } - - // Default to / if we can't get a better value - return "/" -} diff --git a/vendor/github.com/godbus/dbus/message.go b/vendor/github.com/godbus/dbus/message.go deleted file mode 100644 index 6a925367..00000000 --- a/vendor/github.com/godbus/dbus/message.go +++ /dev/null @@ -1,353 +0,0 @@ -package dbus - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "reflect" - "strconv" -) - -const protoVersion byte = 1 - -// Flags represents the possible flags of a D-Bus message. -type Flags byte - -const ( - // FlagNoReplyExpected signals that the message is not expected to generate - // a reply. If this flag is set on outgoing messages, any possible reply - // will be discarded. - FlagNoReplyExpected Flags = 1 << iota - // FlagNoAutoStart signals that the message bus should not automatically - // start an application when handling this message. - FlagNoAutoStart - // FlagAllowInteractiveAuthorization may be set on a method call - // message to inform the receiving side that the caller is prepared - // to wait for interactive authorization, which might take a - // considerable time to complete. For instance, if this flag is set, - // it would be appropriate to query the user for passwords or - // confirmation via Polkit or a similar framework. - FlagAllowInteractiveAuthorization -) - -// Type represents the possible types of a D-Bus message. 
-type Type byte - -const ( - TypeMethodCall Type = 1 + iota - TypeMethodReply - TypeError - TypeSignal - typeMax -) - -func (t Type) String() string { - switch t { - case TypeMethodCall: - return "method call" - case TypeMethodReply: - return "reply" - case TypeError: - return "error" - case TypeSignal: - return "signal" - } - return "invalid" -} - -// HeaderField represents the possible byte codes for the headers -// of a D-Bus message. -type HeaderField byte - -const ( - FieldPath HeaderField = 1 + iota - FieldInterface - FieldMember - FieldErrorName - FieldReplySerial - FieldDestination - FieldSender - FieldSignature - FieldUnixFDs - fieldMax -) - -// An InvalidMessageError describes the reason why a D-Bus message is regarded as -// invalid. -type InvalidMessageError string - -func (e InvalidMessageError) Error() string { - return "dbus: invalid message: " + string(e) -} - -// fieldType are the types of the various header fields. -var fieldTypes = [fieldMax]reflect.Type{ - FieldPath: objectPathType, - FieldInterface: stringType, - FieldMember: stringType, - FieldErrorName: stringType, - FieldReplySerial: uint32Type, - FieldDestination: stringType, - FieldSender: stringType, - FieldSignature: signatureType, - FieldUnixFDs: uint32Type, -} - -// requiredFields lists the header fields that are required by the different -// message types. -var requiredFields = [typeMax][]HeaderField{ - TypeMethodCall: {FieldPath, FieldMember}, - TypeMethodReply: {FieldReplySerial}, - TypeError: {FieldErrorName, FieldReplySerial}, - TypeSignal: {FieldPath, FieldInterface, FieldMember}, -} - -// Message represents a single D-Bus message. -type Message struct { - Type - Flags - Headers map[HeaderField]Variant - Body []interface{} - - serial uint32 -} - -type header struct { - Field byte - Variant -} - -// DecodeMessage tries to decode a single message in the D-Bus wire format -// from the given reader. The byte order is figured out from the first byte. -// The possibly returned error can be an error of the underlying reader, an -// InvalidMessageError or a FormatError. 
-func DecodeMessage(rd io.Reader) (msg *Message, err error) { - var order binary.ByteOrder - var hlength, length uint32 - var typ, flags, proto byte - var headers []header - - b := make([]byte, 1) - _, err = rd.Read(b) - if err != nil { - return - } - switch b[0] { - case 'l': - order = binary.LittleEndian - case 'B': - order = binary.BigEndian - default: - return nil, InvalidMessageError("invalid byte order") - } - - dec := newDecoder(rd, order) - dec.pos = 1 - - msg = new(Message) - vs, err := dec.Decode(Signature{"yyyuu"}) - if err != nil { - return nil, err - } - if err = Store(vs, &typ, &flags, &proto, &length, &msg.serial); err != nil { - return nil, err - } - msg.Type = Type(typ) - msg.Flags = Flags(flags) - - // get the header length separately because we need it later - b = make([]byte, 4) - _, err = io.ReadFull(rd, b) - if err != nil { - return nil, err - } - binary.Read(bytes.NewBuffer(b), order, &hlength) - if hlength+length+16 > 1<<27 { - return nil, InvalidMessageError("message is too long") - } - dec = newDecoder(io.MultiReader(bytes.NewBuffer(b), rd), order) - dec.pos = 12 - vs, err = dec.Decode(Signature{"a(yv)"}) - if err != nil { - return nil, err - } - if err = Store(vs, &headers); err != nil { - return nil, err - } - - msg.Headers = make(map[HeaderField]Variant) - for _, v := range headers { - msg.Headers[HeaderField(v.Field)] = v.Variant - } - - dec.align(8) - body := make([]byte, int(length)) - if length != 0 { - _, err := io.ReadFull(rd, body) - if err != nil { - return nil, err - } - } - - if err = msg.IsValid(); err != nil { - return nil, err - } - sig, _ := msg.Headers[FieldSignature].value.(Signature) - if sig.str != "" { - buf := bytes.NewBuffer(body) - dec = newDecoder(buf, order) - vs, err := dec.Decode(sig) - if err != nil { - return nil, err - } - msg.Body = vs - } - - return -} - -// EncodeTo encodes and sends a message to the given writer. The byte order must -// be either binary.LittleEndian or binary.BigEndian. If the message is not -// valid or an error occurs when writing, an error is returned. -func (msg *Message) EncodeTo(out io.Writer, order binary.ByteOrder) error { - if err := msg.IsValid(); err != nil { - return err - } - var vs [7]interface{} - switch order { - case binary.LittleEndian: - vs[0] = byte('l') - case binary.BigEndian: - vs[0] = byte('B') - default: - return errors.New("dbus: invalid byte order") - } - body := new(bytes.Buffer) - enc := newEncoder(body, order) - if len(msg.Body) != 0 { - enc.Encode(msg.Body...) - } - vs[1] = msg.Type - vs[2] = msg.Flags - vs[3] = protoVersion - vs[4] = uint32(len(body.Bytes())) - vs[5] = msg.serial - headers := make([]header, 0, len(msg.Headers)) - for k, v := range msg.Headers { - headers = append(headers, header{byte(k), v}) - } - vs[6] = headers - var buf bytes.Buffer - enc = newEncoder(&buf, order) - enc.Encode(vs[:]...) - enc.align(8) - body.WriteTo(&buf) - if buf.Len() > 1<<27 { - return InvalidMessageError("message is too long") - } - if _, err := buf.WriteTo(out); err != nil { - return err - } - return nil -} - -// IsValid checks whether msg is a valid message and returns an -// InvalidMessageError if it is not. 
-func (msg *Message) IsValid() error { - if msg.Flags & ^(FlagNoAutoStart|FlagNoReplyExpected|FlagAllowInteractiveAuthorization) != 0 { - return InvalidMessageError("invalid flags") - } - if msg.Type == 0 || msg.Type >= typeMax { - return InvalidMessageError("invalid message type") - } - for k, v := range msg.Headers { - if k == 0 || k >= fieldMax { - return InvalidMessageError("invalid header") - } - if reflect.TypeOf(v.value) != fieldTypes[k] { - return InvalidMessageError("invalid type of header field") - } - } - for _, v := range requiredFields[msg.Type] { - if _, ok := msg.Headers[v]; !ok { - return InvalidMessageError("missing required header") - } - } - if path, ok := msg.Headers[FieldPath]; ok { - if !path.value.(ObjectPath).IsValid() { - return InvalidMessageError("invalid path name") - } - } - if iface, ok := msg.Headers[FieldInterface]; ok { - if !isValidInterface(iface.value.(string)) { - return InvalidMessageError("invalid interface name") - } - } - if member, ok := msg.Headers[FieldMember]; ok { - if !isValidMember(member.value.(string)) { - return InvalidMessageError("invalid member name") - } - } - if errname, ok := msg.Headers[FieldErrorName]; ok { - if !isValidInterface(errname.value.(string)) { - return InvalidMessageError("invalid error name") - } - } - if len(msg.Body) != 0 { - if _, ok := msg.Headers[FieldSignature]; !ok { - return InvalidMessageError("missing signature") - } - } - return nil -} - -// Serial returns the message's serial number. The returned value is only valid -// for messages received by eavesdropping. -func (msg *Message) Serial() uint32 { - return msg.serial -} - -// String returns a string representation of a message similar to the format of -// dbus-monitor. -func (msg *Message) String() string { - if err := msg.IsValid(); err != nil { - return "" - } - s := msg.Type.String() - if v, ok := msg.Headers[FieldSender]; ok { - s += " from " + v.value.(string) - } - if v, ok := msg.Headers[FieldDestination]; ok { - s += " to " + v.value.(string) - } - s += " serial " + strconv.FormatUint(uint64(msg.serial), 10) - if v, ok := msg.Headers[FieldReplySerial]; ok { - s += " reply_serial " + strconv.FormatUint(uint64(v.value.(uint32)), 10) - } - if v, ok := msg.Headers[FieldUnixFDs]; ok { - s += " unixfds " + strconv.FormatUint(uint64(v.value.(uint32)), 10) - } - if v, ok := msg.Headers[FieldPath]; ok { - s += " path " + string(v.value.(ObjectPath)) - } - if v, ok := msg.Headers[FieldInterface]; ok { - s += " interface " + v.value.(string) - } - if v, ok := msg.Headers[FieldErrorName]; ok { - s += " error " + v.value.(string) - } - if v, ok := msg.Headers[FieldMember]; ok { - s += " member " + v.value.(string) - } - if len(msg.Body) != 0 { - s += "\n" - } - for i, v := range msg.Body { - s += " " + MakeVariant(v).String() - if i != len(msg.Body)-1 { - s += "\n" - } - } - return s -} diff --git a/vendor/github.com/godbus/dbus/object.go b/vendor/github.com/godbus/dbus/object.go deleted file mode 100644 index f27ffe14..00000000 --- a/vendor/github.com/godbus/dbus/object.go +++ /dev/null @@ -1,219 +0,0 @@ -package dbus - -import ( - "context" - "errors" - "strings" -) - -// BusObject is the interface of a remote object on which methods can be -// invoked. 
-type BusObject interface { - Call(method string, flags Flags, args ...interface{}) *Call - CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call - Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call - GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call - AddMatchSignal(iface, member string, options ...MatchOption) *Call - RemoveMatchSignal(iface, member string, options ...MatchOption) *Call - GetProperty(p string) (Variant, error) - Destination() string - Path() ObjectPath -} - -// Object represents a remote object on which methods can be invoked. -type Object struct { - conn *Conn - dest string - path ObjectPath -} - -// Call calls a method with (*Object).Go and waits for its reply. -func (o *Object) Call(method string, flags Flags, args ...interface{}) *Call { - return <-o.createCall(context.Background(), method, flags, make(chan *Call, 1), args...).Done -} - -// CallWithContext acts like Call but takes a context -func (o *Object) CallWithContext(ctx context.Context, method string, flags Flags, args ...interface{}) *Call { - return <-o.createCall(ctx, method, flags, make(chan *Call, 1), args...).Done -} - -// MatchOption specifies option for dbus routing match rule. Options can be constructed with WithMatch* helpers. -// For full list of available options consult -// https://dbus.freedesktop.org/doc/dbus-specification.html#message-bus-routing-match-rules -type MatchOption struct { - key string - value string -} - -// WithMatchOption creates match option with given key and value -func WithMatchOption(key, value string) MatchOption { - return MatchOption{key, value} -} - -// WithMatchObjectPath creates match option that filters events based on given path -func WithMatchObjectPath(path ObjectPath) MatchOption { - return MatchOption{"path", string(path)} -} - -func formatMatchOptions(options []MatchOption) string { - items := make([]string, 0, len(options)) - for _, option := range options { - items = append(items, option.key+"='"+option.value+"'") - } - - return strings.Join(items, ",") -} - -// AddMatchSignal subscribes BusObject to signals from specified interface, -// method (member). Additional filter rules can be added via WithMatch* option constructors. -// Note: To filter events by object path you have to specify this path via an option. -func (o *Object) AddMatchSignal(iface, member string, options ...MatchOption) *Call { - base := []MatchOption{ - {"type", "signal"}, - {"interface", iface}, - {"member", member}, - } - - options = append(base, options...) - return o.conn.BusObject().Call( - "org.freedesktop.DBus.AddMatch", - 0, - formatMatchOptions(options), - ) -} - -// RemoveMatchSignal unsubscribes BusObject from signals from specified interface, -// method (member). Additional filter rules can be added via WithMatch* option constructors -func (o *Object) RemoveMatchSignal(iface, member string, options ...MatchOption) *Call { - base := []MatchOption{ - {"type", "signal"}, - {"interface", iface}, - {"member", member}, - } - - options = append(base, options...) - return o.conn.BusObject().Call( - "org.freedesktop.DBus.RemoveMatch", - 0, - formatMatchOptions(options), - ) -} - -// Go calls a method with the given arguments asynchronously. It returns a -// Call structure representing this method call. The passed channel will -// return the same value once the call is done. If ch is nil, a new channel -// will be allocated. Otherwise, ch has to be buffered or Go will panic. 
-// -// If the flags include FlagNoReplyExpected, ch is ignored and a Call structure -// is returned with any error in Err and a closed channel in Done containing -// the returned Call as it's one entry. -// -// If the method parameter contains a dot ('.'), the part before the last dot -// specifies the interface on which the method is called. -func (o *Object) Go(method string, flags Flags, ch chan *Call, args ...interface{}) *Call { - return o.createCall(context.Background(), method, flags, ch, args...) -} - -// GoWithContext acts like Go but takes a context -func (o *Object) GoWithContext(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { - return o.createCall(ctx, method, flags, ch, args...) -} - -func (o *Object) createCall(ctx context.Context, method string, flags Flags, ch chan *Call, args ...interface{}) *Call { - if ctx == nil { - panic("nil context") - } - iface := "" - i := strings.LastIndex(method, ".") - if i != -1 { - iface = method[:i] - } - method = method[i+1:] - msg := new(Message) - msg.Type = TypeMethodCall - msg.serial = o.conn.getSerial() - msg.Flags = flags & (FlagNoAutoStart | FlagNoReplyExpected) - msg.Headers = make(map[HeaderField]Variant) - msg.Headers[FieldPath] = MakeVariant(o.path) - msg.Headers[FieldDestination] = MakeVariant(o.dest) - msg.Headers[FieldMember] = MakeVariant(method) - if iface != "" { - msg.Headers[FieldInterface] = MakeVariant(iface) - } - msg.Body = args - if len(args) > 0 { - msg.Headers[FieldSignature] = MakeVariant(SignatureOf(args...)) - } - if msg.Flags&FlagNoReplyExpected == 0 { - if ch == nil { - ch = make(chan *Call, 10) - } else if cap(ch) == 0 { - panic("dbus: unbuffered channel passed to (*Object).Go") - } - ctx, cancel := context.WithCancel(ctx) - call := &Call{ - Destination: o.dest, - Path: o.path, - Method: method, - Args: args, - Done: ch, - ctxCanceler: cancel, - ctx: ctx, - } - o.conn.calls.track(msg.serial, call) - o.conn.sendMessageAndIfClosed(msg, func() { - o.conn.calls.handleSendError(msg, ErrClosed) - cancel() - }) - go func() { - <-ctx.Done() - o.conn.calls.handleSendError(msg, ctx.Err()) - }() - - return call - } - done := make(chan *Call, 1) - call := &Call{ - Err: nil, - Done: done, - } - defer func() { - call.Done <- call - close(done) - }() - o.conn.sendMessageAndIfClosed(msg, func() { - call.Err = ErrClosed - }) - return call -} - -// GetProperty calls org.freedesktop.DBus.Properties.GetProperty on the given -// object. The property name must be given in interface.member notation. -func (o *Object) GetProperty(p string) (Variant, error) { - idx := strings.LastIndex(p, ".") - if idx == -1 || idx+1 == len(p) { - return Variant{}, errors.New("dbus: invalid property " + p) - } - - iface := p[:idx] - prop := p[idx+1:] - - result := Variant{} - err := o.Call("org.freedesktop.DBus.Properties.Get", 0, iface, prop).Store(&result) - - if err != nil { - return Variant{}, err - } - - return result, nil -} - -// Destination returns the destination that calls on (o *Object) are sent to. -func (o *Object) Destination() string { - return o.dest -} - -// Path returns the path that calls on (o *Object") are sent to. 
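A minimal client-side sketch of the BusObject/Call path shown above; conn.Object and dbus.SessionBus are assumed from parts of the package not included in this hunk, and org.freedesktop.DBus is used only because it exists on every session bus:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	conn, err := dbus.SessionBus()
	if err != nil {
		panic(err)
	}
	obj := conn.Object("org.freedesktop.DBus", "/org/freedesktop/DBus")

	// Synchronous call: the part of the method name before the last dot is the
	// interface, the part after it is the member (see createCall above).
	var names []string
	if err := obj.Call("org.freedesktop.DBus.ListNames", 0).Store(&names); err != nil {
		panic(err)
	}
	fmt.Println(len(names), "names on the bus")

	// GetProperty also takes "interface.member" notation; "Interfaces" is a
	// property of newer dbus-daemons and is used here purely as an example.
	if v, err := obj.GetProperty("org.freedesktop.DBus.Interfaces"); err == nil {
		fmt.Println("bus interfaces:", v.Value())
	}
}
```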
-func (o *Object) Path() ObjectPath { - return o.path -} diff --git a/vendor/github.com/godbus/dbus/server_interfaces.go b/vendor/github.com/godbus/dbus/server_interfaces.go deleted file mode 100644 index 01166f0b..00000000 --- a/vendor/github.com/godbus/dbus/server_interfaces.go +++ /dev/null @@ -1,99 +0,0 @@ -package dbus - -// Terminator allows a handler to implement a shutdown mechanism that -// is called when the connection terminates. -type Terminator interface { - Terminate() -} - -// Handler is the representation of a D-Bus Application. -// -// The Handler must have a way to lookup objects given -// an ObjectPath. The returned object must implement the -// ServerObject interface. -type Handler interface { - LookupObject(path ObjectPath) (ServerObject, bool) -} - -// ServerObject is the representation of an D-Bus Object. -// -// Objects are registered at a path for a given Handler. -// The Objects implement D-Bus interfaces. The semantics -// of Interface lookup is up to the implementation of -// the ServerObject. The ServerObject implementation may -// choose to implement empty string as a valid interface -// represeting all methods or not per the D-Bus specification. -type ServerObject interface { - LookupInterface(name string) (Interface, bool) -} - -// An Interface is the representation of a D-Bus Interface. -// -// Interfaces are a grouping of methods implemented by the Objects. -// Interfaces are responsible for routing method calls. -type Interface interface { - LookupMethod(name string) (Method, bool) -} - -// A Method represents the exposed methods on D-Bus. -type Method interface { - // Call requires that all arguments are decoded before being passed to it. - Call(args ...interface{}) ([]interface{}, error) - NumArguments() int - NumReturns() int - // ArgumentValue returns a representative value for the argument at position - // it should be of the proper type. reflect.Zero would be a good mechanism - // to use for this Value. - ArgumentValue(position int) interface{} - // ReturnValue returns a representative value for the return at position - // it should be of the proper type. reflect.Zero would be a good mechanism - // to use for this Value. - ReturnValue(position int) interface{} -} - -// An Argument Decoder can decode arguments using the non-standard mechanism -// -// If a method implements this interface then the non-standard -// decoder will be used. -// -// Method arguments must be decoded from the message. -// The mechanism for doing this will vary based on the -// implementation of the method. A normal approach is provided -// as part of this library, but may be replaced with -// any other decoding scheme. -type ArgumentDecoder interface { - // To decode the arguments of a method the sender and message are - // provided incase the semantics of the implementer provides access - // to these as part of the method invocation. - DecodeArguments(conn *Conn, sender string, msg *Message, args []interface{}) ([]interface{}, error) -} - -// A SignalHandler is responsible for delivering a signal. -// -// Signal delivery may be changed from the default channel -// based approach by Handlers implementing the SignalHandler -// interface. -type SignalHandler interface { - DeliverSignal(iface, name string, signal *Signal) -} - -// A DBusError is used to convert a generic object to a D-Bus error. -// -// Any custom error mechanism may implement this interface to provide -// a custom encoding of the error on D-Bus. 
By default if a normal -// error is returned, it will be encoded as the generic -// "org.freedesktop.DBus.Error.Failed" error. By implementing this -// interface as well a custom encoding may be provided. -type DBusError interface { - DBusError() (string, []interface{}) -} - -// SerialGenerator is responsible for serials generation. -// -// Different approaches for the serial generation can be used, -// maintaining a map guarded with a mutex (the standard way) or -// simply increment an atomic counter. -type SerialGenerator interface { - GetSerial() uint32 - RetireSerial(serial uint32) -} diff --git a/vendor/github.com/godbus/dbus/sig.go b/vendor/github.com/godbus/dbus/sig.go deleted file mode 100644 index c1b80920..00000000 --- a/vendor/github.com/godbus/dbus/sig.go +++ /dev/null @@ -1,259 +0,0 @@ -package dbus - -import ( - "fmt" - "reflect" - "strings" -) - -var sigToType = map[byte]reflect.Type{ - 'y': byteType, - 'b': boolType, - 'n': int16Type, - 'q': uint16Type, - 'i': int32Type, - 'u': uint32Type, - 'x': int64Type, - 't': uint64Type, - 'd': float64Type, - 's': stringType, - 'g': signatureType, - 'o': objectPathType, - 'v': variantType, - 'h': unixFDIndexType, -} - -// Signature represents a correct type signature as specified by the D-Bus -// specification. The zero value represents the empty signature, "". -type Signature struct { - str string -} - -// SignatureOf returns the concatenation of all the signatures of the given -// values. It panics if one of them is not representable in D-Bus. -func SignatureOf(vs ...interface{}) Signature { - var s string - for _, v := range vs { - s += getSignature(reflect.TypeOf(v)) - } - return Signature{s} -} - -// SignatureOfType returns the signature of the given type. It panics if the -// type is not representable in D-Bus. -func SignatureOfType(t reflect.Type) Signature { - return Signature{getSignature(t)} -} - -// getSignature returns the signature of the given type and panics on unknown types. -func getSignature(t reflect.Type) string { - // handle simple types first - switch t.Kind() { - case reflect.Uint8: - return "y" - case reflect.Bool: - return "b" - case reflect.Int16: - return "n" - case reflect.Uint16: - return "q" - case reflect.Int, reflect.Int32: - if t == unixFDType { - return "h" - } - return "i" - case reflect.Uint, reflect.Uint32: - if t == unixFDIndexType { - return "h" - } - return "u" - case reflect.Int64: - return "x" - case reflect.Uint64: - return "t" - case reflect.Float64: - return "d" - case reflect.Ptr: - return getSignature(t.Elem()) - case reflect.String: - if t == objectPathType { - return "o" - } - return "s" - case reflect.Struct: - if t == variantType { - return "v" - } else if t == signatureType { - return "g" - } - var s string - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath == "" && field.Tag.Get("dbus") != "-" { - s += getSignature(t.Field(i).Type) - } - } - return "(" + s + ")" - case reflect.Array, reflect.Slice: - return "a" + getSignature(t.Elem()) - case reflect.Map: - if !isKeyType(t.Key()) { - panic(InvalidTypeError{t}) - } - return "a{" + getSignature(t.Key()) + getSignature(t.Elem()) + "}" - case reflect.Interface: - return "v" - } - panic(InvalidTypeError{t}) -} - -// ParseSignature returns the signature represented by this string, or a -// SignatureError if the string is not a valid signature. 
-func ParseSignature(s string) (sig Signature, err error) { - if len(s) == 0 { - return - } - if len(s) > 255 { - return Signature{""}, SignatureError{s, "too long"} - } - sig.str = s - for err == nil && len(s) != 0 { - err, s = validSingle(s, 0) - } - if err != nil { - sig = Signature{""} - } - - return -} - -// ParseSignatureMust behaves like ParseSignature, except that it panics if s -// is not valid. -func ParseSignatureMust(s string) Signature { - sig, err := ParseSignature(s) - if err != nil { - panic(err) - } - return sig -} - -// Empty retruns whether the signature is the empty signature. -func (s Signature) Empty() bool { - return s.str == "" -} - -// Single returns whether the signature represents a single, complete type. -func (s Signature) Single() bool { - err, r := validSingle(s.str, 0) - return err != nil && r == "" -} - -// String returns the signature's string representation. -func (s Signature) String() string { - return s.str -} - -// A SignatureError indicates that a signature passed to a function or received -// on a connection is not a valid signature. -type SignatureError struct { - Sig string - Reason string -} - -func (e SignatureError) Error() string { - return fmt.Sprintf("dbus: invalid signature: %q (%s)", e.Sig, e.Reason) -} - -// Try to read a single type from this string. If it was successful, err is nil -// and rem is the remaining unparsed part. Otherwise, err is a non-nil -// SignatureError and rem is "". depth is the current recursion depth which may -// not be greater than 64 and should be given as 0 on the first call. -func validSingle(s string, depth int) (err error, rem string) { - if s == "" { - return SignatureError{Sig: s, Reason: "empty signature"}, "" - } - if depth > 64 { - return SignatureError{Sig: s, Reason: "container nesting too deep"}, "" - } - switch s[0] { - case 'y', 'b', 'n', 'q', 'i', 'u', 'x', 't', 'd', 's', 'g', 'o', 'v', 'h': - return nil, s[1:] - case 'a': - if len(s) > 1 && s[1] == '{' { - i := findMatching(s[1:], '{', '}') - if i == -1 { - return SignatureError{Sig: s, Reason: "unmatched '{'"}, "" - } - i++ - rem = s[i+1:] - s = s[2:i] - if err, _ = validSingle(s[:1], depth+1); err != nil { - return err, "" - } - err, nr := validSingle(s[1:], depth+1) - if err != nil { - return err, "" - } - if nr != "" { - return SignatureError{Sig: s, Reason: "too many types in dict"}, "" - } - return nil, rem - } - return validSingle(s[1:], depth+1) - case '(': - i := findMatching(s, '(', ')') - if i == -1 { - return SignatureError{Sig: s, Reason: "unmatched ')'"}, "" - } - rem = s[i+1:] - s = s[1:i] - for err == nil && s != "" { - err, s = validSingle(s, depth+1) - } - if err != nil { - rem = "" - } - return - } - return SignatureError{Sig: s, Reason: "invalid type character"}, "" -} - -func findMatching(s string, left, right rune) int { - n := 0 - for i, v := range s { - if v == left { - n++ - } else if v == right { - n-- - } - if n == 0 { - return i - } - } - return -1 -} - -// typeFor returns the type of the given signature. It ignores any left over -// characters and panics if s doesn't start with a valid type signature. 
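The signature rules implemented by getSignature and validSingle above can be exercised directly. A small sketch, assuming the vendored import path github.com/godbus/dbus, with the expected output noted in comments:

```go
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// Signatures of the arguments are concatenated:
	// string -> "s", uint32 -> "u", []int32 -> "ai", map[string]dbus.Variant -> "a{sv}".
	sig := dbus.SignatureOf("hi", uint32(7), []int32{1, 2}, map[string]dbus.Variant{})
	fmt.Println(sig.String()) // suaia{sv}

	// ParseSignature reports malformed input instead of panicking.
	if _, err := dbus.ParseSignature("(si"); err != nil {
		fmt.Println(err) // dbus: invalid signature: "(si" (unmatched ')')
	}
}
```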
-func typeFor(s string) (t reflect.Type) { - err, _ := validSingle(s, 0) - if err != nil { - panic(err) - } - - if t, ok := sigToType[s[0]]; ok { - return t - } - switch s[0] { - case 'a': - if s[1] == '{' { - i := strings.LastIndex(s, "}") - t = reflect.MapOf(sigToType[s[2]], typeFor(s[3:i])) - } else { - t = reflect.SliceOf(typeFor(s[1:])) - } - case '(': - t = interfacesType - } - return -} diff --git a/vendor/github.com/godbus/dbus/transport_darwin.go b/vendor/github.com/godbus/dbus/transport_darwin.go deleted file mode 100644 index 1bba0d6b..00000000 --- a/vendor/github.com/godbus/dbus/transport_darwin.go +++ /dev/null @@ -1,6 +0,0 @@ -package dbus - -func (t *unixTransport) SendNullByte() error { - _, err := t.Write([]byte{0}) - return err -} diff --git a/vendor/github.com/godbus/dbus/transport_generic.go b/vendor/github.com/godbus/dbus/transport_generic.go deleted file mode 100644 index 718a1ff0..00000000 --- a/vendor/github.com/godbus/dbus/transport_generic.go +++ /dev/null @@ -1,50 +0,0 @@ -package dbus - -import ( - "encoding/binary" - "errors" - "io" - "unsafe" -) - -var nativeEndian binary.ByteOrder - -func detectEndianness() binary.ByteOrder { - var x uint32 = 0x01020304 - if *(*byte)(unsafe.Pointer(&x)) == 0x01 { - return binary.BigEndian - } - return binary.LittleEndian -} - -func init() { - nativeEndian = detectEndianness() -} - -type genericTransport struct { - io.ReadWriteCloser -} - -func (t genericTransport) SendNullByte() error { - _, err := t.Write([]byte{0}) - return err -} - -func (t genericTransport) SupportsUnixFDs() bool { - return false -} - -func (t genericTransport) EnableUnixFDs() {} - -func (t genericTransport) ReadMessage() (*Message, error) { - return DecodeMessage(t) -} - -func (t genericTransport) SendMessage(msg *Message) error { - for _, v := range msg.Body { - if _, ok := v.(UnixFD); ok { - return errors.New("dbus: unix fd passing not enabled") - } - } - return msg.EncodeTo(t, nativeEndian) -} diff --git a/vendor/github.com/godbus/dbus/transport_nonce_tcp.go b/vendor/github.com/godbus/dbus/transport_nonce_tcp.go deleted file mode 100644 index 697739ef..00000000 --- a/vendor/github.com/godbus/dbus/transport_nonce_tcp.go +++ /dev/null @@ -1,39 +0,0 @@ -//+build !windows - -package dbus - -import ( - "errors" - "io/ioutil" - "net" -) - -func init() { - transports["nonce-tcp"] = newNonceTcpTransport -} - -func newNonceTcpTransport(keys string) (transport, error) { - host := getKey(keys, "host") - port := getKey(keys, "port") - noncefile := getKey(keys, "noncefile") - if host == "" || port == "" || noncefile == "" { - return nil, errors.New("dbus: unsupported address (must set host, port and noncefile)") - } - protocol, err := tcpFamily(keys) - if err != nil { - return nil, err - } - socket, err := net.Dial(protocol, net.JoinHostPort(host, port)) - if err != nil { - return nil, err - } - b, err := ioutil.ReadFile(noncefile) - if err != nil { - return nil, err - } - _, err = socket.Write(b) - if err != nil { - return nil, err - } - return NewConn(socket) -} diff --git a/vendor/github.com/godbus/dbus/transport_tcp.go b/vendor/github.com/godbus/dbus/transport_tcp.go deleted file mode 100644 index dd1c8e59..00000000 --- a/vendor/github.com/godbus/dbus/transport_tcp.go +++ /dev/null @@ -1,43 +0,0 @@ -//+build !windows - -package dbus - -import ( - "errors" - "net" -) - -func init() { - transports["tcp"] = newTcpTransport -} - -func tcpFamily(keys string) (string, error) { - switch getKey(keys, "family") { - case "": - return "tcp", nil - case "ipv4": - return 
"tcp4", nil - case "ipv6": - return "tcp6", nil - default: - return "", errors.New("dbus: invalid tcp family (must be ipv4 or ipv6)") - } -} - -func newTcpTransport(keys string) (transport, error) { - host := getKey(keys, "host") - port := getKey(keys, "port") - if host == "" || port == "" { - return nil, errors.New("dbus: unsupported address (must set host and port)") - } - - protocol, err := tcpFamily(keys) - if err != nil { - return nil, err - } - socket, err := net.Dial(protocol, net.JoinHostPort(host, port)) - if err != nil { - return nil, err - } - return NewConn(socket) -} diff --git a/vendor/github.com/godbus/dbus/transport_unix.go b/vendor/github.com/godbus/dbus/transport_unix.go deleted file mode 100644 index f000c6b5..00000000 --- a/vendor/github.com/godbus/dbus/transport_unix.go +++ /dev/null @@ -1,214 +0,0 @@ -//+build !windows,!solaris - -package dbus - -import ( - "bytes" - "encoding/binary" - "errors" - "io" - "net" - "syscall" -) - -type oobReader struct { - conn *net.UnixConn - oob []byte - buf [4096]byte -} - -func (o *oobReader) Read(b []byte) (n int, err error) { - n, oobn, flags, _, err := o.conn.ReadMsgUnix(b, o.buf[:]) - if err != nil { - return n, err - } - if flags&syscall.MSG_CTRUNC != 0 { - return n, errors.New("dbus: control data truncated (too many fds received)") - } - o.oob = append(o.oob, o.buf[:oobn]...) - return n, nil -} - -type unixTransport struct { - *net.UnixConn - rdr *oobReader - hasUnixFDs bool -} - -func newUnixTransport(keys string) (transport, error) { - var err error - - t := new(unixTransport) - abstract := getKey(keys, "abstract") - path := getKey(keys, "path") - switch { - case abstract == "" && path == "": - return nil, errors.New("dbus: invalid address (neither path nor abstract set)") - case abstract != "" && path == "": - t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: "@" + abstract, Net: "unix"}) - if err != nil { - return nil, err - } - return t, nil - case abstract == "" && path != "": - t.UnixConn, err = net.DialUnix("unix", nil, &net.UnixAddr{Name: path, Net: "unix"}) - if err != nil { - return nil, err - } - return t, nil - default: - return nil, errors.New("dbus: invalid address (both path and abstract set)") - } -} - -func init() { - transports["unix"] = newUnixTransport -} - -func (t *unixTransport) EnableUnixFDs() { - t.hasUnixFDs = true -} - -func (t *unixTransport) ReadMessage() (*Message, error) { - var ( - blen, hlen uint32 - csheader [16]byte - headers []header - order binary.ByteOrder - unixfds uint32 - ) - // To be sure that all bytes of out-of-band data are read, we use a special - // reader that uses ReadUnix on the underlying connection instead of Read - // and gathers the out-of-band data in a buffer. 
- if t.rdr == nil { - t.rdr = &oobReader{conn: t.UnixConn} - } else { - t.rdr.oob = nil - } - - // read the first 16 bytes (the part of the header that has a constant size), - // from which we can figure out the length of the rest of the message - if _, err := io.ReadFull(t.rdr, csheader[:]); err != nil { - return nil, err - } - switch csheader[0] { - case 'l': - order = binary.LittleEndian - case 'B': - order = binary.BigEndian - default: - return nil, InvalidMessageError("invalid byte order") - } - // csheader[4:8] -> length of message body, csheader[12:16] -> length of - // header fields (without alignment) - binary.Read(bytes.NewBuffer(csheader[4:8]), order, &blen) - binary.Read(bytes.NewBuffer(csheader[12:]), order, &hlen) - if hlen%8 != 0 { - hlen += 8 - (hlen % 8) - } - - // decode headers and look for unix fds - headerdata := make([]byte, hlen+4) - copy(headerdata, csheader[12:]) - if _, err := io.ReadFull(t.rdr, headerdata[4:]); err != nil { - return nil, err - } - dec := newDecoder(bytes.NewBuffer(headerdata), order) - dec.pos = 12 - vs, err := dec.Decode(Signature{"a(yv)"}) - if err != nil { - return nil, err - } - Store(vs, &headers) - for _, v := range headers { - if v.Field == byte(FieldUnixFDs) { - unixfds, _ = v.Variant.value.(uint32) - } - } - all := make([]byte, 16+hlen+blen) - copy(all, csheader[:]) - copy(all[16:], headerdata[4:]) - if _, err := io.ReadFull(t.rdr, all[16+hlen:]); err != nil { - return nil, err - } - if unixfds != 0 { - if !t.hasUnixFDs { - return nil, errors.New("dbus: got unix fds on unsupported transport") - } - // read the fds from the OOB data - scms, err := syscall.ParseSocketControlMessage(t.rdr.oob) - if err != nil { - return nil, err - } - if len(scms) != 1 { - return nil, errors.New("dbus: received more than one socket control message") - } - fds, err := syscall.ParseUnixRights(&scms[0]) - if err != nil { - return nil, err - } - msg, err := DecodeMessage(bytes.NewBuffer(all)) - if err != nil { - return nil, err - } - // substitute the values in the message body (which are indices for the - // array receiver via OOB) with the actual values - for i, v := range msg.Body { - switch v.(type) { - case UnixFDIndex: - j := v.(UnixFDIndex) - if uint32(j) >= unixfds { - return nil, InvalidMessageError("invalid index for unix fd") - } - msg.Body[i] = UnixFD(fds[j]) - case []UnixFDIndex: - idxArray := v.([]UnixFDIndex) - fdArray := make([]UnixFD, len(idxArray)) - for k, j := range idxArray { - if uint32(j) >= unixfds { - return nil, InvalidMessageError("invalid index for unix fd") - } - fdArray[k] = UnixFD(fds[j]) - } - msg.Body[i] = fdArray - } - } - return msg, nil - } - return DecodeMessage(bytes.NewBuffer(all)) -} - -func (t *unixTransport) SendMessage(msg *Message) error { - fds := make([]int, 0) - for i, v := range msg.Body { - if fd, ok := v.(UnixFD); ok { - msg.Body[i] = UnixFDIndex(len(fds)) - fds = append(fds, int(fd)) - } - } - if len(fds) != 0 { - if !t.hasUnixFDs { - return errors.New("dbus: unix fd passing not enabled") - } - msg.Headers[FieldUnixFDs] = MakeVariant(uint32(len(fds))) - oob := syscall.UnixRights(fds...) 
- buf := new(bytes.Buffer) - msg.EncodeTo(buf, nativeEndian) - n, oobn, err := t.UnixConn.WriteMsgUnix(buf.Bytes(), oob, nil) - if err != nil { - return err - } - if n != buf.Len() || oobn != len(oob) { - return io.ErrShortWrite - } - } else { - if err := msg.EncodeTo(t, nativeEndian); err != nil { - return nil - } - } - return nil -} - -func (t *unixTransport) SupportsUnixFDs() bool { - return true -} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go b/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go deleted file mode 100644 index a8cd3939..00000000 --- a/vendor/github.com/godbus/dbus/transport_unixcred_dragonfly.go +++ /dev/null @@ -1,95 +0,0 @@ -// The UnixCredentials system call is currently only implemented on Linux -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// https://golang.org/s/go1.4-syscall -// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys - -// Local implementation of the UnixCredentials system call for DragonFly BSD - -package dbus - -/* -#include -*/ -import "C" - -import ( - "io" - "os" - "syscall" - "unsafe" -) - -// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go -// http://golang.org/src/pkg/syscall/ztypes_dragonfly_amd64.go -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -// http://golang.org/src/pkg/syscall/types_linux.go -// http://golang.org/src/pkg/syscall/types_dragonfly.go -// https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/ucred.h -const ( - SizeofUcred = C.sizeof_struct_ucred -) - -// http://golang.org/src/pkg/syscall/sockcmsg_unix.go -func cmsgAlignOf(salen int) int { - // From http://golang.org/src/pkg/syscall/sockcmsg_unix.go - //salign := sizeofPtr - // NOTE: It seems like 64-bit Darwin and DragonFly BSD kernels - // still require 32-bit aligned access to network subsystem. - //if darwin64Bit || dragonfly64Bit { - // salign = 4 - //} - salign := 4 - return (salen + salign - 1) & ^(salign - 1) -} - -// http://golang.org/src/pkg/syscall/sockcmsg_unix.go -func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { - return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr))) -} - -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// UnixCredentials encodes credentials into a socket control message -// for sending to another process. This can be used for -// authentication. -func UnixCredentials(ucred *Ucred) []byte { - b := make([]byte, syscall.CmsgSpace(SizeofUcred)) - h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - h.Level = syscall.SOL_SOCKET - h.Type = syscall.SCM_CREDS - h.SetLen(syscall.CmsgLen(SizeofUcred)) - *((*Ucred)(cmsgData(h))) = *ucred - return b -} - -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// ParseUnixCredentials decodes a socket control message that contains -// credentials in a Ucred structure. To receive such a message, the -// SO_PASSCRED option must be enabled on the socket. 
-func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { - if m.Header.Level != syscall.SOL_SOCKET { - return nil, syscall.EINVAL - } - if m.Header.Type != syscall.SCM_CREDS { - return nil, syscall.EINVAL - } - ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) - return &ucred, nil -} - -func (t *unixTransport) SendNullByte() error { - ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} - b := UnixCredentials(ucred) - _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) - if err != nil { - return err - } - if oobn != len(b) { - return io.ErrShortWrite - } - return nil -} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go deleted file mode 100644 index 0fc5b927..00000000 --- a/vendor/github.com/godbus/dbus/transport_unixcred_freebsd.go +++ /dev/null @@ -1,91 +0,0 @@ -// The UnixCredentials system call is currently only implemented on Linux -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// https://golang.org/s/go1.4-syscall -// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys - -// Local implementation of the UnixCredentials system call for FreeBSD - -package dbus - -/* -const int sizeofPtr = sizeof(void*); -#define _WANT_UCRED -#include -*/ -import "C" - -import ( - "io" - "os" - "syscall" - "unsafe" -) - -// http://golang.org/src/pkg/syscall/ztypes_linux_amd64.go -// https://golang.org/src/syscall/ztypes_freebsd_amd64.go -type Ucred struct { - Pid int32 - Uid uint32 - Gid uint32 -} - -// http://golang.org/src/pkg/syscall/types_linux.go -// https://golang.org/src/syscall/types_freebsd.go -// https://github.com/freebsd/freebsd/blob/master/sys/sys/ucred.h -const ( - SizeofUcred = C.sizeof_struct_ucred -) - -// http://golang.org/src/pkg/syscall/sockcmsg_unix.go -func cmsgAlignOf(salen int) int { - salign := C.sizeofPtr - - return (salen + salign - 1) & ^(salign - 1) -} - -// http://golang.org/src/pkg/syscall/sockcmsg_unix.go -func cmsgData(h *syscall.Cmsghdr) unsafe.Pointer { - return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(syscall.SizeofCmsghdr))) -} - -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// UnixCredentials encodes credentials into a socket control message -// for sending to another process. This can be used for -// authentication. -func UnixCredentials(ucred *Ucred) []byte { - b := make([]byte, syscall.CmsgSpace(SizeofUcred)) - h := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0])) - h.Level = syscall.SOL_SOCKET - h.Type = syscall.SCM_CREDS - h.SetLen(syscall.CmsgLen(SizeofUcred)) - *((*Ucred)(cmsgData(h))) = *ucred - return b -} - -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// ParseUnixCredentials decodes a socket control message that contains -// credentials in a Ucred structure. To receive such a message, the -// SO_PASSCRED option must be enabled on the socket. 
-func ParseUnixCredentials(m *syscall.SocketControlMessage) (*Ucred, error) { - if m.Header.Level != syscall.SOL_SOCKET { - return nil, syscall.EINVAL - } - if m.Header.Type != syscall.SCM_CREDS { - return nil, syscall.EINVAL - } - ucred := *(*Ucred)(unsafe.Pointer(&m.Data[0])) - return &ucred, nil -} - -func (t *unixTransport) SendNullByte() error { - ucred := &Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} - b := UnixCredentials(ucred) - _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) - if err != nil { - return err - } - if oobn != len(b) { - return io.ErrShortWrite - } - return nil -} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go b/vendor/github.com/godbus/dbus/transport_unixcred_linux.go deleted file mode 100644 index d9dfdf69..00000000 --- a/vendor/github.com/godbus/dbus/transport_unixcred_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -// The UnixCredentials system call is currently only implemented on Linux -// http://golang.org/src/pkg/syscall/sockcmsg_linux.go -// https://golang.org/s/go1.4-syscall -// http://code.google.com/p/go/source/browse/unix/sockcmsg_linux.go?repo=sys - -package dbus - -import ( - "io" - "os" - "syscall" -) - -func (t *unixTransport) SendNullByte() error { - ucred := &syscall.Ucred{Pid: int32(os.Getpid()), Uid: uint32(os.Getuid()), Gid: uint32(os.Getgid())} - b := syscall.UnixCredentials(ucred) - _, oobn, err := t.UnixConn.WriteMsgUnix([]byte{0}, b, nil) - if err != nil { - return err - } - if oobn != len(b) { - return io.ErrShortWrite - } - return nil -} diff --git a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go b/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go deleted file mode 100644 index af7bafdf..00000000 --- a/vendor/github.com/godbus/dbus/transport_unixcred_openbsd.go +++ /dev/null @@ -1,14 +0,0 @@ -package dbus - -import "io" - -func (t *unixTransport) SendNullByte() error { - n, _, err := t.UnixConn.WriteMsgUnix([]byte{0}, nil, nil) - if err != nil { - return err - } - if n != 1 { - return io.ErrShortWrite - } - return nil -} diff --git a/vendor/github.com/godbus/dbus/variant.go b/vendor/github.com/godbus/dbus/variant.go deleted file mode 100644 index 0ca123b0..00000000 --- a/vendor/github.com/godbus/dbus/variant.go +++ /dev/null @@ -1,144 +0,0 @@ -package dbus - -import ( - "bytes" - "fmt" - "reflect" - "sort" - "strconv" -) - -// Variant represents the D-Bus variant type. -type Variant struct { - sig Signature - value interface{} -} - -// MakeVariant converts the given value to a Variant. It panics if v cannot be -// represented as a D-Bus type. -func MakeVariant(v interface{}) Variant { - return MakeVariantWithSignature(v, SignatureOf(v)) -} - -// MakeVariantWithSignature converts the given value to a Variant. -func MakeVariantWithSignature(v interface{}, s Signature) Variant { - return Variant{s, v} -} - -// ParseVariant parses the given string as a variant as described at -// https://developer.gnome.org/glib/unstable/gvariant-text.html. If sig is not -// empty, it is taken to be the expected signature for the variant. 
-func ParseVariant(s string, sig Signature) (Variant, error) { - tokens := varLex(s) - p := &varParser{tokens: tokens} - n, err := varMakeNode(p) - if err != nil { - return Variant{}, err - } - if sig.str == "" { - sig, err = varInfer(n) - if err != nil { - return Variant{}, err - } - } - v, err := n.Value(sig) - if err != nil { - return Variant{}, err - } - return MakeVariant(v), nil -} - -// format returns a formatted version of v and whether this string can be parsed -// unambigously. -func (v Variant) format() (string, bool) { - switch v.sig.str[0] { - case 'b', 'i': - return fmt.Sprint(v.value), true - case 'n', 'q', 'u', 'x', 't', 'd', 'h': - return fmt.Sprint(v.value), false - case 's': - return strconv.Quote(v.value.(string)), true - case 'o': - return strconv.Quote(string(v.value.(ObjectPath))), false - case 'g': - return strconv.Quote(v.value.(Signature).str), false - case 'v': - s, unamb := v.value.(Variant).format() - if !unamb { - return "<@" + v.value.(Variant).sig.str + " " + s + ">", true - } - return "<" + s + ">", true - case 'y': - return fmt.Sprintf("%#x", v.value.(byte)), false - } - rv := reflect.ValueOf(v.value) - switch rv.Kind() { - case reflect.Slice: - if rv.Len() == 0 { - return "[]", false - } - unamb := true - buf := bytes.NewBuffer([]byte("[")) - for i := 0; i < rv.Len(); i++ { - // TODO: slooow - s, b := MakeVariant(rv.Index(i).Interface()).format() - unamb = unamb && b - buf.WriteString(s) - if i != rv.Len()-1 { - buf.WriteString(", ") - } - } - buf.WriteByte(']') - return buf.String(), unamb - case reflect.Map: - if rv.Len() == 0 { - return "{}", false - } - unamb := true - var buf bytes.Buffer - kvs := make([]string, rv.Len()) - for i, k := range rv.MapKeys() { - s, b := MakeVariant(k.Interface()).format() - unamb = unamb && b - buf.Reset() - buf.WriteString(s) - buf.WriteString(": ") - s, b = MakeVariant(rv.MapIndex(k).Interface()).format() - unamb = unamb && b - buf.WriteString(s) - kvs[i] = buf.String() - } - buf.Reset() - buf.WriteByte('{') - sort.Strings(kvs) - for i, kv := range kvs { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(kv) - } - buf.WriteByte('}') - return buf.String(), unamb - } - return `"INVALID"`, true -} - -// Signature returns the D-Bus signature of the underlying value of v. -func (v Variant) Signature() Signature { - return v.sig -} - -// String returns the string representation of the underlying value of v as -// described at https://developer.gnome.org/glib/unstable/gvariant-text.html. -func (v Variant) String() string { - s, unamb := v.format() - if !unamb { - return "@" + v.sig.str + " " + s - } - return s -} - -// Value returns the underlying value of v. -func (v Variant) Value() interface{} { - return v.value -} diff --git a/vendor/github.com/godbus/dbus/variant_lexer.go b/vendor/github.com/godbus/dbus/variant_lexer.go deleted file mode 100644 index 332007d6..00000000 --- a/vendor/github.com/godbus/dbus/variant_lexer.go +++ /dev/null @@ -1,284 +0,0 @@ -package dbus - -import ( - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -// Heavily inspired by the lexer from text/template. 
- -type varToken struct { - typ varTokenType - val string -} - -type varTokenType byte - -const ( - tokEOF varTokenType = iota - tokError - tokNumber - tokString - tokBool - tokArrayStart - tokArrayEnd - tokDictStart - tokDictEnd - tokVariantStart - tokVariantEnd - tokComma - tokColon - tokType - tokByteString -) - -type varLexer struct { - input string - start int - pos int - width int - tokens []varToken -} - -type lexState func(*varLexer) lexState - -func varLex(s string) []varToken { - l := &varLexer{input: s} - l.run() - return l.tokens -} - -func (l *varLexer) accept(valid string) bool { - if strings.IndexRune(valid, l.next()) >= 0 { - return true - } - l.backup() - return false -} - -func (l *varLexer) backup() { - l.pos -= l.width -} - -func (l *varLexer) emit(t varTokenType) { - l.tokens = append(l.tokens, varToken{t, l.input[l.start:l.pos]}) - l.start = l.pos -} - -func (l *varLexer) errorf(format string, v ...interface{}) lexState { - l.tokens = append(l.tokens, varToken{ - tokError, - fmt.Sprintf(format, v...), - }) - return nil -} - -func (l *varLexer) ignore() { - l.start = l.pos -} - -func (l *varLexer) next() rune { - var r rune - - if l.pos >= len(l.input) { - l.width = 0 - return -1 - } - r, l.width = utf8.DecodeRuneInString(l.input[l.pos:]) - l.pos += l.width - return r -} - -func (l *varLexer) run() { - for state := varLexNormal; state != nil; { - state = state(l) - } -} - -func (l *varLexer) peek() rune { - r := l.next() - l.backup() - return r -} - -func varLexNormal(l *varLexer) lexState { - for { - r := l.next() - switch { - case r == -1: - l.emit(tokEOF) - return nil - case r == '[': - l.emit(tokArrayStart) - case r == ']': - l.emit(tokArrayEnd) - case r == '{': - l.emit(tokDictStart) - case r == '}': - l.emit(tokDictEnd) - case r == '<': - l.emit(tokVariantStart) - case r == '>': - l.emit(tokVariantEnd) - case r == ':': - l.emit(tokColon) - case r == ',': - l.emit(tokComma) - case r == '\'' || r == '"': - l.backup() - return varLexString - case r == '@': - l.backup() - return varLexType - case unicode.IsSpace(r): - l.ignore() - case unicode.IsNumber(r) || r == '+' || r == '-': - l.backup() - return varLexNumber - case r == 'b': - pos := l.start - if n := l.peek(); n == '"' || n == '\'' { - return varLexByteString - } - // not a byte string; try to parse it as a type or bool below - l.pos = pos + 1 - l.width = 1 - fallthrough - default: - // either a bool or a type. Try bools first. - l.backup() - if l.pos+4 <= len(l.input) { - if l.input[l.pos:l.pos+4] == "true" { - l.pos += 4 - l.emit(tokBool) - continue - } - } - if l.pos+5 <= len(l.input) { - if l.input[l.pos:l.pos+5] == "false" { - l.pos += 5 - l.emit(tokBool) - continue - } - } - // must be a type. 
- return varLexType - } - } -} - -var varTypeMap = map[string]string{ - "boolean": "b", - "byte": "y", - "int16": "n", - "uint16": "q", - "int32": "i", - "uint32": "u", - "int64": "x", - "uint64": "t", - "double": "f", - "string": "s", - "objectpath": "o", - "signature": "g", -} - -func varLexByteString(l *varLexer) lexState { - q := l.next() -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != -1 { - break - } - fallthrough - case -1: - return l.errorf("unterminated bytestring") - case q: - break Loop - } - } - l.emit(tokByteString) - return varLexNormal -} - -func varLexNumber(l *varLexer) lexState { - l.accept("+-") - digits := "0123456789" - if l.accept("0") { - if l.accept("x") { - digits = "0123456789abcdefABCDEF" - } else { - digits = "01234567" - } - } - for strings.IndexRune(digits, l.next()) >= 0 { - } - l.backup() - if l.accept(".") { - for strings.IndexRune(digits, l.next()) >= 0 { - } - l.backup() - } - if l.accept("eE") { - l.accept("+-") - for strings.IndexRune("0123456789", l.next()) >= 0 { - } - l.backup() - } - if r := l.peek(); unicode.IsLetter(r) { - l.next() - return l.errorf("bad number syntax: %q", l.input[l.start:l.pos]) - } - l.emit(tokNumber) - return varLexNormal -} - -func varLexString(l *varLexer) lexState { - q := l.next() -Loop: - for { - switch l.next() { - case '\\': - if r := l.next(); r != -1 { - break - } - fallthrough - case -1: - return l.errorf("unterminated string") - case q: - break Loop - } - } - l.emit(tokString) - return varLexNormal -} - -func varLexType(l *varLexer) lexState { - at := l.accept("@") - for { - r := l.next() - if r == -1 { - break - } - if unicode.IsSpace(r) { - l.backup() - break - } - } - if at { - if _, err := ParseSignature(l.input[l.start+1 : l.pos]); err != nil { - return l.errorf("%s", err) - } - } else { - if _, ok := varTypeMap[l.input[l.start:l.pos]]; ok { - l.emit(tokType) - return varLexNormal - } - return l.errorf("unrecognized type %q", l.input[l.start:l.pos]) - } - l.emit(tokType) - return varLexNormal -} diff --git a/vendor/github.com/godbus/dbus/variant_parser.go b/vendor/github.com/godbus/dbus/variant_parser.go deleted file mode 100644 index d20f5da6..00000000 --- a/vendor/github.com/godbus/dbus/variant_parser.go +++ /dev/null @@ -1,817 +0,0 @@ -package dbus - -import ( - "bytes" - "errors" - "fmt" - "io" - "reflect" - "strconv" - "strings" - "unicode/utf8" -) - -type varParser struct { - tokens []varToken - i int -} - -func (p *varParser) backup() { - p.i-- -} - -func (p *varParser) next() varToken { - if p.i < len(p.tokens) { - t := p.tokens[p.i] - p.i++ - return t - } - return varToken{typ: tokEOF} -} - -type varNode interface { - Infer() (Signature, error) - String() string - Sigs() sigSet - Value(Signature) (interface{}, error) -} - -func varMakeNode(p *varParser) (varNode, error) { - var sig Signature - - for { - t := p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - case tokNumber: - return varMakeNumNode(t, sig) - case tokString: - return varMakeStringNode(t, sig) - case tokBool: - if sig.str != "" && sig.str != "b" { - return nil, varTypeError{t.val, sig} - } - b, err := strconv.ParseBool(t.val) - if err != nil { - return nil, err - } - return boolNode(b), nil - case tokArrayStart: - return varMakeArrayNode(p, sig) - case tokVariantStart: - return varMakeVariantNode(p, sig) - case tokDictStart: - return varMakeDictNode(p, sig) - case tokType: - if sig.str != "" { - return nil, errors.New("unexpected type 
annotation") - } - if t.val[0] == '@' { - sig.str = t.val[1:] - } else { - sig.str = varTypeMap[t.val] - } - case tokByteString: - if sig.str != "" && sig.str != "ay" { - return nil, varTypeError{t.val, sig} - } - b, err := varParseByteString(t.val) - if err != nil { - return nil, err - } - return byteStringNode(b), nil - default: - return nil, fmt.Errorf("unexpected %q", t.val) - } - } -} - -type varTypeError struct { - val string - sig Signature -} - -func (e varTypeError) Error() string { - return fmt.Sprintf("dbus: can't parse %q as type %q", e.val, e.sig.str) -} - -type sigSet map[Signature]bool - -func (s sigSet) Empty() bool { - return len(s) == 0 -} - -func (s sigSet) Intersect(s2 sigSet) sigSet { - r := make(sigSet) - for k := range s { - if s2[k] { - r[k] = true - } - } - return r -} - -func (s sigSet) Single() (Signature, bool) { - if len(s) == 1 { - for k := range s { - return k, true - } - } - return Signature{}, false -} - -func (s sigSet) ToArray() sigSet { - r := make(sigSet, len(s)) - for k := range s { - r[Signature{"a" + k.str}] = true - } - return r -} - -type numNode struct { - sig Signature - str string - val interface{} -} - -var numSigSet = sigSet{ - Signature{"y"}: true, - Signature{"n"}: true, - Signature{"q"}: true, - Signature{"i"}: true, - Signature{"u"}: true, - Signature{"x"}: true, - Signature{"t"}: true, - Signature{"d"}: true, -} - -func (n numNode) Infer() (Signature, error) { - if strings.ContainsAny(n.str, ".e") { - return Signature{"d"}, nil - } - return Signature{"i"}, nil -} - -func (n numNode) String() string { - return n.str -} - -func (n numNode) Sigs() sigSet { - if n.sig.str != "" { - return sigSet{n.sig: true} - } - if strings.ContainsAny(n.str, ".e") { - return sigSet{Signature{"d"}: true} - } - return numSigSet -} - -func (n numNode) Value(sig Signature) (interface{}, error) { - if n.sig.str != "" && n.sig != sig { - return nil, varTypeError{n.str, sig} - } - if n.val != nil { - return n.val, nil - } - return varNumAs(n.str, sig) -} - -func varMakeNumNode(tok varToken, sig Signature) (varNode, error) { - if sig.str == "" { - return numNode{str: tok.val}, nil - } - num, err := varNumAs(tok.val, sig) - if err != nil { - return nil, err - } - return numNode{sig: sig, val: num}, nil -} - -func varNumAs(s string, sig Signature) (interface{}, error) { - isUnsigned := false - size := 32 - switch sig.str { - case "n": - size = 16 - case "i": - case "x": - size = 64 - case "y": - size = 8 - isUnsigned = true - case "q": - size = 16 - isUnsigned = true - case "u": - isUnsigned = true - case "t": - size = 64 - isUnsigned = true - case "d": - d, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, err - } - return d, nil - default: - return nil, varTypeError{s, sig} - } - base := 10 - if strings.HasPrefix(s, "0x") { - base = 16 - s = s[2:] - } - if strings.HasPrefix(s, "0") && len(s) != 1 { - base = 8 - s = s[1:] - } - if isUnsigned { - i, err := strconv.ParseUint(s, base, size) - if err != nil { - return nil, err - } - var v interface{} = i - switch sig.str { - case "y": - v = byte(i) - case "q": - v = uint16(i) - case "u": - v = uint32(i) - } - return v, nil - } - i, err := strconv.ParseInt(s, base, size) - if err != nil { - return nil, err - } - var v interface{} = i - switch sig.str { - case "n": - v = int16(i) - case "i": - v = int32(i) - } - return v, nil -} - -type stringNode struct { - sig Signature - str string // parsed - val interface{} // has correct type -} - -var stringSigSet = sigSet{ - Signature{"s"}: true, - Signature{"g"}: 
true, - Signature{"o"}: true, -} - -func (n stringNode) Infer() (Signature, error) { - return Signature{"s"}, nil -} - -func (n stringNode) String() string { - return n.str -} - -func (n stringNode) Sigs() sigSet { - if n.sig.str != "" { - return sigSet{n.sig: true} - } - return stringSigSet -} - -func (n stringNode) Value(sig Signature) (interface{}, error) { - if n.sig.str != "" && n.sig != sig { - return nil, varTypeError{n.str, sig} - } - if n.val != nil { - return n.val, nil - } - switch { - case sig.str == "g": - return Signature{n.str}, nil - case sig.str == "o": - return ObjectPath(n.str), nil - case sig.str == "s": - return n.str, nil - default: - return nil, varTypeError{n.str, sig} - } -} - -func varMakeStringNode(tok varToken, sig Signature) (varNode, error) { - if sig.str != "" && sig.str != "s" && sig.str != "g" && sig.str != "o" { - return nil, fmt.Errorf("invalid type %q for string", sig.str) - } - s, err := varParseString(tok.val) - if err != nil { - return nil, err - } - n := stringNode{str: s} - if sig.str == "" { - return stringNode{str: s}, nil - } - n.sig = sig - switch sig.str { - case "o": - n.val = ObjectPath(s) - case "g": - n.val = Signature{s} - case "s": - n.val = s - } - return n, nil -} - -func varParseString(s string) (string, error) { - // quotes are guaranteed to be there - s = s[1 : len(s)-1] - buf := new(bytes.Buffer) - for len(s) != 0 { - r, size := utf8.DecodeRuneInString(s) - if r == utf8.RuneError && size == 1 { - return "", errors.New("invalid UTF-8") - } - s = s[size:] - if r != '\\' { - buf.WriteRune(r) - continue - } - r, size = utf8.DecodeRuneInString(s) - if r == utf8.RuneError && size == 1 { - return "", errors.New("invalid UTF-8") - } - s = s[size:] - switch r { - case 'a': - buf.WriteRune(0x7) - case 'b': - buf.WriteRune(0x8) - case 'f': - buf.WriteRune(0xc) - case 'n': - buf.WriteRune('\n') - case 'r': - buf.WriteRune('\r') - case 't': - buf.WriteRune('\t') - case '\n': - case 'u': - if len(s) < 4 { - return "", errors.New("short unicode escape") - } - r, err := strconv.ParseUint(s[:4], 16, 32) - if err != nil { - return "", err - } - buf.WriteRune(rune(r)) - s = s[4:] - case 'U': - if len(s) < 8 { - return "", errors.New("short unicode escape") - } - r, err := strconv.ParseUint(s[:8], 16, 32) - if err != nil { - return "", err - } - buf.WriteRune(rune(r)) - s = s[8:] - default: - buf.WriteRune(r) - } - } - return buf.String(), nil -} - -var boolSigSet = sigSet{Signature{"b"}: true} - -type boolNode bool - -func (boolNode) Infer() (Signature, error) { - return Signature{"b"}, nil -} - -func (b boolNode) String() string { - if b { - return "true" - } - return "false" -} - -func (boolNode) Sigs() sigSet { - return boolSigSet -} - -func (b boolNode) Value(sig Signature) (interface{}, error) { - if sig.str != "b" { - return nil, varTypeError{b.String(), sig} - } - return bool(b), nil -} - -type arrayNode struct { - set sigSet - children []varNode - val interface{} -} - -func (n arrayNode) Infer() (Signature, error) { - for _, v := range n.children { - csig, err := varInfer(v) - if err != nil { - continue - } - return Signature{"a" + csig.str}, nil - } - return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) -} - -func (n arrayNode) String() string { - s := "[" - for i, v := range n.children { - s += v.String() - if i != len(n.children)-1 { - s += ", " - } - } - return s + "]" -} - -func (n arrayNode) Sigs() sigSet { - return n.set -} - -func (n arrayNode) Value(sig Signature) (interface{}, error) { - if n.set.Empty() { - // no 
type information whatsoever, so this must be an empty slice - return reflect.MakeSlice(typeFor(sig.str), 0, 0).Interface(), nil - } - if !n.set[sig] { - return nil, varTypeError{n.String(), sig} - } - s := reflect.MakeSlice(typeFor(sig.str), len(n.children), len(n.children)) - for i, v := range n.children { - rv, err := v.Value(Signature{sig.str[1:]}) - if err != nil { - return nil, err - } - s.Index(i).Set(reflect.ValueOf(rv)) - } - return s.Interface(), nil -} - -func varMakeArrayNode(p *varParser, sig Signature) (varNode, error) { - var n arrayNode - if sig.str != "" { - n.set = sigSet{sig: true} - } - if t := p.next(); t.typ == tokArrayEnd { - return n, nil - } else { - p.backup() - } -Loop: - for { - t := p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - } - p.backup() - cn, err := varMakeNode(p) - if err != nil { - return nil, err - } - if cset := cn.Sigs(); !cset.Empty() { - if n.set.Empty() { - n.set = cset.ToArray() - } else { - nset := cset.ToArray().Intersect(n.set) - if nset.Empty() { - return nil, fmt.Errorf("can't parse %q with given type information", cn.String()) - } - n.set = nset - } - } - n.children = append(n.children, cn) - switch t := p.next(); t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - case tokArrayEnd: - break Loop - case tokComma: - continue - default: - return nil, fmt.Errorf("unexpected %q", t.val) - } - } - return n, nil -} - -type variantNode struct { - n varNode -} - -var variantSet = sigSet{ - Signature{"v"}: true, -} - -func (variantNode) Infer() (Signature, error) { - return Signature{"v"}, nil -} - -func (n variantNode) String() string { - return "<" + n.n.String() + ">" -} - -func (variantNode) Sigs() sigSet { - return variantSet -} - -func (n variantNode) Value(sig Signature) (interface{}, error) { - if sig.str != "v" { - return nil, varTypeError{n.String(), sig} - } - sig, err := varInfer(n.n) - if err != nil { - return nil, err - } - v, err := n.n.Value(sig) - if err != nil { - return nil, err - } - return MakeVariant(v), nil -} - -func varMakeVariantNode(p *varParser, sig Signature) (varNode, error) { - n, err := varMakeNode(p) - if err != nil { - return nil, err - } - if t := p.next(); t.typ != tokVariantEnd { - return nil, fmt.Errorf("unexpected %q", t.val) - } - vn := variantNode{n} - if sig.str != "" && sig.str != "v" { - return nil, varTypeError{vn.String(), sig} - } - return variantNode{n}, nil -} - -type dictEntry struct { - key, val varNode -} - -type dictNode struct { - kset, vset sigSet - children []dictEntry - val interface{} -} - -func (n dictNode) Infer() (Signature, error) { - for _, v := range n.children { - ksig, err := varInfer(v.key) - if err != nil { - continue - } - vsig, err := varInfer(v.val) - if err != nil { - continue - } - return Signature{"a{" + ksig.str + vsig.str + "}"}, nil - } - return Signature{}, fmt.Errorf("can't infer type for %q", n.String()) -} - -func (n dictNode) String() string { - s := "{" - for i, v := range n.children { - s += v.key.String() + ": " + v.val.String() - if i != len(n.children)-1 { - s += ", " - } - } - return s + "}" -} - -func (n dictNode) Sigs() sigSet { - r := sigSet{} - for k := range n.kset { - for v := range n.vset { - sig := "a{" + k.str + v.str + "}" - r[Signature{sig}] = true - } - } - return r -} - -func (n dictNode) Value(sig Signature) (interface{}, error) { - set := n.Sigs() - if set.Empty() { - // no type information -> empty dict - return 
reflect.MakeMap(typeFor(sig.str)).Interface(), nil - } - if !set[sig] { - return nil, varTypeError{n.String(), sig} - } - m := reflect.MakeMap(typeFor(sig.str)) - ksig := Signature{sig.str[2:3]} - vsig := Signature{sig.str[3 : len(sig.str)-1]} - for _, v := range n.children { - kv, err := v.key.Value(ksig) - if err != nil { - return nil, err - } - vv, err := v.val.Value(vsig) - if err != nil { - return nil, err - } - m.SetMapIndex(reflect.ValueOf(kv), reflect.ValueOf(vv)) - } - return m.Interface(), nil -} - -func varMakeDictNode(p *varParser, sig Signature) (varNode, error) { - var n dictNode - - if sig.str != "" { - if len(sig.str) < 5 { - return nil, fmt.Errorf("invalid signature %q for dict type", sig) - } - ksig := Signature{string(sig.str[2])} - vsig := Signature{sig.str[3 : len(sig.str)-1]} - n.kset = sigSet{ksig: true} - n.vset = sigSet{vsig: true} - } - if t := p.next(); t.typ == tokDictEnd { - return n, nil - } else { - p.backup() - } -Loop: - for { - t := p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - } - p.backup() - kn, err := varMakeNode(p) - if err != nil { - return nil, err - } - if kset := kn.Sigs(); !kset.Empty() { - if n.kset.Empty() { - n.kset = kset - } else { - n.kset = kset.Intersect(n.kset) - if n.kset.Empty() { - return nil, fmt.Errorf("can't parse %q with given type information", kn.String()) - } - } - } - t = p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - case tokColon: - default: - return nil, fmt.Errorf("unexpected %q", t.val) - } - t = p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - } - p.backup() - vn, err := varMakeNode(p) - if err != nil { - return nil, err - } - if vset := vn.Sigs(); !vset.Empty() { - if n.vset.Empty() { - n.vset = vset - } else { - n.vset = n.vset.Intersect(vset) - if n.vset.Empty() { - return nil, fmt.Errorf("can't parse %q with given type information", vn.String()) - } - } - } - n.children = append(n.children, dictEntry{kn, vn}) - t = p.next() - switch t.typ { - case tokEOF: - return nil, io.ErrUnexpectedEOF - case tokError: - return nil, errors.New(t.val) - case tokDictEnd: - break Loop - case tokComma: - continue - default: - return nil, fmt.Errorf("unexpected %q", t.val) - } - } - return n, nil -} - -type byteStringNode []byte - -var byteStringSet = sigSet{ - Signature{"ay"}: true, -} - -func (byteStringNode) Infer() (Signature, error) { - return Signature{"ay"}, nil -} - -func (b byteStringNode) String() string { - return string(b) -} - -func (b byteStringNode) Sigs() sigSet { - return byteStringSet -} - -func (b byteStringNode) Value(sig Signature) (interface{}, error) { - if sig.str != "ay" { - return nil, varTypeError{b.String(), sig} - } - return []byte(b), nil -} - -func varParseByteString(s string) ([]byte, error) { - // quotes and b at start are guaranteed to be there - b := make([]byte, 0, 1) - s = s[2 : len(s)-1] - for len(s) != 0 { - c := s[0] - s = s[1:] - if c != '\\' { - b = append(b, c) - continue - } - c = s[0] - s = s[1:] - switch c { - case 'a': - b = append(b, 0x7) - case 'b': - b = append(b, 0x8) - case 'f': - b = append(b, 0xc) - case 'n': - b = append(b, '\n') - case 'r': - b = append(b, '\r') - case 't': - b = append(b, '\t') - case 'x': - if len(s) < 2 { - return nil, errors.New("short escape") - } - n, err := strconv.ParseUint(s[:2], 16, 8) - if err != nil { - return nil, err - 
} - b = append(b, byte(n)) - s = s[2:] - case '0': - if len(s) < 3 { - return nil, errors.New("short escape") - } - n, err := strconv.ParseUint(s[:3], 8, 8) - if err != nil { - return nil, err - } - b = append(b, byte(n)) - s = s[3:] - default: - b = append(b, c) - } - } - return append(b, 0), nil -} - -func varInfer(n varNode) (Signature, error) { - if sig, ok := n.Sigs().Single(); ok { - return sig, nil - } - return n.Infer() -}
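
The bulk of the deletion above removes godbus/dbus's text-format variant support (variant.go together with its lexer and parser). For reference, a minimal usage sketch of that API, assuming the pre-module import path github.com/godbus/dbus and a build outside this vendor tree; the names follow the deleted sources, and the expected outputs are inferred from that implementation rather than verified against this repository:

// Hypothetical example; illustrates dbus.ParseVariant / dbus.MakeVariant as
// implemented by the files deleted in this diff.
package main

import (
	"fmt"

	"github.com/godbus/dbus"
)

func main() {
	// ParseVariant lexes and parses GVariant text notation; with an empty
	// Signature the type is inferred (here "ai", an array of int32).
	v, err := dbus.ParseVariant("[1, 2, 3]", dbus.Signature{})
	if err != nil {
		panic(err)
	}
	fmt.Println(v.Signature(), v.Value()) // expected: ai [1 2 3]

	// MakeVariant wraps a Go value; String() renders it back in the same
	// text notation, prefixing an "@<signature>" annotation when the plain
	// form alone would be ambiguous.
	m := dbus.MakeVariant(map[string]uint32{"answer": 42})
	fmt.Println(m.String()) // expected: @a{su} {"answer": 42}
}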