2016-05-16 18:50:55 +03:00
|
|
|
package plugin
|
|
|
|
|
|
|
|
import (
|
2016-12-13 02:05:53 +03:00
|
|
|
"archive/tar"
|
|
|
|
"compress/gzip"
|
2016-08-11 02:48:17 +03:00
|
|
|
"encoding/json"
|
2016-10-04 22:01:19 +03:00
|
|
|
"io"
|
2016-08-11 02:48:17 +03:00
|
|
|
"io/ioutil"
|
2016-05-16 18:50:55 +03:00
|
|
|
"net/http"
|
|
|
|
"os"
|
2016-12-13 02:05:53 +03:00
|
|
|
"path"
|
2016-05-16 18:50:55 +03:00
|
|
|
"path/filepath"
|
2016-12-13 02:05:53 +03:00
|
|
|
"strings"
|
2016-05-16 18:50:55 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
"github.com/docker/distribution/manifest/schema2"
|
2017-01-26 03:54:18 +03:00
|
|
|
"github.com/docker/distribution/reference"
|
2016-09-06 21:18:12 +03:00
|
|
|
"github.com/docker/docker/api/types"
|
2016-11-23 15:58:15 +03:00
|
|
|
"github.com/docker/docker/api/types/filters"
|
2016-12-13 02:05:53 +03:00
|
|
|
"github.com/docker/docker/distribution"
|
|
|
|
progressutils "github.com/docker/docker/distribution/utils"
|
|
|
|
"github.com/docker/docker/distribution/xfer"
|
Embed DockerVersion in plugin config.
Embedding DockerVersion in plugin config when the plugin is created,
enables users to do a docker plugin inspect and know which version
the plugin was built on. This is helpful in cases where users are
running a new plugin on older docker releases and confused at
unexpected behavior.
By embedding DockerVersion in the config, we claim that there's no
guarantee that if the plugin config's DockerVersion is greater that
the version of the docker engine the plugin is executed against, the
plugin will work as expected.
For example, lets say:
- in 17.03, a plugin was released as johndoe/foo:v1
- in 17.05, the plugin uses the new ipchost config setting and author
publishes johndoe/foo:v2
In this case, johndoe/foo:v2 was built on 17.05 using ipchost, but is
running on docker-engine version 17.03. Since 17.05 > 17.03, there's
no guarantee that the plugin will work as expected. Ofcourse, if the
plugin did not use newly added config settings (ipchost in this case)
in 17.05, it would work fine in 17.03.
Signed-off-by: Anusha Ragunathan <anusha.ragunathan@docker.com>
2017-03-22 00:07:41 +03:00
|
|
|
"github.com/docker/docker/dockerversion"
|
2016-12-13 02:05:53 +03:00
|
|
|
"github.com/docker/docker/image"
|
|
|
|
"github.com/docker/docker/layer"
|
2017-03-18 00:57:23 +03:00
|
|
|
"github.com/docker/docker/pkg/authorization"
|
2016-10-04 22:01:19 +03:00
|
|
|
"github.com/docker/docker/pkg/chrootarchive"
|
2017-02-03 07:08:35 +03:00
|
|
|
"github.com/docker/docker/pkg/mount"
|
2016-12-13 02:05:53 +03:00
|
|
|
"github.com/docker/docker/pkg/pools"
|
|
|
|
"github.com/docker/docker/pkg/progress"
|
2017-06-26 21:54:14 +03:00
|
|
|
"github.com/docker/docker/pkg/system"
|
2016-08-26 20:02:38 +03:00
|
|
|
"github.com/docker/docker/plugin/v2"
|
2017-01-26 03:54:18 +03:00
|
|
|
refstore "github.com/docker/docker/reference"
|
2017-08-22 00:51:45 +03:00
|
|
|
digest "github.com/opencontainers/go-digest"
|
2016-12-13 05:18:17 +03:00
|
|
|
"github.com/pkg/errors"
|
2017-07-27 00:42:13 +03:00
|
|
|
"github.com/sirupsen/logrus"
|
2016-10-04 22:01:19 +03:00
|
|
|
"golang.org/x/net/context"
|
2016-05-16 18:50:55 +03:00
|
|
|
)
|
|
|
|
|
2016-11-23 15:58:15 +03:00
|
|
|
// acceptedPluginFilterTags is the set of filter keys that List accepts;
// anything else is rejected by filters.Args.Validate.
var acceptedPluginFilterTags = map[string]bool{
	"enabled":    true,
	"capability": true,
}
|
|
|
|
|
2016-12-20 19:26:58 +03:00
|
|
|
// Disable deactivates a plugin. This means resources (volumes, networks) can't use it.
func (pm *Manager) Disable(refOrID string, config *types.PluginDisableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}
	// Snapshot the plugin's controller under the read lock; disable needs it
	// to stop the running instance.
	pm.mu.RLock()
	c := pm.cMap[p]
	pm.mu.RUnlock()

	// Refuse to disable a plugin that is still referenced unless forced.
	if !config.ForceDisable && p.GetRefCount() > 0 {
		return errors.WithStack(inUseError(p.Name()))
	}

	// If the plugin implements the authorization API, unregister it from the
	// authz middleware chain before shutting it down.
	for _, typ := range p.GetTypes() {
		if typ.Capability == authorization.AuthZApiImplements {
			pm.config.AuthzMiddleware.RemovePlugin(p.Name())
		}
	}

	if err := pm.disable(p, c); err != nil {
		return err
	}
	// Emit local event and daemon-level plugin event only after a successful disable.
	pm.publisher.Publish(EventDisable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "disable")
	return nil
}
|
|
|
|
|
|
|
|
// Enable activates a plugin, which implies that it is ready to be used by containers.
func (pm *Manager) Enable(refOrID string, config *types.PluginEnableConfig) error {
	p, err := pm.config.Store.GetV2Plugin(refOrID)
	if err != nil {
		return err
	}

	// Fresh controller carrying the caller-supplied activation timeout.
	c := &controller{timeoutInSecs: config.Timeout}
	if err := pm.enable(p, c, false); err != nil {
		return err
	}
	// Emit local event and daemon-level plugin event only after a successful enable.
	pm.publisher.Publish(EventEnable{Plugin: p.PluginObj})
	pm.config.LogPluginEvent(p.GetID(), refOrID, "enable")
	return nil
}
|
|
|
|
|
2016-11-08 05:51:47 +03:00
|
|
|
// Inspect examines a plugin config
|
2016-12-13 02:05:53 +03:00
|
|
|
func (pm *Manager) Inspect(refOrID string) (tp *types.Plugin, err error) {
|
|
|
|
p, err := pm.config.Store.GetV2Plugin(refOrID)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2016-11-24 07:04:44 +03:00
|
|
|
}
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
return &p.PluginObj, nil
|
|
|
|
}
|
2016-11-24 07:04:44 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
// pull runs a distribution pull for the given reference, wiring progress
// reporting into outStream when it is non-nil. When outStream is nil,
// progress is discarded.
func (pm *Manager) pull(ctx context.Context, ref reference.Named, config *distribution.ImagePullConfig, outStream io.Writer) error {
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		// Close the progress channel when pull returns, then wait for the
		// writer goroutine to drain it so no writes race with the return.
		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		// The writer goroutine cancels the pull if the client stream breaks.
		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		config.ProgressOutput = progress.ChanOutput(progressChan)
	} else {
		config.ProgressOutput = progress.DiscardOutput()
	}
	return distribution.Pull(ctx, ref, config)
}
|
2016-11-24 07:04:44 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
// tempConfigStore is a minimal in-memory image store used when only the
// plugin config blob (not the layers) needs to be pulled, e.g. by Privileges.
type tempConfigStore struct {
	config       []byte        // raw plugin config JSON captured by Put
	configDigest digest.Digest // digest of config, used to validate Get
}
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
func (s *tempConfigStore) Put(c []byte) (digest.Digest, error) {
|
|
|
|
dgst := digest.FromBytes(c)
|
2016-11-24 04:29:21 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
s.config = c
|
|
|
|
s.configDigest = dgst
|
2016-11-24 04:29:21 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
return dgst, nil
|
2016-11-24 04:29:21 +03:00
|
|
|
}
|
2016-11-28 22:08:39 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
// Get returns the stored config blob, but only for the digest Put recorded.
func (s *tempConfigStore) Get(d digest.Digest) ([]byte, error) {
	if d != s.configDigest {
		return nil, errNotFound("digest not found")
	}
	return s.config, nil
}

// RootFSAndOSFromConfig extracts the rootfs description and OS from a raw
// plugin config blob.
func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
	return configToRootFS(c)
}
|
2016-11-28 22:08:39 +03:00
|
|
|
|
2017-07-19 17:20:13 +03:00
|
|
|
func computePrivileges(c types.PluginConfig) types.PluginPrivileges {
|
2016-11-24 04:29:21 +03:00
|
|
|
var privileges types.PluginPrivileges
|
2016-12-14 04:46:01 +03:00
|
|
|
if c.Network.Type != "null" && c.Network.Type != "bridge" && c.Network.Type != "" {
|
2016-11-24 04:29:21 +03:00
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "network",
|
|
|
|
Description: "permissions to access a network",
|
|
|
|
Value: []string{c.Network.Type},
|
|
|
|
})
|
|
|
|
}
|
2017-03-08 05:26:09 +03:00
|
|
|
if c.IpcHost {
|
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "host ipc namespace",
|
|
|
|
Description: "allow access to host ipc namespace",
|
|
|
|
Value: []string{"true"},
|
|
|
|
})
|
|
|
|
}
|
2017-03-11 01:17:24 +03:00
|
|
|
if c.PidHost {
|
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "host pid namespace",
|
|
|
|
Description: "allow access to host pid namespace",
|
|
|
|
Value: []string{"true"},
|
|
|
|
})
|
|
|
|
}
|
2016-11-24 04:29:21 +03:00
|
|
|
for _, mount := range c.Mounts {
|
|
|
|
if mount.Source != nil {
|
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "mount",
|
|
|
|
Description: "host path to mount",
|
|
|
|
Value: []string{*mount.Source},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
for _, device := range c.Linux.Devices {
|
|
|
|
if device.Path != nil {
|
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "device",
|
|
|
|
Description: "host device to access",
|
|
|
|
Value: []string{*device.Path},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
2017-01-10 22:00:57 +03:00
|
|
|
if c.Linux.AllowAllDevices {
|
2016-11-24 04:29:21 +03:00
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
2017-01-10 22:00:57 +03:00
|
|
|
Name: "allow-all-devices",
|
|
|
|
Description: "allow 'rwm' access to all devices",
|
2016-11-24 04:29:21 +03:00
|
|
|
Value: []string{"true"},
|
|
|
|
})
|
|
|
|
}
|
|
|
|
if len(c.Linux.Capabilities) > 0 {
|
|
|
|
privileges = append(privileges, types.PluginPrivilege{
|
|
|
|
Name: "capabilities",
|
|
|
|
Description: "list of additional capabilities required",
|
|
|
|
Value: c.Linux.Capabilities,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2017-07-19 17:20:13 +03:00
|
|
|
return privileges
|
2016-11-28 22:08:39 +03:00
|
|
|
}
|
|
|
|
|
2016-11-24 04:29:21 +03:00
|
|
|
// Privileges pulls a plugin config and computes the privileges required to install it.
func (pm *Manager) Privileges(ctx context.Context, ref reference.Named, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) {
	// create image store instance; only the config blob is captured
	cs := &tempConfigStore{}

	// DownloadManager not defined because only pulling configuration.
	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: func(string, string, string) {}, // no events for a config-only pull
			ImageStore:       cs,
		},
		Schema2Types: distribution.PluginTypes,
	}

	// nil outStream: no progress reporting for a config-only pull.
	if err := pm.pull(ctx, ref, pluginPullConfig, nil); err != nil {
		return nil, err
	}

	if cs.config == nil {
		return nil, errors.New("no configuration pulled")
	}
	var config types.PluginConfig
	if err := json.Unmarshal(cs.config, &config); err != nil {
		return nil, systemError{err}
	}

	return computePrivileges(config), nil
}
|
2016-05-16 18:50:55 +03:00
|
|
|
|
2017-01-29 03:54:32 +03:00
|
|
|
// Upgrade upgrades a plugin by pulling the new reference and replacing the
// existing plugin's config and rootfs. The plugin must be disabled first.
func (pm *Manager) Upgrade(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer) (err error) {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	if p.IsEnabled() {
		return errors.Wrap(enabledError(p.Name()), "plugin must be disabled before upgrading")
	}

	// Hold the GC read lock so blobs downloaded here aren't collected mid-upgrade.
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	if _, err := reference.ParseNormalizedNamed(name); err != nil {
		return errors.Wrapf(validationError{err}, "failed to parse %q", name)
	}

	// Scratch dir for extracting the new rootfs; always cleaned up.
	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(systemError{err}, "error preparing upgrade")
	}
	defer os.RemoveAll(tmpRootFSDir)

	dm := &downloadManager{
		tmpDir:    tmpRootFSDir,
		blobStore: pm.blobStore,
	}

	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       dm,
		},
		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
		Schema2Types:    distribution.PluginTypes,
	}

	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
	if err != nil {
		// Reclaim any partially-downloaded blobs.
		go pm.GC()
		return err
	}

	if err := pm.upgradePlugin(p, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges); err != nil {
		return err
	}
	p.PluginObj.PluginReference = ref.String()
	return nil
}
|
|
|
|
|
2016-11-24 04:29:21 +03:00
|
|
|
// Pull pulls a plugin, checks if the correct privileges are provided and installs the plugin.
func (pm *Manager) Pull(ctx context.Context, ref reference.Named, name string, metaHeader http.Header, authConfig *types.AuthConfig, privileges types.PluginPrivileges, outStream io.Writer, opts ...CreateOpt) (err error) {
	// Hold the GC read lock so blobs downloaded here aren't collected mid-pull.
	pm.muGC.RLock()
	defer pm.muGC.RUnlock()

	// revalidate because Pull is public
	nameref, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return errors.Wrapf(validationError{err}, "failed to parse %q", name)
	}
	// Normalize to the short, tagged form used as the local plugin name.
	name = reference.FamiliarString(reference.TagNameOnly(nameref))

	if err := pm.config.Store.validateName(name); err != nil {
		return validationError{err}
	}

	// Scratch dir for extracting the rootfs; always cleaned up.
	tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
	if err != nil {
		return errors.Wrap(systemError{err}, "error preparing pull")
	}
	defer os.RemoveAll(tmpRootFSDir)

	dm := &downloadManager{
		tmpDir:    tmpRootFSDir,
		blobStore: pm.blobStore,
	}

	pluginPullConfig := &distribution.ImagePullConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			RegistryService:  pm.config.RegistryService,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       dm,
		},
		DownloadManager: dm, // todo: reevaluate if possible to substitute distribution/xfer dependencies instead
		Schema2Types:    distribution.PluginTypes,
	}

	err = pm.pull(ctx, ref, pluginPullConfig, outStream)
	if err != nil {
		// Reclaim any partially-downloaded blobs.
		go pm.GC()
		return err
	}

	// Record the original pull reference on the plugin, after any caller opts.
	refOpt := func(p *v2.Plugin) {
		p.PluginObj.PluginReference = ref.String()
	}
	optsList := make([]CreateOpt, 0, len(opts)+1)
	optsList = append(optsList, opts...)
	optsList = append(optsList, refOpt)

	p, err := pm.createPlugin(name, dm.configDigest, dm.blobs, tmpRootFSDir, &privileges, optsList...)
	if err != nil {
		return err
	}

	pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
	return nil
}
|
|
|
|
|
|
|
|
// List displays the list of plugins and associated metadata.
func (pm *Manager) List(pluginFilters filters.Args) ([]types.Plugin, error) {
	if err := pluginFilters.Validate(acceptedPluginFilterTags); err != nil {
		return nil, err
	}

	// "enabled" filter accepts exactly "true" or "false"; anything else is rejected.
	enabledOnly := false
	disabledOnly := false
	if pluginFilters.Contains("enabled") {
		if pluginFilters.ExactMatch("enabled", "true") {
			enabledOnly = true
		} else if pluginFilters.ExactMatch("enabled", "false") {
			disabledOnly = true
		} else {
			return nil, invalidFilter{"enabled", pluginFilters.Get("enabled")}
		}
	}

	plugins := pm.config.Store.GetAll()
	out := make([]types.Plugin, 0, len(plugins))

next:
	for _, p := range plugins {
		if enabledOnly && !p.PluginObj.Enabled {
			continue
		}
		if disabledOnly && p.PluginObj.Enabled {
			continue
		}
		// Capability filter: skip the plugin if any of its advertised
		// capabilities fails to match the filter.
		if pluginFilters.Contains("capability") {
			for _, f := range p.GetTypes() {
				if !pluginFilters.Match("capability", f.Capability) {
					continue next
				}
			}
		}
		out = append(out, p.PluginObj)
	}
	return out, nil
}
|
|
|
|
|
|
|
|
// Push pushes a plugin to the store.
func (pm *Manager) Push(ctx context.Context, name string, metaHeader http.Header, authConfig *types.AuthConfig, outStream io.Writer) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}

	ref, err := reference.ParseNormalizedNamed(p.Name())
	if err != nil {
		return errors.Wrapf(err, "plugin has invalid name %v for push", p.Name())
	}

	var po progress.Output
	if outStream != nil {
		// Include a buffer so that slow client connections don't affect
		// transfer performance.
		progressChan := make(chan progress.Progress, 100)

		writesDone := make(chan struct{})

		// Close the progress channel on return, then wait for the writer
		// goroutine to drain it so no writes race with the return.
		defer func() {
			close(progressChan)
			<-writesDone
		}()

		var cancelFunc context.CancelFunc
		ctx, cancelFunc = context.WithCancel(ctx)

		// The writer goroutine cancels the push if the client stream breaks.
		go func() {
			progressutils.WriteDistributionProgress(cancelFunc, outStream, progressChan)
			close(writesDone)
		}()

		po = progress.ChanOutput(progressChan)
	} else {
		po = progress.DiscardOutput()
	}

	// TODO: replace these with manager
	is := &pluginConfigStore{
		pm:     pm,
		plugin: p,
	}
	ls := &pluginLayerProvider{
		pm:     pm,
		plugin: p,
	}
	rs := &pluginReference{
		name:     ref,
		pluginID: p.Config,
	}

	uploadManager := xfer.NewLayerUploadManager(3)

	imagePushConfig := &distribution.ImagePushConfig{
		Config: distribution.Config{
			MetaHeaders:      metaHeader,
			AuthConfig:       authConfig,
			ProgressOutput:   po,
			RegistryService:  pm.config.RegistryService,
			ReferenceStore:   rs,
			ImageEventLogger: pm.config.LogPluginEvent,
			ImageStore:       is,
			RequireSchema2:   true, // plugins are always pushed as schema2 manifests
		},
		ConfigMediaType: schema2.MediaTypePluginConfig,
		LayerStore:      ls,
		UploadManager:   uploadManager,
	}

	return distribution.Push(ctx, ref, imagePushConfig)
}
|
|
|
|
|
|
|
|
// pluginReference is a read-only, single-entry reference store adapter that
// maps one named reference to one plugin config digest for push.
type pluginReference struct {
	name     reference.Named // the plugin's normalized name
	pluginID digest.Digest   // digest of the plugin config blob
}
|
|
|
|
|
|
|
|
func (r *pluginReference) References(id digest.Digest) []reference.Named {
|
|
|
|
if r.pluginID != id {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return []reference.Named{r.name}
|
|
|
|
}
|
|
|
|
|
2017-01-26 03:54:18 +03:00
|
|
|
func (r *pluginReference) ReferencesByName(ref reference.Named) []refstore.Association {
|
|
|
|
return []refstore.Association{
|
2016-12-13 02:05:53 +03:00
|
|
|
{
|
|
|
|
Ref: r.name,
|
|
|
|
ID: r.pluginID,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (r *pluginReference) Get(ref reference.Named) (digest.Digest, error) {
|
|
|
|
if r.name.String() != ref.String() {
|
2017-01-26 03:54:18 +03:00
|
|
|
return digest.Digest(""), refstore.ErrDoesNotExist
|
2016-12-13 02:05:53 +03:00
|
|
|
}
|
|
|
|
return r.pluginID, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddTag is a no-op: pluginReference is a read-only store used only for push.
func (r *pluginReference) AddTag(ref reference.Named, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

// AddDigest is a no-op: pluginReference is a read-only store used only for push.
func (r *pluginReference) AddDigest(ref reference.Canonical, id digest.Digest, force bool) error {
	// Read only, ignore
	return nil
}

// Delete is a no-op: pluginReference is a read-only store used only for push.
func (r *pluginReference) Delete(ref reference.Named) (bool, error) {
	// Read only, ignore
	return false, nil
}
|
|
|
|
|
|
|
|
// pluginConfigStore adapts a plugin's config blob to the image-store
// interface expected by distribution.Push.
type pluginConfigStore struct {
	pm     *Manager
	plugin *v2.Plugin
}
|
|
|
|
|
|
|
|
// Put always fails: configs are read from the blob store during push, never written.
func (s *pluginConfigStore) Put([]byte) (digest.Digest, error) {
	return digest.Digest(""), errors.New("cannot store config on push")
}
|
|
|
|
|
|
|
|
// Get reads the plugin's config blob from the blob store, but only for the
// digest recorded on the plugin itself.
func (s *pluginConfigStore) Get(d digest.Digest) ([]byte, error) {
	if s.plugin.Config != d {
		return nil, errors.New("plugin not found")
	}
	rwc, err := s.pm.blobStore.Get(d)
	if err != nil {
		return nil, err
	}
	defer rwc.Close()
	return ioutil.ReadAll(rwc)
}
|
|
|
|
|
2017-08-08 22:43:48 +03:00
|
|
|
// RootFSAndOSFromConfig extracts the rootfs description and OS from a raw
// plugin config blob.
func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
	return configToRootFS(c)
}
|
|
|
|
|
|
|
|
// pluginLayerProvider exposes a plugin's rootfs layers as push layers for
// distribution.Push.
type pluginLayerProvider struct {
	pm     *Manager
	plugin *v2.Plugin
}
|
|
|
|
|
|
|
|
// Get returns the push layer whose chain ID matches id, by searching the
// plugin's diff ID prefixes from shortest to longest.
func (p *pluginLayerProvider) Get(id layer.ChainID) (distribution.PushLayer, error) {
	rootFS := rootFSFromPlugin(p.plugin.PluginObj.Config.Rootfs)
	var i int
	// i ends as the shortest prefix length whose chain ID equals id.
	for i = 1; i <= len(rootFS.DiffIDs); i++ {
		if layer.CreateChainID(rootFS.DiffIDs[:i]) == id {
			break
		}
	}
	// Loop ran off the end without a match.
	if i > len(rootFS.DiffIDs) {
		return nil, errors.New("layer not found")
	}
	return &pluginLayer{
		pm:      p.pm,
		diffIDs: rootFS.DiffIDs[:i],
		blobs:   p.plugin.Blobsums[:i],
	}, nil
}
|
|
|
|
|
|
|
|
// pluginLayer represents one layer in a plugin's rootfs chain for push.
// diffIDs and blobs are parallel prefixes; the last entry is this layer.
type pluginLayer struct {
	pm      *Manager
	diffIDs []layer.DiffID
	blobs   []digest.Digest
}
|
|
|
|
|
|
|
|
// ChainID returns the chain ID computed over this layer's full diff ID prefix.
func (l *pluginLayer) ChainID() layer.ChainID {
	return layer.CreateChainID(l.diffIDs)
}

// DiffID returns the diff ID of this layer (the last in the prefix).
func (l *pluginLayer) DiffID() layer.DiffID {
	return l.diffIDs[len(l.diffIDs)-1]
}
|
|
|
|
|
|
|
|
// Parent returns the layer one step shorter in the chain, or nil for the base layer.
func (l *pluginLayer) Parent() distribution.PushLayer {
	if len(l.diffIDs) == 1 {
		return nil
	}
	return &pluginLayer{
		pm:      l.pm,
		diffIDs: l.diffIDs[:len(l.diffIDs)-1],
		blobs:   l.blobs[:len(l.diffIDs)-1],
	}
}
|
2016-06-25 06:57:21 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
// Open returns a reader over this layer's compressed blob from the blob store.
func (l *pluginLayer) Open() (io.ReadCloser, error) {
	return l.pm.blobStore.Get(l.blobs[len(l.diffIDs)-1])
}

// Size returns the size of this layer's blob in bytes.
func (l *pluginLayer) Size() (int64, error) {
	return l.pm.blobStore.Size(l.blobs[len(l.diffIDs)-1])
}

// MediaType returns the manifest media type for a standard image layer.
func (l *pluginLayer) MediaType() string {
	return schema2.MediaTypeLayer
}

// Release is a no-op for plugin layers.
func (l *pluginLayer) Release() {
	// Nothing needs to be released, no references held
}
|
|
|
|
|
|
|
|
// Remove deletes plugin's root directory.
|
2016-12-13 02:05:53 +03:00
|
|
|
func (pm *Manager) Remove(name string, config *types.PluginRmConfig) error {
|
|
|
|
p, err := pm.config.Store.GetV2Plugin(name)
|
2016-12-01 22:36:56 +03:00
|
|
|
pm.mu.RLock()
|
|
|
|
c := pm.cMap[p]
|
|
|
|
pm.mu.RUnlock()
|
|
|
|
|
2016-05-16 18:50:55 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2016-09-07 16:59:15 +03:00
|
|
|
|
|
|
|
if !config.ForceRemove {
|
2016-12-01 22:36:56 +03:00
|
|
|
if p.GetRefCount() > 0 {
|
2017-07-19 17:20:13 +03:00
|
|
|
return inUseError(p.Name())
|
2016-09-07 16:59:15 +03:00
|
|
|
}
|
|
|
|
if p.IsEnabled() {
|
2017-07-19 17:20:13 +03:00
|
|
|
return enabledError(p.Name())
|
2016-08-26 20:02:38 +03:00
|
|
|
}
|
2016-09-07 16:59:15 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if p.IsEnabled() {
|
2016-12-01 22:36:56 +03:00
|
|
|
if err := pm.disable(p, c); err != nil {
|
2016-08-26 20:02:38 +03:00
|
|
|
logrus.Errorf("failed to disable plugin '%s': %s", p.Name(), err)
|
|
|
|
}
|
2016-07-18 18:02:12 +03:00
|
|
|
}
|
2016-09-07 16:59:15 +03:00
|
|
|
|
2016-12-13 05:18:17 +03:00
|
|
|
defer func() {
|
2016-12-13 02:05:53 +03:00
|
|
|
go pm.GC()
|
2016-12-13 05:18:17 +03:00
|
|
|
}()
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
id := p.GetID()
|
|
|
|
pluginDir := filepath.Join(pm.config.Root, id)
|
2017-06-26 21:54:14 +03:00
|
|
|
|
|
|
|
if err := mount.RecursiveUnmount(pluginDir); err != nil {
|
|
|
|
return errors.Wrap(err, "error unmounting plugin data")
|
2017-02-03 07:08:35 +03:00
|
|
|
}
|
2017-06-26 21:54:14 +03:00
|
|
|
|
2017-07-04 14:37:26 +03:00
|
|
|
removeDir := pluginDir + "-removing"
|
|
|
|
if err := os.Rename(pluginDir, removeDir); err != nil {
|
2017-06-26 21:54:14 +03:00
|
|
|
return errors.Wrap(err, "error performing atomic remove of plugin dir")
|
|
|
|
}
|
|
|
|
|
2017-07-04 14:37:26 +03:00
|
|
|
if err := system.EnsureRemoveAll(removeDir); err != nil {
|
2017-06-26 21:54:14 +03:00
|
|
|
return errors.Wrap(err, "error removing plugin dir")
|
2016-11-22 22:21:34 +03:00
|
|
|
}
|
2017-06-26 21:54:14 +03:00
|
|
|
pm.config.Store.Remove(p)
|
2016-12-13 02:05:53 +03:00
|
|
|
pm.config.LogPluginEvent(id, name, "remove")
|
2017-06-07 20:07:01 +03:00
|
|
|
pm.publisher.Publish(EventRemove{Plugin: p.PluginObj})
|
2016-07-18 18:02:12 +03:00
|
|
|
return nil
|
2016-05-16 18:50:55 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Set sets plugin args (settable fields such as env, mounts, devices)
// and persists the updated plugin to disk.
func (pm *Manager) Set(name string, args []string) error {
	p, err := pm.config.Store.GetV2Plugin(name)
	if err != nil {
		return err
	}
	if err := p.Set(args); err != nil {
		return err
	}
	return pm.save(p)
}
|
2016-10-04 22:01:19 +03:00
|
|
|
|
|
|
|
// CreateFromContext creates a plugin from the given pluginDir which contains
|
2016-11-08 05:51:47 +03:00
|
|
|
// both the rootfs and the config.json and a repoName with optional tag.
|
2016-12-13 02:05:53 +03:00
|
|
|
func (pm *Manager) CreateFromContext(ctx context.Context, tarCtx io.ReadCloser, options *types.PluginCreateOptions) (err error) {
|
|
|
|
pm.muGC.RLock()
|
|
|
|
defer pm.muGC.RUnlock()
|
|
|
|
|
2017-01-26 03:54:18 +03:00
|
|
|
ref, err := reference.ParseNormalizedNamed(options.RepoName)
|
2016-11-29 23:55:41 +03:00
|
|
|
if err != nil {
|
2016-12-13 02:05:53 +03:00
|
|
|
return errors.Wrapf(err, "failed to parse reference %v", options.RepoName)
|
|
|
|
}
|
|
|
|
if _, ok := ref.(reference.Canonical); ok {
|
|
|
|
return errors.Errorf("canonical references are not permitted")
|
2016-11-29 23:55:41 +03:00
|
|
|
}
|
2017-01-26 03:54:18 +03:00
|
|
|
name := reference.FamiliarString(reference.TagNameOnly(ref))
|
2016-11-29 23:55:41 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
if err := pm.config.Store.validateName(name); err != nil { // fast check, real check is in createPlugin()
|
|
|
|
return err
|
|
|
|
}
|
2016-10-04 22:01:19 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
tmpRootFSDir, err := ioutil.TempDir(pm.tmpDir(), ".rootfs")
|
|
|
|
if err != nil {
|
|
|
|
return errors.Wrap(err, "failed to create temp directory")
|
|
|
|
}
|
2017-02-09 11:58:58 +03:00
|
|
|
defer os.RemoveAll(tmpRootFSDir)
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
var configJSON []byte
|
|
|
|
rootFS := splitConfigRootFSFromTar(tarCtx, &configJSON)
|
2016-11-29 23:55:41 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
rootFSBlob, err := pm.blobStore.New()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
2016-11-29 23:55:41 +03:00
|
|
|
}
|
2016-12-13 02:05:53 +03:00
|
|
|
defer rootFSBlob.Close()
|
|
|
|
gzw := gzip.NewWriter(rootFSBlob)
|
2017-01-07 04:23:18 +03:00
|
|
|
layerDigester := digest.Canonical.Digester()
|
2016-12-13 02:05:53 +03:00
|
|
|
rootFSReader := io.TeeReader(rootFS, io.MultiWriter(gzw, layerDigester.Hash()))
|
2016-11-29 23:55:41 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
if err := chrootarchive.Untar(rootFSReader, tmpRootFSDir, nil); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if err := rootFS.Close(); err != nil {
|
2016-10-04 22:01:19 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
if configJSON == nil {
|
|
|
|
return errors.New("config not found")
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := gzw.Close(); err != nil {
|
|
|
|
return errors.Wrap(err, "error closing gzip writer")
|
|
|
|
}
|
|
|
|
|
|
|
|
var config types.PluginConfig
|
|
|
|
if err := json.Unmarshal(configJSON, &config); err != nil {
|
|
|
|
return errors.Wrap(err, "failed to parse config")
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := pm.validateConfig(config); err != nil {
|
2016-11-22 20:42:58 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
pm.mu.Lock()
|
|
|
|
defer pm.mu.Unlock()
|
2016-11-22 20:42:58 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
rootFSBlobsum, err := rootFSBlob.Commit()
|
|
|
|
if err != nil {
|
2016-10-04 22:01:19 +03:00
|
|
|
return err
|
|
|
|
}
|
2016-12-13 02:05:53 +03:00
|
|
|
defer func() {
|
|
|
|
if err != nil {
|
|
|
|
go pm.GC()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
|
|
|
|
config.Rootfs = &types.PluginConfigRootfs{
|
|
|
|
Type: "layers",
|
|
|
|
DiffIds: []string{layerDigester.Digest().String()},
|
|
|
|
}
|
2016-10-04 22:01:19 +03:00
|
|
|
|
Embed DockerVersion in plugin config.
Embedding DockerVersion in plugin config when the plugin is created,
enables users to do a docker plugin inspect and know which version
the plugin was built on. This is helpful in cases where users are
running a new plugin on older docker releases and confused at
unexpected behavior.
By embedding DockerVersion in the config, we claim that there's no
guarantee that if the plugin config's DockerVersion is greater that
the version of the docker engine the plugin is executed against, the
plugin will work as expected.
For example, lets say:
- in 17.03, a plugin was released as johndoe/foo:v1
- in 17.05, the plugin uses the new ipchost config setting and author
publishes johndoe/foo:v2
In this case, johndoe/foo:v2 was built on 17.05 using ipchost, but is
running on docker-engine version 17.03. Since 17.05 > 17.03, there's
no guarantee that the plugin will work as expected. Ofcourse, if the
plugin did not use newly added config settings (ipchost in this case)
in 17.05, it would work fine in 17.03.
Signed-off-by: Anusha Ragunathan <anusha.ragunathan@docker.com>
2017-03-22 00:07:41 +03:00
|
|
|
config.DockerVersion = dockerversion.Version
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
configBlob, err := pm.blobStore.New()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer configBlob.Close()
|
|
|
|
if err := json.NewEncoder(configBlob).Encode(config); err != nil {
|
|
|
|
return errors.Wrap(err, "error encoding json config")
|
|
|
|
}
|
|
|
|
configBlobsum, err := configBlob.Commit()
|
|
|
|
if err != nil {
|
2016-10-04 22:01:19 +03:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
p, err := pm.createPlugin(name, configBlobsum, []digest.Digest{rootFSBlobsum}, tmpRootFSDir, nil)
|
|
|
|
if err != nil {
|
2016-11-22 20:42:58 +03:00
|
|
|
return err
|
|
|
|
}
|
2017-01-26 03:54:18 +03:00
|
|
|
p.PluginObj.PluginReference = name
|
2016-10-04 22:01:19 +03:00
|
|
|
|
2017-06-07 20:07:01 +03:00
|
|
|
pm.publisher.Publish(EventCreate{Plugin: p.PluginObj})
|
2016-12-13 02:05:53 +03:00
|
|
|
pm.config.LogPluginEvent(p.PluginObj.ID, name, "create")
|
2016-10-04 22:01:19 +03:00
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
2016-11-24 07:04:44 +03:00
|
|
|
|
2016-12-13 02:05:53 +03:00
|
|
|
// validateConfig performs semantic validation of a plugin config before the
// plugin is created. It is currently a no-op placeholder: name validation is
// handled separately via Store.validateName and the real creation checks live
// in createPlugin. TODO: implement actual config validation here.
func (pm *Manager) validateConfig(config types.PluginConfig) error {
	return nil // TODO:
}
|
|
|
|
|
|
|
|
func splitConfigRootFSFromTar(in io.ReadCloser, config *[]byte) io.ReadCloser {
|
|
|
|
pr, pw := io.Pipe()
|
|
|
|
go func() {
|
|
|
|
tarReader := tar.NewReader(in)
|
|
|
|
tarWriter := tar.NewWriter(pw)
|
|
|
|
defer in.Close()
|
|
|
|
|
|
|
|
hasRootFS := false
|
|
|
|
|
|
|
|
for {
|
|
|
|
hdr, err := tarReader.Next()
|
|
|
|
if err == io.EOF {
|
|
|
|
if !hasRootFS {
|
|
|
|
pw.CloseWithError(errors.Wrap(err, "no rootfs found"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Signals end of archive.
|
|
|
|
tarWriter.Close()
|
|
|
|
pw.Close()
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
pw.CloseWithError(errors.Wrap(err, "failed to read from tar"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
content := io.Reader(tarReader)
|
|
|
|
name := path.Clean(hdr.Name)
|
|
|
|
if path.IsAbs(name) {
|
|
|
|
name = name[1:]
|
|
|
|
}
|
|
|
|
if name == configFileName {
|
|
|
|
dt, err := ioutil.ReadAll(content)
|
|
|
|
if err != nil {
|
|
|
|
pw.CloseWithError(errors.Wrapf(err, "failed to read %s", configFileName))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
*config = dt
|
|
|
|
}
|
|
|
|
if parts := strings.Split(name, "/"); len(parts) != 0 && parts[0] == rootFSFileName {
|
|
|
|
hdr.Name = path.Clean(path.Join(parts[1:]...))
|
|
|
|
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(strings.ToLower(hdr.Linkname), rootFSFileName+"/") {
|
|
|
|
hdr.Linkname = hdr.Linkname[len(rootFSFileName)+1:]
|
|
|
|
}
|
|
|
|
if err := tarWriter.WriteHeader(hdr); err != nil {
|
|
|
|
pw.CloseWithError(errors.Wrap(err, "error writing tar header"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if _, err := pools.Copy(tarWriter, content); err != nil {
|
|
|
|
pw.CloseWithError(errors.Wrap(err, "error copying tar data"))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
hasRootFS = true
|
|
|
|
} else {
|
|
|
|
io.Copy(ioutil.Discard, content)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
return pr
|
2016-11-24 07:04:44 +03:00
|
|
|
}
|