Add support for kubernetes in docker cli

- Add support for kubernetes for the docker stack command
- Update to go 1.9
- Add kubernetes to vendors
- Print orchestrator in the docker version command

Signed-off-by: Vincent Demeester <vincent@sbr.pm>
Signed-off-by: Silvin Lubecki <silvin.lubecki@docker.com>

This commit is contained in:
Parent
70db7cc0fc
Commit
8417e49792
@@ -37,6 +37,7 @@ jobs:
      docker build -f $dockerfile --tag cli-builder:$CIRCLE_BUILD_NUM .
      name=cross-$CIRCLE_BUILD_NUM-$CIRCLE_NODE_INDEX
      docker run \
        -e VERSION=$VERSION \
        -e CROSS_GROUP=$CIRCLE_NODE_INDEX \
        --name $name cli-builder:$CIRCLE_BUILD_NUM \
        make cross
@@ -0,0 +1,53 @@
package orchestrator

import (
	"os"
	"strings"

	"github.com/docker/cli/cli/command"
	cliconfig "github.com/docker/cli/cli/config"
)

// Orchestrator type acts as an enum describing supported orchestrators.
type Orchestrator string

const (
	// Kubernetes orchestrator
	Kubernetes = Orchestrator("kubernetes")
	// Swarm orchestrator
	Swarm = Orchestrator("swarm")
	unset = Orchestrator("unset")

	defaultOrchestrator = Swarm
	dockerOrchestrator  = "DOCKER_ORCHESTRATOR"
)

func normalize(flag string) Orchestrator {
	switch strings.ToLower(flag) {
	case "kubernetes", "k8s":
		return Kubernetes
	case "swarm", "swarmkit":
		return Swarm
	default:
		return unset
	}
}

// GetOrchestrator checks DOCKER_ORCHESTRATOR environment variable and configuration file
// orchestrator value and returns user defined Orchestrator.
func GetOrchestrator(dockerCli command.Cli) Orchestrator {
	// Check environment variable
	env := os.Getenv(dockerOrchestrator)
	if o := normalize(env); o != unset {
		return o
	}
	// Check config file
	if configFile := cliconfig.LoadDefaultConfigFile(dockerCli.Err()); configFile != nil {
		if o := normalize(configFile.Orchestrator); o != unset {
			return o
		}
	}

	// Nothing set, use default orchestrator
	return defaultOrchestrator
}
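The commit itself does not ship a unit test for this package. As an illustrative sketch only (not part of the change), an in-package test of the normalize mapping, which also documents how a user-supplied DOCKER_ORCHESTRATOR value is interpreted, could look like this:

package orchestrator

import "testing"

// Hypothetical in-package test: it exercises the unexported normalize function
// defined above, so it would have to live in the same package.
func TestNormalize(t *testing.T) {
	cases := map[string]Orchestrator{
		"kubernetes": Kubernetes,
		"K8S":        Kubernetes,
		"swarm":      Swarm,
		"SwarmKit":   Swarm,
		"":           unset,
		"mesos":      unset,
	}
	for input, want := range cases {
		if got := normalize(input); got != want {
			t.Errorf("normalize(%q) = %q, want %q", input, got, want)
		}
	}
}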
@@ -3,6 +3,9 @@ package stack

import (
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/command/orchestrator"
	"github.com/docker/cli/cli/command/stack/kubernetes"
	"github.com/docker/cli/cli/command/stack/swarm"
	"github.com/spf13/cobra"
)

@@ -15,19 +18,24 @@ func NewStackCommand(dockerCli command.Cli) *cobra.Command {
		RunE:        command.ShowHelp(dockerCli.Err()),
		Annotations: map[string]string{"version": "1.25"},
	}
	cmd.AddCommand(
		newDeployCommand(dockerCli),
		newListCommand(dockerCli),
		newRemoveCommand(dockerCli),
		newServicesCommand(dockerCli),
		newPsCommand(dockerCli),
	)
	switch orchestrator.GetOrchestrator(dockerCli) {
	case orchestrator.Kubernetes:
		kubernetes.AddStackCommands(cmd, dockerCli)
	case orchestrator.Swarm:
		swarm.AddStackCommands(cmd, dockerCli)
	}
	return cmd
}

// NewTopLevelDeployCommand returns a command for `docker deploy`
func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command {
	cmd := newDeployCommand(dockerCli)
	var cmd *cobra.Command
	switch orchestrator.GetOrchestrator(dockerCli) {
	case orchestrator.Kubernetes:
		cmd = kubernetes.NewTopLevelDeployCommand(dockerCli)
	case orchestrator.Swarm:
		cmd = swarm.NewTopLevelDeployCommand(dockerCli)
	}
	// Remove the aliases at the top level
	cmd.Aliases = []string{}
	cmd.Annotations = map[string]string{"experimental": "", "version": "1.25"}
@@ -1,4 +1,4 @@
package stack
package common

import (
	"fmt"

@@ -10,21 +10,25 @@ import (
	"github.com/spf13/pflag"
)

func addComposefileFlag(opt *string, flags *pflag.FlagSet) {
// AddComposefileFlag adds compose-file file to the specified flagset
func AddComposefileFlag(opt *string, flags *pflag.FlagSet) {
	flags.StringVarP(opt, "compose-file", "c", "", "Path to a Compose file")
	flags.SetAnnotation("compose-file", "version", []string{"1.25"})
}

func addBundlefileFlag(opt *string, flags *pflag.FlagSet) {
// AddBundlefileFlag adds bundle-file file to the specified flagset
func AddBundlefileFlag(opt *string, flags *pflag.FlagSet) {
	flags.StringVar(opt, "bundle-file", "", "Path to a Distributed Application Bundle file")
	flags.SetAnnotation("bundle-file", "experimental", nil)
}

func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {
// AddRegistryAuthFlag adds with-registry-auth file to the specified flagset
func AddRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) {
	flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents")
}

func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) {
// LoadBundlefile loads a bundle-file from the specified path
func LoadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) {
	defaultPath := fmt.Sprintf("%s.dab", namespace)

	if path == "" {
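The helpers above are exported, presumably so that the per-orchestrator deploy commands can register the same flags. A minimal sketch of that intended usage follows; the import path for the renamed package and the command wiring are assumptions, not taken from the commit:

package main

import (
	"fmt"

	"github.com/docker/cli/cli/command/stack/common" // assumed import path for the package renamed above
	"github.com/spf13/cobra"
)

func main() {
	var composefile string
	cmd := &cobra.Command{
		Use: "deploy [OPTIONS] STACK",
		RunE: func(cmd *cobra.Command, args []string) error {
			// A real command would hand composefile to the orchestrator-specific deployer.
			fmt.Println("deploying with compose file:", composefile)
			return nil
		},
	}
	common.AddComposefileFlag(&composefile, cmd.Flags())
	_ = cmd.Execute()
}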
@@ -1,4 +1,4 @@
package stack
package common

import (
	"bytes"

@@ -32,7 +32,7 @@ func TestLoadBundlefileErrors(t *testing.T) {
	}

	for _, tc := range testCases {
		_, err := loadBundlefile(&bytes.Buffer{}, tc.namespace, tc.path)
		_, err := LoadBundlefile(&bytes.Buffer{}, tc.namespace, tc.path)
		assert.Error(t, err, tc.expectedError)
	}
}

@@ -42,7 +42,7 @@ func TestLoadBundlefile(t *testing.T) {

	namespace := ""
	path := filepath.Join("testdata", "bundlefile_with_two_services.dab")
	bundleFile, err := loadBundlefile(buf, namespace, path)
	bundleFile, err := LoadBundlefile(buf, namespace, path)

	assert.NoError(t, err)
	assert.Equal(t, len(bundleFile.Services), 2)
@@ -0,0 +1,88 @@
package clientset

import (
	composev1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset/typed/compose/v1beta1"
	glog "github.com/golang/glog"
	discovery "k8s.io/client-go/discovery"
	rest "k8s.io/client-go/rest"
	flowcontrol "k8s.io/client-go/util/flowcontrol"
)

type Interface interface {
	Discovery() discovery.DiscoveryInterface
	ComposeV1beta1() composev1beta1.ComposeV1beta1Interface
	// Deprecated: please explicitly pick a version if possible.
	Compose() composev1beta1.ComposeV1beta1Interface
}

// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
	*discovery.DiscoveryClient
	*composev1beta1.ComposeV1beta1Client
}

// ComposeV1beta1 retrieves the ComposeV1beta1Client
func (c *Clientset) ComposeV1beta1() composev1beta1.ComposeV1beta1Interface {
	if c == nil {
		return nil
	}
	return c.ComposeV1beta1Client
}

// Deprecated: Compose retrieves the default version of ComposeClient.
// Please explicitly pick a version.
func (c *Clientset) Compose() composev1beta1.ComposeV1beta1Interface {
	if c == nil {
		return nil
	}
	return c.ComposeV1beta1Client
}

// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
	if c == nil {
		return nil
	}
	return c.DiscoveryClient
}

// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}
	var cs Clientset
	var err error
	cs.ComposeV1beta1Client, err = composev1beta1.NewForConfig(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	if err != nil {
		glog.Errorf("failed to create the DiscoveryClient: %v", err)
		return nil, err
	}
	return &cs, nil
}

// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.ComposeV1beta1Client = composev1beta1.NewForConfigOrDie(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
}

// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
	var cs Clientset
	cs.ComposeV1beta1Client = composev1beta1.New(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
	return &cs
}
@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.

// This package has the automatically generated clientset.
package clientset
@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.

// This package contains the scheme of the automatically generated clientset.
package scheme
@@ -0,0 +1,37 @@
package scheme

import (
	composev1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
	schema "k8s.io/apimachinery/pkg/runtime/schema"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
)

var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)

func init() {
	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
	AddToScheme(Scheme)
}

// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
//   import (
//     "k8s.io/client-go/kubernetes"
//     clientsetscheme "k8s.io/client-go/kuberentes/scheme"
//     aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
//   )
//
//   kclientset, _ := kubernetes.NewForConfig(c)
//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
	composev1beta1.AddToScheme(scheme)

}
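The comment on AddToScheme describes the intended composition pattern; applied to this clientset it would look roughly like the following sketch (not code from the commit):

package main

import (
	composescheme "github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset/scheme"
	kubescheme "k8s.io/client-go/kubernetes/scheme"
)

func main() {
	// Register the compose.docker.com types into client-go's global scheme so
	// that RawExtensions embedding compose objects decode alongside core types.
	composescheme.AddToScheme(kubescheme.Scheme)
}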
@@ -0,0 +1,72 @@
package v1beta1

import (
	"github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset/scheme"
	v1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
	rest "k8s.io/client-go/rest"
)

type ComposeV1beta1Interface interface {
	RESTClient() rest.Interface
	StacksGetter
}

// ComposeV1beta1Client is used to interact with features provided by the compose.docker.com group.
type ComposeV1beta1Client struct {
	restClient rest.Interface
}

func (c *ComposeV1beta1Client) Stacks(namespace string) StackInterface {
	return newStacks(c, namespace)
}

// NewForConfig creates a new ComposeV1beta1Client for the given config.
func NewForConfig(c *rest.Config) (*ComposeV1beta1Client, error) {
	config := *c
	if err := setConfigDefaults(&config); err != nil {
		return nil, err
	}
	client, err := rest.RESTClientFor(&config)
	if err != nil {
		return nil, err
	}
	return &ComposeV1beta1Client{client}, nil
}

// NewForConfigOrDie creates a new ComposeV1beta1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ComposeV1beta1Client {
	client, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return client
}

// New creates a new ComposeV1beta1Client for the given RESTClient.
func New(c rest.Interface) *ComposeV1beta1Client {
	return &ComposeV1beta1Client{c}
}

func setConfigDefaults(config *rest.Config) error {
	gv := v1beta1.SchemeGroupVersion
	config.GroupVersion = &gv
	config.APIPath = "/apis"
	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}

	if config.UserAgent == "" {
		config.UserAgent = rest.DefaultKubernetesUserAgent()
	}

	return nil
}

// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *ComposeV1beta1Client) RESTClient() rest.Interface {
	if c == nil {
		return nil
	}
	return c.restClient
}
@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.

// This package has the automatically generated typed clients.
package v1beta1
@@ -0,0 +1,3 @@
package v1beta1

type StackExpansion interface{}
@@ -0,0 +1,158 @@
package v1beta1

import (
	scheme "github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset/scheme"
	v1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"
	watch "k8s.io/apimachinery/pkg/watch"
	rest "k8s.io/client-go/rest"
)

// StacksGetter has a method to return a StackInterface.
// A group's client should implement this interface.
type StacksGetter interface {
	Stacks(namespace string) StackInterface
}

// StackInterface has methods to work with Stack resources.
type StackInterface interface {
	Create(*v1beta1.Stack) (*v1beta1.Stack, error)
	Update(*v1beta1.Stack) (*v1beta1.Stack, error)
	UpdateStatus(*v1beta1.Stack) (*v1beta1.Stack, error)
	Delete(name string, options *v1.DeleteOptions) error
	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
	Get(name string, options v1.GetOptions) (*v1beta1.Stack, error)
	List(opts v1.ListOptions) (*v1beta1.StackList, error)
	Watch(opts v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Stack, err error)
	StackExpansion
}

var _ StackInterface = &stacks{}

// stacks implements StackInterface
type stacks struct {
	client rest.Interface
	ns     string
}

// newStacks returns a Stacks
func newStacks(c *ComposeV1beta1Client, namespace string) *stacks {
	return &stacks{
		client: c.RESTClient(),
		ns:     namespace,
	}
}

// Create takes the representation of a stack and creates it. Returns the server's representation of the stack, and an error, if there is any.
func (c *stacks) Create(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
	result = &v1beta1.Stack{}
	err = c.client.Post().
		Namespace(c.ns).
		Resource("stacks").
		Body(stack).
		Do().
		Into(result)
	return
}

// Update takes the representation of a stack and updates it. Returns the server's representation of the stack, and an error, if there is any.
func (c *stacks) Update(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
	result = &v1beta1.Stack{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("stacks").
		Name(stack.Name).
		Body(stack).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().

func (c *stacks) UpdateStatus(stack *v1beta1.Stack) (result *v1beta1.Stack, err error) {
	result = &v1beta1.Stack{}
	err = c.client.Put().
		Namespace(c.ns).
		Resource("stacks").
		Name(stack.Name).
		SubResource("status").
		Body(stack).
		Do().
		Into(result)
	return
}

// Delete takes name of the stack and deletes it. Returns an error if one occurs.
func (c *stacks) Delete(name string, options *v1.DeleteOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("stacks").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *stacks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	return c.client.Delete().
		Namespace(c.ns).
		Resource("stacks").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Body(options).
		Do().
		Error()
}

// Get takes name of the stack, and returns the corresponding stack object, and an error if there is any.
func (c *stacks) Get(name string, options v1.GetOptions) (result *v1beta1.Stack, err error) {
	result = &v1beta1.Stack{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("stacks").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of Stacks that match those selectors.
func (c *stacks) List(opts v1.ListOptions) (result *v1beta1.StackList, err error) {
	result = &v1beta1.StackList{}
	err = c.client.Get().
		Namespace(c.ns).
		Resource("stacks").
		VersionedParams(&opts, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested stacks.
func (c *stacks) Watch(opts v1.ListOptions) (watch.Interface, error) {
	opts.Watch = true
	return c.client.Get().
		Namespace(c.ns).
		Resource("stacks").
		VersionedParams(&opts, scheme.ParameterCodec).
		Watch()
}

// Patch applies the patch and returns the patched stack.
func (c *stacks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Stack, err error) {
	result = &v1beta1.Stack{}
	err = c.client.Patch(pt).
		Namespace(c.ns).
		Resource("stacks").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}
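The generated clientset and the typed client above are what the new kubernetes stack commands talk to. As a rough, self-contained sketch of how the pieces fit together (not code from this commit; the kubeconfig path, namespace, and stack name are example values): build a rest.Config from a kubeconfig, construct the Clientset, then create and list Stack objects.

package main

import (
	"fmt"

	clientset "github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset"
	apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from an example kubeconfig path.
	config, err := clientcmd.BuildConfigFromFlags("", "/home/user/.kube/config")
	if err != nil {
		panic(err)
	}
	cs, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	stacks := cs.ComposeV1beta1().Stacks("default")

	// Create a Stack whose spec carries the raw compose file content.
	_, err = stacks.Create(&apiv1beta1.Stack{
		ObjectMeta: metav1.ObjectMeta{Name: "mystack"},
		Spec:       apiv1beta1.StackSpec{ComposeFile: "version: \"3.2\"\nservices:\n  redis:\n    image: redis:alpine\n"},
	})
	if err != nil {
		panic(err)
	}

	// List the stacks in the namespace and print their phase.
	list, err := stacks.List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, s := range list.Items {
		fmt.Println(s.Name, s.Status.Phase)
	}
}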
@@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package,register
// +groupName=compose.docker.com

// Package compose is the internal version of the API.
package compose
@@ -0,0 +1,43 @@
package compose

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used to register these objects
const GroupName = "compose.docker.com"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns back a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects functions that add things to a scheme. It's to allow
	// code to compile without explicitly referencing generated types. You should
	// declare one in each package that will have generated deep copy or conversion
	// functions.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)

	// AddToScheme applies all the stored functions to the scheme. A non-nil error
	// indicates that one function failed and the attempt was abandoned.
	AddToScheme = SchemeBuilder.AddToScheme
)

// adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Stack{},
		&StackList{},
	)
	return nil
}
@@ -0,0 +1,117 @@
package compose

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ImpersonationConfig holds information use to impersonate calls from the compose controller
type ImpersonationConfig struct {
	// UserName is the username to impersonate on each request.
	UserName string
	// Groups are the groups to impersonate on each request.
	Groups []string
	// Extra is a free-form field which can be used to link some authentication information
	// to authorization information. This field allows you to impersonate it.
	Extra map[string][]string
}

// Stack defines a stack object to be register in the kubernetes API
// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type Stack struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Spec   StackSpec
	Status StackStatus
}

// StackStatus defines the observed state of Stack
type StackStatus struct {
	Phase   StackPhase
	Message string
}

// StackSpec defines the desired state of Stack
type StackSpec struct {
	ComposeFile string
	Owner       ImpersonationConfig
}

// StackPhase defines the status phase in which the stack is.
type StackPhase string

// These are valid conditions of a stack.
const (
	// Available means the stack is available.
	StackAvailable StackPhase = "Available"
	// Progressing means the deployment is progressing.
	StackProgressing StackPhase = "Progressing"
	// StackFailure is added in a stack when one of its members fails to be created
	// or deleted.
	StackFailure StackPhase = "Failure"
)

// StackList defines a list of stacks
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StackList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Stack
}

// Owner defines the owner of a stack. It is used to impersonate the controller calls
// to kubernetes api.
type Owner struct {
	metav1.TypeMeta
	metav1.ObjectMeta
	Owner ImpersonationConfig
}

// OwnerList defines a list of owner.
type OwnerList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Owner
}

// FIXME(vdemeester) are those necessary ??

// NewStatus is newStatus
func (Stack) NewStatus() interface{} {
	return StackStatus{}
}

// GetStatus returns the status
func (pc *Stack) GetStatus() interface{} {
	return pc.Status
}

// SetStatus sets the status
func (pc *Stack) SetStatus(s interface{}) {
	pc.Status = s.(StackStatus)
}

// GetSpec returns the spec
func (pc *Stack) GetSpec() interface{} {
	return pc.Spec
}

// SetSpec sets the spec
func (pc *Stack) SetSpec(s interface{}) {
	pc.Spec = s.(StackSpec)
}

// GetObjectMeta returns the ObjectMeta
func (pc *Stack) GetObjectMeta() *metav1.ObjectMeta {
	return &pc.ObjectMeta
}

// SetGeneration sets the Generation
func (pc *Stack) SetGeneration(generation int64) {
	pc.ObjectMeta.Generation = generation
}

// GetGeneration returns the Generation
func (pc Stack) GetGeneration() int64 {
	return pc.ObjectMeta.Generation
}
@@ -0,0 +1,11 @@
// Package v1beta1 holds the v1beta1 versions of our stack structures.
// API versions allow the api contract for a resource to be changed while keeping
// backward compatibility by support multiple concurrent versions
// of the same resource
//
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/docker/cli/cli/command/stack/kubernetes/api/compose
// +k8s:defaulter-gen=TypeMeta
// +groupName=compose.docker.com
package v1beta1 // import "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
@@ -0,0 +1,25 @@
package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/docker/cli/cli/command/stack/kubernetes/api/compose"
)

// Owner defines the owner of a stack. It is used to impersonate the controller calls
// to kubernetes api.
// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +subresource-request
type Owner struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Owner             compose.ImpersonationConfig `json:"owner,omitempty"`
}

// OwnerList defines a list of owner.
type OwnerList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []Owner
}
@@ -0,0 +1,47 @@
package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used to register these objects
const GroupName = "compose.docker.com"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}

var (
	// SchemeBuilder collects functions that add things to a scheme. It's to allow
	// code to compile without explicitly referencing generated types. You should
	// declare one in each package that will have generated deep copy or conversion
	// functions.
	SchemeBuilder      runtime.SchemeBuilder
	localSchemeBuilder = &SchemeBuilder

	// AddToScheme applies all the stored functions to the scheme. A non-nil error
	// indicates that one function failed and the attempt was abandoned.
	AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
	localSchemeBuilder.Register(addKnownTypes)
}

// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Stack{},
		&StackList{},
		&Owner{},
		&OwnerList{},
	)
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}
@@ -0,0 +1,68 @@
package v1beta1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// StackList defines a list of stacks
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
type StackList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	Items []Stack `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// +genclient=true
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Stack defines a stack object to be register in the kubernetes API
// +k8s:openapi-gen=true
// +resource:path=stacks,strategy=StackStrategy
// +subresource:request=Owner,path=owner,rest=OwnerStackREST
type Stack struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   StackSpec   `json:"spec,omitempty"`
	Status StackStatus `json:"status,omitempty"`
}

// StackSpec defines the desired state of Stack
type StackSpec struct {
	ComposeFile string `json:"composeFile,omitempty"`
}

// StackPhase defines the status phase in which the stack is.
type StackPhase string

// These are valid conditions of a stack.
const (
	// Available means the stack is available.
	StackAvailable StackPhase = "Available"
	// Progressing means the deployment is progressing.
	StackProgressing StackPhase = "Progressing"
	// StackFailure is added in a stack when one of its members fails to be created
	// or deleted.
	StackFailure StackPhase = "Failure"
)

// StackStatus defines the observed state of Stack
type StackStatus struct {
	// Current condition of the stack.
	// +optional
	Phase StackPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=StackPhase"`
	// A human readable message indicating details about the stack.
	// +optional
	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
}

// Clone implements the Cloner interface for kubernetes
func (s *Stack) Clone() (*Stack, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	return s.DeepCopy(), nil
}
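The ComposeFile field carries the raw Compose YAML. A minimal sketch of how a deploy path might build a Stack object from a compose file before handing it to the typed client above (the helper name and file path are illustrative, not taken from the commit):

package main

import (
	"fmt"
	"io/ioutil"

	apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newStackFromComposeFile is a hypothetical helper: it reads a Compose file
// and wraps its raw content in a v1beta1.Stack, the object the generated
// client knows how to Create and Update.
func newStackFromComposeFile(name, path string) (*apiv1beta1.Stack, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return &apiv1beta1.Stack{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: apiv1beta1.StackSpec{
			ComposeFile: string(data),
		},
	}, nil
}

func main() {
	stack, err := newStackFromComposeFile("mystack", "docker-compose.yml")
	if err != nil {
		panic(err)
	}
	fmt.Println(stack.Name, len(stack.Spec.ComposeFile))
}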
9  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-nginx.input.yaml  (vendored, normal file)

@@ -0,0 +1,9 @@
version: "3.2"
services:
  nginx:
    image: nginx
    deploy:
      replicas: 2

  redis:
    image: redis:alpine

10  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-nginx.output.yaml  (vendored, normal file)

@@ -0,0 +1,10 @@
version: "3.2"
services:
  nginx:
    image: nginx
    deploy:
      replicas: 2
  redis:
    image: redis:alpine
    deploy:
      replicas: 5

10  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-with-memory.input.yaml  (vendored, normal file)

@@ -0,0 +1,10 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      resources:
        limits:
          memory: 64M
        reservations:
          memory: 64M

11  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-with-memory.output.yaml  (vendored, normal file)

@@ -0,0 +1,11 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      resources:
        limits:
          memory: 64M
        reservations:
          memory: 64M
      replicas: 5

6  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-with-replicas.input.yaml  (vendored, normal file)

@@ -0,0 +1,6 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 2

6  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/redis-with-replicas.output.yaml  (vendored, normal file)

@@ -0,0 +1,6 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 5

4  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/single-redis.input.yaml  (vendored, normal file)

@@ -0,0 +1,4 @@
version: "3.2"
services:
  redis:
    image: redis:alpine

6  cli/command/stack/kubernetes/api/compose/v1beta1/testdata/single-redis.output.yaml  (vendored, normal file)

@@ -0,0 +1,6 @@
version: "3.2"
services:
  redis:
    image: redis:alpine
    deploy:
      replicas: 5
@ -0,0 +1,169 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
// This file was autogenerated by conversion-gen. Do not edit it manually!
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
compose "github.com/docker/cli/cli/command/stack/kubernetes/api/compose"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
func init() {
|
||||
localSchemeBuilder.Register(RegisterConversions)
|
||||
}
|
||||
|
||||
// RegisterConversions adds conversion functions to the given scheme.
|
||||
// Public to allow building arbitrary schemes.
|
||||
func RegisterConversions(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedConversionFuncs(
|
||||
Convert_v1beta1_Owner_To_compose_Owner,
|
||||
Convert_compose_Owner_To_v1beta1_Owner,
|
||||
Convert_v1beta1_Stack_To_compose_Stack,
|
||||
Convert_compose_Stack_To_v1beta1_Stack,
|
||||
Convert_v1beta1_StackList_To_compose_StackList,
|
||||
Convert_compose_StackList_To_v1beta1_StackList,
|
||||
Convert_v1beta1_StackSpec_To_compose_StackSpec,
|
||||
Convert_compose_StackSpec_To_v1beta1_StackSpec,
|
||||
Convert_v1beta1_StackStatus_To_compose_StackStatus,
|
||||
Convert_compose_StackStatus_To_v1beta1_StackStatus,
|
||||
)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_Owner_To_compose_Owner(in *Owner, out *compose.Owner, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Owner = in.Owner
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_Owner_To_compose_Owner is an autogenerated conversion function.
|
||||
func Convert_v1beta1_Owner_To_compose_Owner(in *Owner, out *compose.Owner, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_Owner_To_compose_Owner(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_compose_Owner_To_v1beta1_Owner(in *compose.Owner, out *Owner, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
out.Owner = in.Owner
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_compose_Owner_To_v1beta1_Owner is an autogenerated conversion function.
|
||||
func Convert_compose_Owner_To_v1beta1_Owner(in *compose.Owner, out *Owner, s conversion.Scope) error {
|
||||
return autoConvert_compose_Owner_To_v1beta1_Owner(in, out, s)
|
||||
}
|
||||
|
||||
|
||||
func autoConvert_v1beta1_Stack_To_compose_Stack(in *Stack, out *compose.Stack, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1beta1_StackSpec_To_compose_StackSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v1beta1_StackStatus_To_compose_StackStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_Stack_To_compose_Stack is an autogenerated conversion function.
|
||||
func Convert_v1beta1_Stack_To_compose_Stack(in *Stack, out *compose.Stack, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_Stack_To_compose_Stack(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_compose_Stack_To_v1beta1_Stack(in *compose.Stack, out *Stack, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_compose_StackSpec_To_v1beta1_StackSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_compose_StackStatus_To_v1beta1_StackStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_compose_Stack_To_v1beta1_Stack is an autogenerated conversion function.
|
||||
func Convert_compose_Stack_To_v1beta1_Stack(in *compose.Stack, out *Stack, s conversion.Scope) error {
|
||||
return autoConvert_compose_Stack_To_v1beta1_Stack(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_StackList_To_compose_StackList(in *StackList, out *compose.StackList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]compose.Stack, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_v1beta1_Stack_To_compose_Stack(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_StackList_To_compose_StackList is an autogenerated conversion function.
|
||||
func Convert_v1beta1_StackList_To_compose_StackList(in *StackList, out *compose.StackList, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_StackList_To_compose_StackList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_compose_StackList_To_v1beta1_StackList(in *compose.StackList, out *StackList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Stack, len(*in))
|
||||
for i := range *in {
|
||||
if err := Convert_compose_Stack_To_v1beta1_Stack(&(*in)[i], &(*out)[i], s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
out.Items = make([]Stack, 0)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_compose_StackList_To_v1beta1_StackList is an autogenerated conversion function.
|
||||
func Convert_compose_StackList_To_v1beta1_StackList(in *compose.StackList, out *StackList, s conversion.Scope) error {
|
||||
return autoConvert_compose_StackList_To_v1beta1_StackList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_StackSpec_To_compose_StackSpec(in *StackSpec, out *compose.StackSpec, s conversion.Scope) error {
|
||||
out.ComposeFile = in.ComposeFile
|
||||
return nil
|
||||
}
|
||||
|
||||
func Convert_compose_StackSpec_To_v1beta1_StackSpec(in *compose.StackSpec, out *StackSpec, s conversion.Scope) error {
|
||||
return autoConvert_compose_StackSpec_To_v1beta1_StackSpec(in, out, s)
|
||||
}
|
||||
// Convert_v1beta1_StackSpec_To_compose_StackSpec is an autogenerated conversion function.
|
||||
func Convert_v1beta1_StackSpec_To_compose_StackSpec(in *StackSpec, out *compose.StackSpec, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_StackSpec_To_compose_StackSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_compose_StackSpec_To_v1beta1_StackSpec(in *compose.StackSpec, out *StackSpec, s conversion.Scope) error {
|
||||
out.ComposeFile = in.ComposeFile
|
||||
return nil
|
||||
}
|
||||
|
||||
func autoConvert_v1beta1_StackStatus_To_compose_StackStatus(in *StackStatus, out *compose.StackStatus, s conversion.Scope) error {
|
||||
out.Phase = compose.StackPhase(in.Phase)
|
||||
out.Message = in.Message
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1beta1_StackStatus_To_compose_StackStatus is an autogenerated conversion function.
|
||||
func Convert_v1beta1_StackStatus_To_compose_StackStatus(in *StackStatus, out *compose.StackStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1beta1_StackStatus_To_compose_StackStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_compose_StackStatus_To_v1beta1_StackStatus(in *compose.StackStatus, out *StackStatus, s conversion.Scope) error {
|
||||
out.Phase = StackPhase(in.Phase)
|
||||
out.Message = in.Message
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_compose_StackStatus_To_v1beta1_StackStatus is an autogenerated conversion function.
|
||||
func Convert_compose_StackStatus_To_v1beta1_StackStatus(in *compose.StackStatus, out *StackStatus, s conversion.Scope) error {
|
||||
return autoConvert_compose_StackStatus_To_v1beta1_StackStatus(in, out, s)
|
||||
}
|
|
@ -0,0 +1,204 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package v1beta1
|
||||
|
||||
import (
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Deprecated: register deep-copy functions.
|
||||
func init() {
|
||||
SchemeBuilder.Register(RegisterDeepCopies)
|
||||
}
|
||||
|
||||
// Deprecated: RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
||||
// to allow building arbitrary schemes.
|
||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedDeepCopyFuncs(
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*Owner).DeepCopyInto(out.(*Owner))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&Owner{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*OwnerList).DeepCopyInto(out.(*OwnerList))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&OwnerList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*Stack).DeepCopyInto(out.(*Stack))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&Stack{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackList).DeepCopyInto(out.(*StackList))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackSpec).DeepCopyInto(out.(*StackSpec))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackStatus{})},
|
||||
)
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Owner) DeepCopyInto(out *Owner) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Owner.DeepCopyInto(&out.Owner)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new Owner.
|
||||
func (x *Owner) DeepCopy() *Owner {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Owner)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *Owner) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OwnerList) DeepCopyInto(out *OwnerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Owner, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new OwnerList.
|
||||
func (x *OwnerList) DeepCopy() *OwnerList {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OwnerList)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *OwnerList) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Stack) DeepCopyInto(out *Stack) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
out.Spec = in.Spec
|
||||
out.Status = in.Status
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new Stack.
|
||||
func (x *Stack) DeepCopy() *Stack {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Stack)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *Stack) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackList) DeepCopyInto(out *StackList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Stack, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new StackList.
|
||||
func (x *StackList) DeepCopy() *StackList {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackList)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *StackList) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackSpec) DeepCopyInto(out *StackSpec) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec.
|
||||
func (x *StackSpec) DeepCopy() *StackSpec {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackSpec)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackStatus) DeepCopyInto(out *StackStatus) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus.
|
||||
func (x *StackStatus) DeepCopy() *StackStatus {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackStatus)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
|
@ -0,0 +1,231 @@
|
|||
// +build !ignore_autogenerated
|
||||
|
||||
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
|
||||
|
||||
package compose
|
||||
|
||||
import (
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Deprecated: register deep-copy functions.
|
||||
func init() {
|
||||
SchemeBuilder.Register(RegisterDeepCopies)
|
||||
}
|
||||
|
||||
// Deprecated: RegisterDeepCopies adds deep-copy functions to the given scheme. Public
|
||||
// to allow building arbitrary schemes.
|
||||
func RegisterDeepCopies(scheme *runtime.Scheme) error {
|
||||
return scheme.AddGeneratedDeepCopyFuncs(
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*ImpersonationConfig).DeepCopyInto(out.(*ImpersonationConfig))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&ImpersonationConfig{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*Owner).DeepCopyInto(out.(*Owner))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&Owner{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*OwnerList).DeepCopyInto(out.(*OwnerList))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&OwnerList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*Stack).DeepCopyInto(out.(*Stack))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&Stack{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackList).DeepCopyInto(out.(*StackList))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackList{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackSpec).DeepCopyInto(out.(*StackSpec))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackSpec{})},
|
||||
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error {
|
||||
in.(*StackStatus).DeepCopyInto(out.(*StackStatus))
|
||||
return nil
|
||||
}, InType: reflect.TypeOf(&StackStatus{})},
|
||||
)
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ImpersonationConfig) DeepCopyInto(out *ImpersonationConfig) {
|
||||
*out = *in
|
||||
if in.Groups != nil {
|
||||
in, out := &in.Groups, &out.Groups
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
if in.Extra != nil {
|
||||
in, out := &in.Extra, &out.Extra
|
||||
*out = make(map[string][]string, len(*in))
|
||||
for key, val := range *in {
|
||||
if val == nil {
|
||||
(*out)[key] = nil
|
||||
} else {
|
||||
(*out)[key] = make([]string, len(val))
|
||||
copy((*out)[key], val)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new ImpersonationConfig.
|
||||
func (x *ImpersonationConfig) DeepCopy() *ImpersonationConfig {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ImpersonationConfig)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Owner) DeepCopyInto(out *Owner) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Owner.DeepCopyInto(&out.Owner)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new Owner.
|
||||
func (x *Owner) DeepCopy() *Owner {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Owner)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
func (x *Owner) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OwnerList) DeepCopyInto(out *OwnerList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Owner, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new OwnerList.
|
||||
func (x *OwnerList) DeepCopy() *OwnerList {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OwnerList)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Stack) DeepCopyInto(out *Stack) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
out.Status = in.Status
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new Stack.
|
||||
func (x *Stack) DeepCopy() *Stack {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(Stack)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *Stack) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackList) DeepCopyInto(out *StackList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]Stack, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new StackList.
|
||||
func (x *StackList) DeepCopy() *StackList {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackList)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (x *StackList) DeepCopyObject() runtime.Object {
|
||||
if c := x.DeepCopy(); c != nil {
|
||||
return c
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackSpec) DeepCopyInto(out *StackSpec) {
|
||||
*out = *in
|
||||
in.Owner.DeepCopyInto(&out.Owner)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, creating a new StackSpec.
|
||||
func (x *StackSpec) DeepCopy() *StackSpec {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackSpec)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *StackStatus) DeepCopyInto(out *StackStatus) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StackStatus.
|
||||
func (x *StackStatus) DeepCopy() *StackStatus {
|
||||
if x == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(StackStatus)
|
||||
x.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
|
@ -0,0 +1,4 @@
//
// +domain=docker.com

package apis
@ -0,0 +1,58 @@
package labels

import (
    "fmt"
    "strings"
)

const (
    // ForServiceName is the label for the service name.
    ForServiceName = "com.docker.service.name"
    // ForStackName is the label for the stack name.
    ForStackName = "com.docker.stack.namespace"
    // ForServiceID is the label for the service id.
    ForServiceID = "com.docker.service.id"
)

// ForService gives the labels to select a given service in a stack.
func ForService(stackName, serviceName string) map[string]string {
    labels := map[string]string{}

    if serviceName != "" {
        labels[ForServiceName] = serviceName
    }
    if stackName != "" {
        labels[ForStackName] = stackName
    }
    if serviceName != "" && stackName != "" {
        labels[ForServiceID] = stackName + "-" + serviceName
    }

    return labels
}

// Merge merges multiple lists of labels.
func Merge(labelsList ...map[string]string) map[string]string {
    merged := map[string]string{}

    for _, labels := range labelsList {
        for k, v := range labels {
            merged[k] = v
        }
    }

    return merged
}

// SelectorForStack gives the labelSelector to use for a given stack.
// Specific service names can be passed to narrow down the selection.
func SelectorForStack(stackName string, serviceNames ...string) string {
    switch len(serviceNames) {
    case 0:
        return fmt.Sprintf("%s=%s", ForStackName, stackName)
    case 1:
        return fmt.Sprintf("%s=%s,%s=%s", ForStackName, stackName, ForServiceName, serviceNames[0])
    default:
        return fmt.Sprintf("%s=%s,%s in (%s)", ForStackName, stackName, ForServiceName, strings.Join(serviceNames, ","))
    }
}
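These selector strings are meant to be passed as a `LabelSelector` to client-go list calls, which is what the `ps` and `services` commands later in this diff do. The sketch below is illustrative only (not part of the commit) and assumes a `corev1.PodInterface` is available, for example from the client factory added further down.

```go
package example

import (
    "fmt"

    "github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
)

// listStackPods lists the pods belonging to a stack, optionally narrowed down
// to specific services, using the selector helpers above.
func listStackPods(pods corev1.PodInterface, stackName string, services ...string) error {
    selector := labels.SelectorForStack(stackName, services...)
    list, err := pods.List(metav1.ListOptions{LabelSelector: selector})
    if err != nil {
        return err
    }
    fmt.Printf("%d pod(s) match %q\n", len(list.Items), selector)
    return nil
}
```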
@ -0,0 +1,22 @@
package labels

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

func TestForService(t *testing.T) {
    labels := ForService("stack", "service")

    assert.Len(t, labels, 3)
    assert.Equal(t, "stack", labels["com.docker.stack.namespace"])
    assert.Equal(t, "service", labels["com.docker.service.name"])
    assert.Equal(t, "stack-service", labels["com.docker.service.id"])
}

func TestSelectorForStack(t *testing.T) {
    assert.Equal(t, "com.docker.stack.namespace=demostack", SelectorForStack("demostack"))
    assert.Equal(t, "com.docker.stack.namespace=stack,com.docker.service.name=service", SelectorForStack("stack", "service"))
    assert.Equal(t, "com.docker.stack.namespace=stack,com.docker.service.name in (service1,service2)", SelectorForStack("stack", "service1", "service2"))
}
@ -0,0 +1,32 @@
package kubernetes

import (
    "fmt"

    apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
    log "github.com/sirupsen/logrus"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"
)

// APIPresent checks that an API is installed.
func APIPresent(config *rest.Config) error {
    log.Debugf("check API present at %s", config.Host)
    clients, err := kubernetes.NewForConfig(config)
    if err != nil {
        return err
    }

    groups, err := clients.Discovery().ServerGroups()
    if err != nil {
        return err
    }

    for _, group := range groups.Groups {
        if group.Name == apiv1beta1.SchemeGroupVersion.Group {
            return nil
        }
    }

    return fmt.Errorf("could not find %s api. Install it on your cluster first", apiv1beta1.SchemeGroupVersion.Group)
}
@ -0,0 +1,91 @@
package kubernetes

import (
    "k8s.io/apimachinery/pkg/api/meta"
    "k8s.io/client-go/discovery"
    "k8s.io/client-go/kubernetes"
    appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
    typesappsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2"
    corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
    restclient "k8s.io/client-go/rest"
)

// Factory is the kubernetes client factory
type Factory struct {
    namespace     string
    config        *restclient.Config
    coreClientSet *corev1.CoreV1Client
    appsClientSet *appsv1beta2.AppsV1beta2Client
}

// NewFactory creates a kubernetes client factory
func NewFactory(namespace string, config *restclient.Config) (*Factory, error) {
    coreClientSet, err := corev1.NewForConfig(config)
    if err != nil {
        return nil, err
    }

    appsClientSet, err := appsv1beta2.NewForConfig(config)
    if err != nil {
        return nil, err
    }

    return &Factory{
        namespace:     namespace,
        config:        config,
        coreClientSet: coreClientSet,
        appsClientSet: appsClientSet,
    }, nil
}

// RESTMapper returns a PriorityRESTMapper based on the discovered
// groups and resources passed in.
func (s *Factory) RESTMapper() (meta.RESTMapper, error) {
    clientSet, err := kubernetes.NewForConfig(s.config)
    if err != nil {
        return nil, err
    }

    groupResources, err := discovery.GetAPIGroupResources(clientSet.DiscoveryClient)
    if err != nil {
        return nil, err
    }
    mapper := discovery.NewRESTMapper(groupResources, meta.InterfacesForUnstructured)

    return mapper, nil
}

// ConfigMaps returns a client for kubernetes's config maps
func (s *Factory) ConfigMaps() corev1.ConfigMapInterface {
    return s.coreClientSet.ConfigMaps(s.namespace)
}

// Secrets returns a client for kubernetes's secrets
func (s *Factory) Secrets() corev1.SecretInterface {
    return s.coreClientSet.Secrets(s.namespace)
}

// Services returns a client for kubernetes's services
func (s *Factory) Services() corev1.ServiceInterface {
    return s.coreClientSet.Services(s.namespace)
}

// Pods returns a client for kubernetes's pods
func (s *Factory) Pods() corev1.PodInterface {
    return s.coreClientSet.Pods(s.namespace)
}

// Nodes returns a client for kubernetes's nodes
func (s *Factory) Nodes() corev1.NodeInterface {
    return s.coreClientSet.Nodes()
}

// ReplicationControllers returns a client for kubernetes replication controllers
func (s *Factory) ReplicationControllers() corev1.ReplicationControllerInterface {
    return s.coreClientSet.ReplicationControllers(s.namespace)
}

// ReplicaSets returns a client for kubernetes replica sets
func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface {
    return s.appsClientSet.ReplicaSets(s.namespace)
}
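A hypothetical wiring sketch (not part of this commit) showing how the factory can be constructed from a kubeconfig; it assumes the factory package is importable as `github.com/docker/cli/cli/command/stack/kubernetes` and reuses the same default kubeconfig location as the command wiring added next in this diff.

```go
package example

import (
    "path/filepath"

    stackkube "github.com/docker/cli/cli/command/stack/kubernetes" // assumed import path
    "github.com/docker/docker/pkg/homedir"
    "k8s.io/client-go/tools/clientcmd"
)

// newDefaultFactory builds a *rest.Config from ~/.kube/config and wraps it in
// the Factory defined above.
func newDefaultFactory(namespace string) (*stackkube.Factory, error) {
    kubeconfig := filepath.Join(homedir.Get(), ".kube/config")
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        return nil, err
    }
    return stackkube.NewFactory(namespace, config)
}
```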
@ -0,0 +1,107 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
composev1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/client/clientset_generated/clientset/typed/compose/v1beta1"
|
||||
"github.com/docker/docker/pkg/homedir"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// AddStackCommands adds `stack` subcommands
|
||||
func AddStackCommands(root *cobra.Command, dockerCli command.Cli) {
|
||||
var kubeCli kubeCli
|
||||
configureCommand(root, &kubeCli)
|
||||
root.AddCommand(
|
||||
newDeployCommand(dockerCli, &kubeCli),
|
||||
newListCommand(dockerCli, &kubeCli),
|
||||
newRemoveCommand(dockerCli, &kubeCli),
|
||||
newServicesCommand(dockerCli, &kubeCli),
|
||||
newPsCommand(dockerCli, &kubeCli),
|
||||
)
|
||||
}
|
||||
|
||||
// NewTopLevelDeployCommand returns a command for `docker deploy`
|
||||
func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command {
|
||||
var kubeCli kubeCli
|
||||
cmd := newDeployCommand(dockerCli, &kubeCli)
|
||||
configureCommand(cmd, &kubeCli)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func configureCommand(root *cobra.Command, kubeCli *kubeCli) {
|
||||
var (
|
||||
kubeOpts kubeOptions
|
||||
)
|
||||
kubeOpts.installFlags(root.PersistentFlags())
|
||||
preRunE := root.PersistentPreRunE
|
||||
root.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
if preRunE != nil {
|
||||
if err := preRunE(cmd, args); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
kubeCli.kubeNamespace = kubeOpts.namespace
|
||||
if kubeCli.kubeNamespace == "" {
|
||||
kubeCli.kubeNamespace = "default"
|
||||
}
|
||||
// Read kube config flag and environment variable
|
||||
if kubeOpts.kubeconfig == "" {
|
||||
if config := os.Getenv("KUBECONFIG"); config != "" {
|
||||
kubeOpts.kubeconfig = config
|
||||
} else {
|
||||
kubeOpts.kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
|
||||
}
|
||||
}
|
||||
config, err := clientcmd.BuildConfigFromFlags("", kubeOpts.kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
kubeCli.kubeConfig = config
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// kubeOptions are options specific to kubernetes
|
||||
type kubeOptions struct {
|
||||
namespace string
|
||||
kubeconfig string
|
||||
}
|
||||
|
||||
// installFlags adds flags for the common options to the FlagSet
|
||||
func (opts *kubeOptions) installFlags(flags *pflag.FlagSet) {
|
||||
flags.StringVar(&opts.namespace, "namespace", "default", "Kubernetes namespace to use")
|
||||
flags.StringVar(&opts.kubeconfig, "kubeconfig", "", "Kubernetes config file")
|
||||
}
|
||||
|
||||
type kubeCli struct {
|
||||
kubeConfig *restclient.Config
|
||||
kubeNamespace string
|
||||
}
|
||||
|
||||
func (c *kubeCli) ComposeClient() (*Factory, error) {
|
||||
return NewFactory(c.kubeNamespace, c.kubeConfig)
|
||||
}
|
||||
|
||||
func (c *kubeCli) KubeConfig() *restclient.Config {
|
||||
return c.kubeConfig
|
||||
}
|
||||
|
||||
func (c *kubeCli) Stacks() (composev1beta1.StackInterface, error) {
|
||||
err := APIPresent(c.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientSet, err := composev1beta1.NewForConfig(c.kubeConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clientSet.Stacks(c.kubeNamespace), nil
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
|
||||
"github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
|
||||
"github.com/docker/cli/cli/compose/loader"
|
||||
"github.com/docker/cli/cli/compose/types"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
)
|
||||
|
||||
// IsColliding verifies whether services defined in the stack collide with services already deployed in the cluster
|
||||
func IsColliding(services corev1.ServiceInterface, stack *apiv1beta1.Stack) error {
|
||||
stackObjects, err := getServices(stack.Spec.ComposeFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, srv := range stackObjects {
|
||||
if err := verify(services, stack.Name, srv); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func verify(services corev1.ServiceInterface, stackName string, service string) error {
|
||||
svc, err := services.Get(service, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if key, ok := svc.ObjectMeta.Labels[labels.ForStackName]; ok {
|
||||
if key != stackName {
|
||||
return fmt.Errorf("service %s already present in stack named %s", service, key)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("service %s already present in the cluster", service)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func getServices(composeFile string) ([]string, error) {
|
||||
parsed, err := loader.ParseYAML([]byte(composeFile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config, err := loader.Load(types.ConfigDetails{
|
||||
WorkingDir: ".",
|
||||
ConfigFiles: []types.ConfigFile{
|
||||
{
|
||||
Config: parsed,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
services := make([]string, len(config.Services))
|
||||
for i := range config.Services {
|
||||
services[i] = config.Services[i].Name
|
||||
}
|
||||
sort.Strings(services)
|
||||
return services, nil
|
||||
}
|
|
@ -0,0 +1,171 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
appsv1beta2 "k8s.io/api/apps/v1beta2"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
)
|
||||
|
||||
// Pod conversion
|
||||
func podToTask(pod apiv1.Pod) swarm.Task {
|
||||
var startTime time.Time
|
||||
if pod.Status.StartTime != nil {
|
||||
startTime = (*pod.Status.StartTime).Time
|
||||
}
|
||||
task := swarm.Task{
|
||||
ID: string(pod.UID),
|
||||
NodeID: pod.Spec.NodeName,
|
||||
Spec: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{
|
||||
Image: getContainerImage(pod.Spec.Containers),
|
||||
},
|
||||
},
|
||||
DesiredState: podPhaseToState(pod.Status.Phase),
|
||||
Status: swarm.TaskStatus{
|
||||
State: podPhaseToState(pod.Status.Phase),
|
||||
Timestamp: startTime,
|
||||
PortStatus: swarm.PortStatus{
|
||||
Ports: getPorts(pod.Spec.Containers),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
pod.GetLabels()
|
||||
return task
|
||||
}
|
||||
|
||||
func podPhaseToState(phase apiv1.PodPhase) swarm.TaskState {
|
||||
switch phase {
|
||||
case apiv1.PodPending:
|
||||
return swarm.TaskStatePending
|
||||
case apiv1.PodRunning:
|
||||
return swarm.TaskStateRunning
|
||||
case apiv1.PodSucceeded:
|
||||
return swarm.TaskStateComplete
|
||||
case apiv1.PodFailed:
|
||||
return swarm.TaskStateFailed
|
||||
default:
|
||||
return swarm.TaskState("unknown")
|
||||
}
|
||||
}
|
||||
|
||||
func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol {
|
||||
switch protocol {
|
||||
case apiv1.ProtocolTCP:
|
||||
return swarm.PortConfigProtocolTCP
|
||||
case apiv1.ProtocolUDP:
|
||||
return swarm.PortConfigProtocolUDP
|
||||
}
|
||||
return swarm.PortConfigProtocol("unknown")
|
||||
}
|
||||
|
||||
func fetchPods(namespace string, pods corev1.PodInterface) ([]apiv1.Pod, error) {
|
||||
labelSelector := labels.SelectorForStack(namespace)
|
||||
podsList, err := pods.List(metav1.ListOptions{LabelSelector: labelSelector})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return podsList.Items, nil
|
||||
}
|
||||
|
||||
func getContainerImage(containers []apiv1.Container) string {
|
||||
if len(containers) == 0 {
|
||||
return ""
|
||||
}
|
||||
return containers[0].Image
|
||||
}
|
||||
|
||||
func getPorts(containers []apiv1.Container) []swarm.PortConfig {
|
||||
if len(containers) == 0 || len(containers[0].Ports) == 0 {
|
||||
return nil
|
||||
}
|
||||
ports := make([]swarm.PortConfig, len(containers[0].Ports))
|
||||
for i, port := range containers[0].Ports {
|
||||
ports[i] = swarm.PortConfig{
|
||||
PublishedPort: uint32(port.HostPort),
|
||||
TargetPort: uint32(port.ContainerPort),
|
||||
Protocol: toSwarmProtocol(port.Protocol),
|
||||
}
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
type tasksBySlot []swarm.Task
|
||||
|
||||
func (t tasksBySlot) Len() int {
|
||||
return len(t)
|
||||
}
|
||||
|
||||
func (t tasksBySlot) Swap(i, j int) {
|
||||
t[i], t[j] = t[j], t[i]
|
||||
}
|
||||
|
||||
func (t tasksBySlot) Less(i, j int) bool {
|
||||
// Sort by slot.
|
||||
if t[i].Slot != t[j].Slot {
|
||||
return t[i].Slot < t[j].Slot
|
||||
}
|
||||
|
||||
// If same slot, sort by most recent.
|
||||
return t[j].Meta.CreatedAt.Before(t[i].CreatedAt)
|
||||
}
|
||||
|
||||
// Replicas conversion
|
||||
func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) {
|
||||
result := make([]swarm.Service, len(replicas.Items))
|
||||
infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items))
|
||||
for i, r := range replicas.Items {
|
||||
service, ok := findService(services, r.Labels[labels.ForServiceName])
|
||||
if !ok {
|
||||
return nil, nil, fmt.Errorf("could not find service '%s'", r.Labels[labels.ForServiceName])
|
||||
}
|
||||
uid := string(service.UID)
|
||||
s := swarm.Service{
|
||||
ID: uid,
|
||||
Spec: swarm.ServiceSpec{
|
||||
Annotations: swarm.Annotations{
|
||||
Name: service.Name,
|
||||
},
|
||||
TaskTemplate: swarm.TaskSpec{
|
||||
ContainerSpec: &swarm.ContainerSpec{
|
||||
Image: getContainerImage(r.Spec.Template.Spec.Containers),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if service.Spec.Type == apiv1.ServiceTypeLoadBalancer {
|
||||
configs := make([]swarm.PortConfig, len(service.Spec.Ports))
|
||||
for i, p := range service.Spec.Ports {
|
||||
configs[i] = swarm.PortConfig{
|
||||
PublishMode: swarm.PortConfigPublishModeIngress,
|
||||
PublishedPort: uint32(p.Port),
|
||||
TargetPort: uint32(p.TargetPort.IntValue()),
|
||||
Protocol: toSwarmProtocol(p.Protocol),
|
||||
}
|
||||
}
|
||||
s.Endpoint = swarm.Endpoint{Ports: configs}
|
||||
}
|
||||
result[i] = s
|
||||
infos[uid] = formatter.ServiceListInfo{
|
||||
Mode: "replicated",
|
||||
Replicas: fmt.Sprintf("%d/%d", r.Status.AvailableReplicas, r.Status.Replicas),
|
||||
}
|
||||
}
|
||||
return result, infos, nil
|
||||
}
|
||||
|
||||
func findService(services *apiv1.ServiceList, name string) (apiv1.Service, bool) {
|
||||
for _, s := range services.Items {
|
||||
if s.Name == name {
|
||||
return s, true
|
||||
}
|
||||
}
|
||||
return apiv1.Service{}, false
|
||||
}
|
|
@ -0,0 +1,41 @@
package kubernetes

import (
    "github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
    apiv1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// toConfigMap converts a Compose Config to a Kube ConfigMap.
func toConfigMap(stackName, name, key string, content []byte) *apiv1.ConfigMap {
    return &apiv1.ConfigMap{
        TypeMeta: metav1.TypeMeta{
            Kind:       "ConfigMap",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                labels.ForStackName: stackName,
            },
        },
        Data: map[string]string{
            key: string(content),
        },
    }
}

// toSecret converts a Compose Secret to a Kube Secret.
func toSecret(stackName, name, key string, content []byte) *apiv1.Secret {
    return &apiv1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                labels.ForStackName: stackName,
            },
        },
        Data: map[string][]byte{
            key: content,
        },
    }
}
@ -0,0 +1,158 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"path"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/stack/common"
|
||||
composeTypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
)
|
||||
|
||||
type deployOptions struct {
|
||||
composefile string
|
||||
stack string
|
||||
}
|
||||
|
||||
func newDeployCommand(dockerCli command.Cli, kubeCli *kubeCli) *cobra.Command {
|
||||
var opts deployOptions
|
||||
cmd := &cobra.Command{
|
||||
Use: "deploy [OPTIONS] STACK",
|
||||
Aliases: []string{"up"},
|
||||
Short: "Deploy a new stack or update an existing stack",
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
opts.stack = args[0]
|
||||
return runDeploy(dockerCli, kubeCli, opts)
|
||||
},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
common.AddComposefileFlag(&opts.composefile, flags)
|
||||
// FIXME(vdemeester) other flags ? (bundlefile, registry-auth, prune, resolve-image) ?
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runDeploy(dockerCli command.Cli, kubeCli *kubeCli, opts deployOptions) error {
|
||||
cmdOut := dockerCli.Out()
|
||||
// Check arguments
|
||||
if opts.composefile == "" {
|
||||
return errors.Errorf("Please specify a Compose file (with --compose-file).")
|
||||
}
|
||||
// Initialize clients
|
||||
stacks, err := kubeCli.Stacks()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
composeClient, err := kubeCli.ComposeClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
configMaps := composeClient.ConfigMaps()
|
||||
secrets := composeClient.Secrets()
|
||||
services := composeClient.Services()
|
||||
pods := composeClient.Pods()
|
||||
watcher := DeployWatcher{
|
||||
Pods: pods,
|
||||
}
|
||||
|
||||
// Parse the compose file
|
||||
stack, cfg, err := LoadStack(opts.stack, opts.composefile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// FIXME(vdemeester) handle warnings server-side
|
||||
|
||||
if err = IsColliding(services, stack); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = createFileBasedConfigMaps(stack.Name, cfg.Configs, configMaps); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err = createFileBasedSecrets(stack.Name, cfg.Secrets, secrets); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if in, err := stacks.Get(stack.Name, metav1.GetOptions{}); err == nil {
|
||||
in.Spec = stack.Spec
|
||||
|
||||
if _, err = stacks.Update(in); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("Stack %s was updated\n", stack.Name)
|
||||
} else {
|
||||
if _, err = stacks.Create(stack); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintf(cmdOut, "Stack %s was created\n", stack.Name)
|
||||
}
|
||||
|
||||
fmt.Fprintln(cmdOut, "Waiting for the stack to be stable and running...")
|
||||
|
||||
<-watcher.Watch(stack, serviceNames(cfg))
|
||||
|
||||
fmt.Fprintf(cmdOut, "Stack %s is stable and running\n\n", stack.Name)
|
||||
// fmt.Fprintf(cmdOut, "Read the logs with:\n $ %s stack logs %s\n", filepath.Base(os.Args[0]), stack.Name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createFileBasedConfigMaps creates a Kubernetes ConfigMap for each Compose global file-based config.
|
||||
func createFileBasedConfigMaps(stackName string, globalConfigs map[string]composeTypes.ConfigObjConfig, configMaps corev1.ConfigMapInterface) error {
|
||||
for name, config := range globalConfigs {
|
||||
if config.File == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := path.Base(config.File)
|
||||
content, err := ioutil.ReadFile(config.File)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
configMap := toConfigMap(stackName, name, fileName, content)
|
||||
configMaps.Create(configMap)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func serviceNames(cfg *composeTypes.Config) []string {
|
||||
names := []string{}
|
||||
|
||||
for _, service := range cfg.Services {
|
||||
names = append(names, service.Name)
|
||||
}
|
||||
|
||||
return names
|
||||
}
|
||||
|
||||
// createFileBasedSecrets creates a Kubernetes Secret for each Compose global file-based secret.
|
||||
func createFileBasedSecrets(stackName string, globalSecrets map[string]composeTypes.SecretConfig, secrets corev1.SecretInterface) error {
|
||||
for name, secret := range globalSecrets {
|
||||
if secret.File == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
fileName := path.Base(secret.File)
|
||||
content, err := ioutil.ReadFile(secret.File)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
secret := toSecret(stackName, name, fileName, content)
|
||||
secrets.Create(secret)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"vbom.ml/util/sortorder"
|
||||
)
|
||||
|
||||
type listOptions struct {
|
||||
format string
|
||||
}
|
||||
|
||||
func newListCommand(dockerCli command.Cli, kubeCli *kubeCli) *cobra.Command {
|
||||
opts := listOptions{}
|
||||
cmd := &cobra.Command{
|
||||
Use: "ls",
|
||||
Aliases: []string{"list"},
|
||||
Short: "List stacks",
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return runList(dockerCli, kubeCli, opts)
|
||||
},
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&opts.format, "format", "", "Pretty-print stacks using a Go template")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runList(dockerCli command.Cli, kubeCli *kubeCli, opts listOptions) error {
|
||||
stacks, err := getStacks(kubeCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
format := opts.format
|
||||
if len(format) == 0 {
|
||||
format = formatter.TableFormatKey
|
||||
}
|
||||
stackCtx := formatter.Context{
|
||||
Output: dockerCli.Out(),
|
||||
Format: formatter.NewStackFormat(format),
|
||||
}
|
||||
sort.Sort(byName(stacks))
|
||||
return formatter.StackWrite(stackCtx, stacks)
|
||||
}
|
||||
|
||||
type byName []*formatter.Stack
|
||||
|
||||
func (n byName) Len() int { return len(n) }
|
||||
func (n byName) Swap(i, j int) { n[i], n[j] = n[j], n[i] }
|
||||
func (n byName) Less(i, j int) bool { return sortorder.NaturalLess(n[i].Name, n[j].Name) }
|
||||
|
||||
func getStacks(kubeCli *kubeCli) ([]*formatter.Stack, error) {
|
||||
stackSvc, err := kubeCli.Stacks()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stacks, err := stackSvc.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var formattedStacks []*formatter.Stack
|
||||
for _, stack := range stacks.Items {
|
||||
services, err := getServices(stack.Spec.ComposeFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
formattedStacks = append(formattedStacks, &formatter.Stack{
|
||||
Name: stack.Name,
|
||||
Services: len(services),
|
||||
})
|
||||
}
|
||||
return formattedStacks, nil
|
||||
}
|
|
@ -0,0 +1,167 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
|
||||
"github.com/docker/cli/cli/compose/loader"
|
||||
"github.com/docker/cli/cli/compose/template"
|
||||
composetypes "github.com/docker/cli/cli/compose/types"
|
||||
"github.com/pkg/errors"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// LoadStack loads a stack from a Compose file, with a given name.
|
||||
func LoadStack(name, composeFile string) (*apiv1beta1.Stack, *composetypes.Config, error) {
|
||||
if composeFile == "" {
|
||||
return nil, nil, errors.New("compose-file must be set")
|
||||
}
|
||||
|
||||
workingDir, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
composePath := composeFile
|
||||
if !strings.HasPrefix(composePath, "/") {
|
||||
composePath = filepath.Join(workingDir, composeFile)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(composePath); os.IsNotExist(err) {
|
||||
return nil, nil, errors.Errorf("no compose file found in %s", filepath.Dir(composePath))
|
||||
}
|
||||
|
||||
binary, err := ioutil.ReadFile(composePath)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "cannot load compose file")
|
||||
}
|
||||
|
||||
return load(name, binary, env())
|
||||
}
|
||||
|
||||
func load(name string, binary []byte, env map[string]string) (*apiv1beta1.Stack, *composetypes.Config, error) {
|
||||
processed, err := template.Substitute(string(binary), func(key string) (string, bool) { return env[key], true })
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "cannot load compose file")
|
||||
}
|
||||
|
||||
parsed, err := loader.ParseYAML([]byte(processed))
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "cannot load compose file")
|
||||
}
|
||||
|
||||
cfg, err := loader.Load(composetypes.ConfigDetails{
|
||||
WorkingDir: ".",
|
||||
ConfigFiles: []composetypes.ConfigFile{
|
||||
{
|
||||
Config: parsed,
|
||||
},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "cannot load compose file")
|
||||
}
|
||||
|
||||
result, err := processEnvFiles(processed, parsed, cfg)
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "cannot load compose file")
|
||||
}
|
||||
|
||||
return &apiv1beta1.Stack{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: apiv1beta1.StackSpec{
|
||||
ComposeFile: result,
|
||||
},
|
||||
}, cfg, nil
|
||||
}
|
||||
|
||||
type iMap = map[string]interface{}
|
||||
|
||||
func processEnvFiles(input string, parsed map[string]interface{}, config *composetypes.Config) (string, error) {
|
||||
changed := false
|
||||
|
||||
for _, svc := range config.Services {
|
||||
if len(svc.EnvFile) == 0 {
|
||||
continue
|
||||
}
|
||||
// Load() processed the env_file for us, we just need to inject back into
|
||||
// the intermediate representation
|
||||
env := iMap{}
|
||||
for k, v := range svc.Environment {
|
||||
env[k] = v
|
||||
}
|
||||
parsed["services"].(iMap)[svc.Name].(iMap)["environment"] = env
|
||||
delete(parsed["services"].(iMap)[svc.Name].(iMap), "env_file")
|
||||
changed = true
|
||||
}
|
||||
if !changed {
|
||||
return input, nil
|
||||
}
|
||||
res, err := yaml.Marshal(parsed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(res), nil
|
||||
}
|
||||
|
||||
func env() map[string]string {
|
||||
// Apply .env file first
|
||||
config := readEnvFile(".env")
|
||||
|
||||
// Apply env variables
|
||||
for k, v := range envToMap(os.Environ()) {
|
||||
config[k] = v
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
func readEnvFile(path string) map[string]string {
|
||||
config := map[string]string{}
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return config // Ignore
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
scanner := bufio.NewScanner(file)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.HasPrefix(strings.TrimSpace(line), "#") {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, "=", 2)
|
||||
if len(parts) == 2 {
|
||||
key := parts[0]
|
||||
value := parts[1]
|
||||
|
||||
config[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
||||
|
||||
func envToMap(env []string) map[string]string {
|
||||
config := map[string]string{}
|
||||
|
||||
for _, value := range env {
|
||||
parts := strings.SplitN(value, "=", 2)
|
||||
|
||||
key := parts[0]
|
||||
value := parts[1]
|
||||
|
||||
config[key] = value
|
||||
}
|
||||
|
||||
return config
|
||||
}
|
|
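A hypothetical usage sketch (not part of this commit) of the exported `LoadStack` helper above, loading a stack object from a Compose file before handing it to the stacks client, as the deploy command earlier in this diff does; the import path and file name are assumptions.

```go
package example

import (
    "fmt"

    stackkube "github.com/docker/cli/cli/command/stack/kubernetes" // assumed import path
)

// loadExample parses an assumed docker-compose.yml into a Stack API object
// plus the parsed Compose configuration.
func loadExample() error {
    stack, cfg, err := stackkube.LoadStack("mystack", "docker-compose.yml")
    if err != nil {
        return err
    }
    fmt.Printf("stack %s defines %d service(s)\n", stack.Name, len(cfg.Services))
    return nil
}
```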
@ -0,0 +1,34 @@
package kubernetes

import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestPlaceholders(t *testing.T) {
    env := map[string]string{
        "TAG": "_latest_",
        "K1":  "V1",
        "K2":  "V2",
    }

    prefix := "version: '3'\nvolumes:\n data:\n external:\n name: "
    var tests = []struct {
        input          string
        expectedOutput string
    }{
        {prefix + "BEFORE${TAG}AFTER", prefix + "BEFORE_latest_AFTER"},
        {prefix + "BEFORE${K1}${K2}AFTER", prefix + "BEFOREV1V2AFTER"},
        {prefix + "BEFORE$TAG AFTER", prefix + "BEFORE_latest_ AFTER"},
        {prefix + "BEFORE$$TAG AFTER", prefix + "BEFORE$TAG AFTER"},
        {prefix + "BEFORE $UNKNOWN AFTER", prefix + "BEFORE AFTER"},
    }

    for _, test := range tests {
        output, _, err := load("stack", []byte(test.input), env)
        require.NoError(t, err)
        assert.Equal(t, test.expectedOutput, output.Spec.ComposeFile)
    }
}
@ -0,0 +1,138 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/cli/cli/command/task"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/spf13/cobra"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
|
||||
type psOptions struct {
|
||||
filter opts.FilterOpt
|
||||
noTrunc bool
|
||||
namespace string
|
||||
noResolve bool
|
||||
quiet bool
|
||||
format string
|
||||
}
|
||||
|
||||
func newPsCommand(dockerCli command.Cli, kubeCli *kubeCli) *cobra.Command {
|
||||
options := psOptions{filter: opts.NewFilterOpt()}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "ps [OPTIONS] STACK",
|
||||
Short: "List the tasks in the stack",
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.namespace = args[0]
|
||||
return runPS(dockerCli, kubeCli, options)
|
||||
},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Do not truncate output")
|
||||
flags.BoolVar(&options.noResolve, "no-resolve", false, "Do not map IDs to Names")
|
||||
flags.VarP(&options.filter, "filter", "f", "Filter output based on conditions provided")
|
||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display task IDs")
|
||||
flags.StringVar(&options.format, "format", "", "Pretty-print tasks using a Go template")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runPS(dockerCli command.Cli, kubeCli *kubeCli, options psOptions) error {
|
||||
namespace := options.namespace
|
||||
|
||||
// Initialize clients
|
||||
client, err := kubeCli.ComposeClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stacks, err := kubeCli.Stacks()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
podsClient := client.Pods()
|
||||
|
||||
// Fetch pods
|
||||
if _, err := stacks.Get(namespace, metav1.GetOptions{}); err != nil {
|
||||
return fmt.Errorf("nothing found in stack: %s", namespace)
|
||||
}
|
||||
|
||||
pods, err := fetchPods(namespace, podsClient)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(pods) == 0 {
|
||||
return fmt.Errorf("nothing found in stack: %s", namespace)
|
||||
}
|
||||
|
||||
format := options.format
|
||||
if len(format) == 0 {
|
||||
format = task.DefaultFormat(dockerCli.ConfigFile(), options.quiet)
|
||||
}
|
||||
nodeResolver := makeNodeResolver(options.noResolve, client.Nodes())
|
||||
|
||||
tasks := make([]swarm.Task, len(pods))
|
||||
for i, pod := range pods {
|
||||
tasks[i] = podToTask(pod)
|
||||
}
|
||||
return print(dockerCli, tasks, pods, nodeResolver, !options.noTrunc, options.quiet, format)
|
||||
}
|
||||
|
||||
type idResolver func(name string) (string, error)
|
||||
|
||||
func print(dockerCli command.Cli, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error {
|
||||
sort.Stable(tasksBySlot(tasks))
|
||||
|
||||
names := map[string]string{}
|
||||
nodes := map[string]string{}
|
||||
|
||||
tasksCtx := formatter.Context{
|
||||
Output: dockerCli.Out(),
|
||||
Format: formatter.NewTaskFormat(format, quiet),
|
||||
Trunc: trunc,
|
||||
}
|
||||
|
||||
for i, task := range tasks {
|
||||
nodeValue, err := nodeResolver(pods[i].Spec.NodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
names[task.ID] = pods[i].Name
|
||||
nodes[task.ID] = nodeValue
|
||||
}
|
||||
|
||||
return formatter.TaskWrite(tasksCtx, tasks, names, nodes)
|
||||
}
|
||||
|
||||
func makeNodeResolver(noResolve bool, nodes corev1.NodeInterface) func(string) (string, error) {
|
||||
// Here we have a name and we need to resolve its identifier. To mimic swarm behavior
|
||||
// we need to resolve the id when noresolve is set, otherwise we return the name.
|
||||
if noResolve {
|
||||
return func(name string) (string, error) {
|
||||
n, err := nodes.List(metav1.ListOptions{
|
||||
FieldSelector: fields.OneTermEqualSelector(api.ObjectNameField, name).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(n.Items) != 1 {
|
||||
return "", fmt.Errorf("could not find node '%s'", name)
|
||||
}
|
||||
return string(n.Items[0].UID), nil
|
||||
}
|
||||
}
|
||||
return func(name string) (string, error) { return name, nil }
|
||||
}
|
|
@ -0,0 +1,46 @@
package kubernetes

import (
    "fmt"

    "github.com/docker/cli/cli"
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

type removeOptions struct {
    stacks []string
}

func newRemoveCommand(dockerCli command.Cli, kubeCli *kubeCli) *cobra.Command {
    var opts removeOptions

    cmd := &cobra.Command{
        Use:     "rm STACK [STACK...]",
        Aliases: []string{"remove", "down"},
        Short:   "Remove one or more stacks",
        Args:    cli.RequiresMinArgs(1),
        RunE: func(cmd *cobra.Command, args []string) error {
            opts.stacks = args
            return runRemove(dockerCli, kubeCli, opts)
        },
    }
    return cmd
}

func runRemove(dockerCli command.Cli, kubeCli *kubeCli, opts removeOptions) error {
    stacks, err := kubeCli.Stacks()
    if err != nil {
        return err
    }
    for _, stack := range opts.stacks {
        fmt.Fprintf(dockerCli.Out(), "Removing stack: %s\n", stack)
        err := stacks.Delete(stack, &metav1.DeleteOptions{})
        if err != nil {
            fmt.Fprintf(dockerCli.Out(), "Failed to remove stack %s: %s\n", stack, err)
            return err
        }
    }
    return nil
}
@ -0,0 +1,90 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
|
||||
"github.com/spf13/cobra"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type servicesOptions struct {
|
||||
quiet bool
|
||||
format string
|
||||
namespace string
|
||||
}
|
||||
|
||||
func newServicesCommand(dockerCli command.Cli, kubeCli *kubeCli) *cobra.Command {
|
||||
var options servicesOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "services [OPTIONS] STACK",
|
||||
Short: "List the services in the stack",
|
||||
Args: cli.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.namespace = args[0]
|
||||
return runServices(dockerCli, kubeCli, options)
|
||||
},
|
||||
}
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVarP(&options.quiet, "quiet", "q", false, "Only display IDs")
|
||||
flags.StringVar(&options.format, "format", "", "Pretty-print services using a Go template")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runServices(dockerCli command.Cli, kubeCli *kubeCli, options servicesOptions) error {
|
||||
// Initialize clients
|
||||
client, err := kubeCli.ComposeClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stacks, err := kubeCli.Stacks()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
replicas := client.ReplicaSets()
|
||||
|
||||
if _, err := stacks.Get(options.namespace, metav1.GetOptions{}); err != nil {
|
||||
fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", options.namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
replicasList, err := replicas.List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(options.namespace)})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
servicesList, err := client.Services().List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(options.namespace)})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Convert replica sets and kubernetes services to swarm services and formatter information
|
||||
services, info, err := replicasToServices(replicasList, servicesList)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if options.quiet {
|
||||
info = map[string]formatter.ServiceListInfo{}
|
||||
}
|
||||
|
||||
format := options.format
|
||||
if len(format) == 0 {
|
||||
if len(dockerCli.ConfigFile().ServicesFormat) > 0 && !options.quiet {
|
||||
format = dockerCli.ConfigFile().ServicesFormat
|
||||
} else {
|
||||
format = formatter.TableFormatKey
|
||||
}
|
||||
}
|
||||
|
||||
servicesCtx := formatter.Context{
|
||||
Output: dockerCli.Out(),
|
||||
Format: formatter.NewServiceListFormat(format, options.quiet),
|
||||
}
|
||||
return formatter.ServiceListWrite(servicesCtx, services, info)
|
||||
}
|
|
@ -0,0 +1,117 @@
|
|||
package kubernetes
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
apiv1beta1 "github.com/docker/cli/cli/command/stack/kubernetes/api/compose/v1beta1"
|
||||
"github.com/docker/cli/cli/command/stack/kubernetes/api/labels"
|
||||
apiv1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
)
|
||||
|
||||
// DeployWatcher watches a stack deployment
|
||||
type DeployWatcher struct {
|
||||
Pods corev1.PodInterface
|
||||
}
|
||||
|
||||
// Watch watches a stack deployment and returns a chan that will hold the state of the stack
|
||||
func (w DeployWatcher) Watch(stack *apiv1beta1.Stack, serviceNames []string) chan bool {
|
||||
stop := make(chan bool)
|
||||
|
||||
go w.waitForPods(stack.Name, serviceNames, stop)
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
func (w DeployWatcher) waitForPods(stackName string, serviceNames []string, stop chan bool) {
|
||||
starts := map[string]int32{}
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
|
||||
list, err := w.Pods.List(metav1.ListOptions{
|
||||
LabelSelector: labels.SelectorForStack(stackName),
|
||||
IncludeUninitialized: true,
|
||||
})
|
||||
if err != nil {
|
||||
stop <- true
|
||||
return
|
||||
}
|
||||
|
||||
for i := range list.Items {
|
||||
pod := list.Items[i]
|
||||
if pod.Status.Phase != apiv1.PodRunning {
|
||||
continue
|
||||
}
|
||||
|
||||
startCount := startCount(pod)
|
||||
serviceName := pod.Labels[labels.ForServiceName]
|
||||
if startCount != starts[serviceName] {
|
||||
if startCount == 1 {
|
||||
fmt.Printf(" - Service %s has one container running\n", serviceName)
|
||||
} else {
|
||||
fmt.Printf(" - Service %s was restarted %d %s\n", serviceName, startCount-1, timeTimes(startCount-1))
|
||||
}
|
||||
|
||||
starts[serviceName] = startCount
|
||||
}
|
||||
}
|
||||
|
||||
if allReady(list.Items, serviceNames) {
|
||||
stop <- true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func startCount(pod apiv1.Pod) int32 {
|
||||
restart := int32(0)
|
||||
|
||||
for _, status := range pod.Status.ContainerStatuses {
|
||||
restart += status.RestartCount
|
||||
}
|
||||
|
||||
return 1 + restart
|
||||
}
|
||||
|
||||
func allReady(pods []apiv1.Pod, serviceNames []string) bool {
|
||||
serviceUp := map[string]bool{}
|
||||
|
||||
for _, pod := range pods {
|
||||
if time.Since(pod.GetCreationTimestamp().Time) < 10*time.Second {
|
||||
return false
|
||||
}
|
||||
|
||||
ready := false
|
||||
for _, cond := range pod.Status.Conditions {
|
||||
if cond.Type == apiv1.PodReady && cond.Status == apiv1.ConditionTrue {
|
||||
ready = true
|
||||
}
|
||||
}
|
||||
|
||||
if !ready {
|
||||
return false
|
||||
}
|
||||
|
||||
serviceName := pod.Labels[labels.ForServiceName]
|
||||
serviceUp[serviceName] = true
|
||||
}
|
||||
|
||||
for _, serviceName := range serviceNames {
|
||||
if !serviceUp[serviceName] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func timeTimes(n int32) string {
|
||||
if n == 1 {
|
||||
return "time"
|
||||
}
|
||||
|
||||
return "times"
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"strings"
|
|
@ -0,0 +1,22 @@
package swarm

import (
    "github.com/docker/cli/cli/command"
    "github.com/spf13/cobra"
)

// AddStackCommands adds `stack` subcommands
func AddStackCommands(root *cobra.Command, dockerCli command.Cli) {
    root.AddCommand(
        newDeployCommand(dockerCli),
        newListCommand(dockerCli),
        newRemoveCommand(dockerCli),
        newServicesCommand(dockerCli),
        newPsCommand(dockerCli),
    )
}

// NewTopLevelDeployCommand returns a command for `docker deploy`
func NewTopLevelDeployCommand(dockerCli command.Cli) *cobra.Command {
    return newDeployCommand(dockerCli)
}
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"github.com/docker/cli/cli/compose/convert"
|
|
@ -1,10 +1,11 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/stack/common"
|
||||
"github.com/docker/cli/cli/compose/convert"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
|
@ -44,9 +45,9 @@ func newDeployCommand(dockerCli command.Cli) *cobra.Command {
|
|||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
addBundlefileFlag(&opts.bundlefile, flags)
|
||||
addComposefileFlag(&opts.composefile, flags)
|
||||
addRegistryAuthFlag(&opts.sendRegistryAuth, flags)
|
||||
common.AddBundlefileFlag(&opts.bundlefile, flags)
|
||||
common.AddComposefileFlag(&opts.composefile, flags)
|
||||
common.AddRegistryAuthFlag(&opts.sendRegistryAuth, flags)
|
||||
flags.BoolVar(&opts.prune, "prune", false, "Prune services that are no longer referenced")
|
||||
flags.SetAnnotation("prune", "version", []string{"1.27"})
|
||||
flags.StringVar(&opts.resolveImage, "resolve-image", resolveImageAlways,
|
|
@ -1,16 +1,17 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/stack/common"
|
||||
"github.com/docker/cli/cli/compose/convert"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/swarm"
|
||||
)
|
||||
|
||||
func deployBundle(ctx context.Context, dockerCli command.Cli, opts deployOptions) error {
|
||||
bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
|
||||
bundle, err := common.LoadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"os"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"testing"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"sort"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"errors"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"fmt"
|
|
@ -1,4 +1,4 @@
|
|||
package stack
|
||||
package swarm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
|
@ -9,6 +9,7 @@ import (
|
|||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/orchestrator"
|
||||
"github.com/docker/cli/templates"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/spf13/cobra"
|
||||
|
@ -24,6 +25,7 @@ Client:{{if ne .Platform.Name ""}} {{.Platform.Name}}{{end}}
|
|||
Built: {{.BuildTime}}
|
||||
OS/Arch: {{.Os}}/{{.Arch}}
|
||||
Experimental: {{.Experimental}}
|
||||
Orchestrator: {{.Client.Orchestrator}}
|
||||
{{- end}}
|
||||
|
||||
{{- if .ServerOK}}{{with .Server}}
|
||||
|
@ -71,6 +73,7 @@ type clientVersion struct {
|
|||
Arch string
|
||||
BuildTime string `json:",omitempty"`
|
||||
Experimental bool
|
||||
Orchestrator string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// ServerOK returns true when the client could connect to the docker server
|
||||
|
@ -136,6 +139,7 @@ func runVersion(dockerCli *command.DockerCli, opts *versionOptions) error {
|
|||
Os: runtime.GOOS,
|
||||
Arch: runtime.GOARCH,
|
||||
Experimental: dockerCli.ClientInfo().HasExperimental,
|
||||
Orchestrator: string(orchestrator.GetOrchestrator(dockerCli)),
|
||||
},
|
||||
}
|
||||
vd.Client.Platform.Name = cli.PlatformName
|
||||
|
|
|
@ -45,6 +45,7 @@ type ConfigFile struct {
|
|||
PruneFilters []string `json:"pruneFilters,omitempty"`
|
||||
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
|
||||
Experimental string `json:"experimental,omitempty"`
|
||||
Orchestrator string `json:"orchestrator,omitempty"`
|
||||
}
|
||||
|
||||
// ProxyConfig contains proxy configuration settings
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
|
||||
FROM golang:1.9.2-alpine3.6
|
||||
|
||||
RUN apk add -U git make bash coreutils ca-certificates
|
||||
RUN apk add -U git make bash coreutils ca-certificates openssh
|
||||
|
||||
ARG VNDR_SHA=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
|
||||
RUN go get -d github.com/LK4D4/vndr && \
|
||||
|
|
|
@ -0,0 +1,17 @@
package orchestrator

import (
    "fmt"
    "os"
    "testing"

    "github.com/docker/cli/internal/test/environment"
)

func TestMain(m *testing.M) {
    if err := environment.Setup(); err != nil {
        fmt.Println(err.Error())
        os.Exit(3)
    }
    os.Exit(m.Run())
}
@ -0,0 +1,45 @@
package orchestrator

import (
    "fmt"
    "os"
    "testing"

    shlex "github.com/flynn-archive/go-shlex"
    "github.com/gotestyourself/gotestyourself/fs"
    "github.com/gotestyourself/gotestyourself/icmd"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestVersionWithDefaultOrchestrator(t *testing.T) {
    // Orchestrator by default
    result := icmd.RunCmd(shell(t, "docker version"))
    result.Assert(t, icmd.Success)
    assert.Contains(t, result.Stdout(), "Orchestrator: swarm")
}

func TestVersionWithOverriddenEnvOrchestrator(t *testing.T) {
    // Override orchestrator using environment variable
    result := icmd.RunCmd(shell(t, "docker version"), func(cmd *icmd.Cmd) {
        cmd.Env = append(cmd.Env, append(os.Environ(), "DOCKER_ORCHESTRATOR=kubernetes")...)
    })
    result.Assert(t, icmd.Success)
    assert.Contains(t, result.Stdout(), "Orchestrator: kubernetes")
}

func TestVersionWithOverriddenConfigOrchestrator(t *testing.T) {
    // Override orchestrator using configuration file
    configDir := fs.NewDir(t, "config", fs.WithFile("config.json", `{"orchestrator": "kubernetes"}`))
    defer configDir.Remove()
    result := icmd.RunCmd(shell(t, fmt.Sprintf("docker --config %s version", configDir.Path())))
    result.Assert(t, icmd.Success)
    assert.Contains(t, result.Stdout(), "Orchestrator: kubernetes")
}

// TODO: move to gotestyourself
func shell(t *testing.T, format string, args ...interface{}) icmd.Cmd {
    cmd, err := shlex.Split(fmt.Sprintf(format, args...))
    require.NoError(t, err)
    return icmd.Cmd{Command: cmd}
}
@ -4,6 +4,9 @@
    "Sort": ["linter", "severity", "path", "line"],
    "Exclude": [
        "cli/compose/schema/bindata.go",
        "cli/command/stack/kubernetes/api/openapi",
        "cli/command/stack/kubernetes/api/client",
        ".*generated.*",
        "parameter .* always receives"
    ],
    "EnableGC": true,
vendor.conf
|
@ -1,7 +1,7 @@
|
|||
github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c
|
||||
github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
|
||||
github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
|
||||
github.com/coreos/etcd v3.2.1
|
||||
# github.com/coreos/etcd v3.2.1
|
||||
github.com/cpuguy83/go-md2man a65d4d2de4d5f7c74868dfa9b202a3c8be315aaa
|
||||
github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
|
||||
github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
|
||||
|
@ -44,13 +44,49 @@ github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
|
|||
github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a
|
||||
github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45
|
||||
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
|
||||
golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
|
||||
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
|
||||
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||
# golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
|
||||
# golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
|
||||
# golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
|
||||
golang.org/x/sys 95c6576299259db960f6c5b9b69ea52422860fce
|
||||
golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
|
||||
# golang.org/x/net c427ad74c6d7a814201695e9ffde0c5d400a7674
|
||||
# golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
|
||||
golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
|
||||
golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
|
||||
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
|
||||
google.golang.org/grpc v1.3.0
|
||||
gopkg.in/yaml.v2 4c78c975fe7c825c6d1466c42be594d1d6f3aba6
|
||||
vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
|
||||
# vbom.ml/util 928aaa586d7718c70f4090ddf83f2b34c16fdc8d
|
||||
|
||||
github.com/coreos/etcd 46ee06a85cdf0c02ea453bbae71c86f27f3f2ee5
|
||||
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
|
||||
github.com/emicklei/go-restful-swagger12 dcef7f55730566d41eae5db10e7d6981829720f6
|
||||
github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
|
||||
github.com/go-openapi/jsonpointer 46af16f9f7b149af66e5d1bd010e3574dc06de98
|
||||
github.com/go-openapi/jsonreference 13c6e3589ad90f49bd3e3bbe2c2cb3d7a4142272
|
||||
github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff
|
||||
github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
|
||||
github.com/golang/glog 44145f04b68cf362d9c4df2182967c2275eaefed
|
||||
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
|
||||
github.com/googleapis/gnostic e4f56557df6250e1945ee6854f181ce4e1c2c646
|
||||
github.com/grpc-ecosystem/grpc-gateway 1a03ca3bad1e1ebadaedd3abb76bc58d4ac8143b
|
||||
github.com/howeyc/gopass 3ca23474a7c7203e0a0a070fd33508f6efdb9b3d
|
||||
github.com/imdario/mergo 6633656539c1639d9d78127b7d47c622b5d7b6dc
|
||||
github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
|
||||
github.com/mailru/easyjson d5b7844b561a7bc640052f1b935f7b800330d7e0
|
||||
github.com/PuerkitoBio/purell 8a290539e2e8629dbc4e6bad948158f790ec31f4
|
||||
github.com/PuerkitoBio/urlesc 5bd2802263f21d8788851d5305584c82a5c75d7e
|
||||
golang.org/x/net 02ac38e2528ff4adea90f184d71a3faa04b4b1b0
|
||||
gopkg.in/inf.v0 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
|
||||
|
||||
k8s.io/api kubernetes-1.8.2
|
||||
k8s.io/apimachinery kubernetes-1.8.2
|
||||
k8s.io/client-go kubernetes-1.8.2
|
||||
k8s.io/kubernetes v1.8.2
|
||||
k8s.io/kube-openapi 61b46af70dfed79c6d24530cd23b41440a7f22a5
|
||||
|
||||
github.com/gregjones/httpcache c1f8028e62adb3d518b823a2f8e6a95c38bdd3aa
|
||||
github.com/json-iterator/go 6240e1e7983a85228f7fd9c3e1b6932d46ec58e2
|
||||
github.com/peterbourgon/diskv 5f041e8faa004a95c88a202771f4cc3e991971e6
|
||||
vbom.ml/util 256737ac55c46798123f754ab7d2c784e2c71783
|
||||
github.com/google/btree 316fb6d3f031ae8f4d457c6c5186b9e3ded70435
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
Copyright (c) 2012, Martin Angers
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
||||
|
||||
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,185 @@
|
|||
# Purell
|
||||
|
||||
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
|
||||
|
||||
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
|
||||
|
||||
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
|
||||
|
||||
## Install
|
||||
|
||||
`go get github.com/PuerkitoBio/purell`
|
||||
|
||||
## Changelog
|
||||
|
||||
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
|
||||
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
|
||||
* **v0.2.0** : Add benchmarks, Attempt IDN support.
|
||||
* **v0.1.0** : Initial release.
|
||||
|
||||
## Examples
|
||||
|
||||
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
|
||||
|
||||
```go
|
||||
package purell
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
func ExampleNormalizeURLString() {
|
||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
||||
}
|
||||
|
||||
func ExampleMustNormalizeURLString() {
|
||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
||||
FlagsUnsafeGreedy)
|
||||
fmt.Print(normalized)
|
||||
|
||||
// Output: http://somewebsite.com/Amazing%FA/url
|
||||
}
|
||||
|
||||
func ExampleNormalizeURL() {
|
||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
||||
panic(err)
|
||||
} else {
|
||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
||||
fmt.Print(normalized)
|
||||
}
|
||||
|
||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
||||
}
|
||||
```
|
||||
|
||||
## API
|
||||
|
||||
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
|
||||
|
||||
```go
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
```
|
||||
|
||||
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
|
||||
|
||||
The [full godoc reference is available on gopkgdoc][godoc].
|
||||
|
||||
Some things to note:
|
||||
|
||||
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
|
||||
|
||||
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
|
||||
- %24 -> $
|
||||
- %26 -> &
|
||||
- %2B-%3B -> +,-./0123456789:;
|
||||
- %3D -> =
|
||||
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
||||
- %5F -> _
|
||||
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
|
||||
- %7E -> ~
|
||||
|
||||
|
||||
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
|
||||
|
||||
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
|
||||
|
||||
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
|
||||
|
||||
### Safe vs Usually Safe vs Unsafe
|
||||
|
||||
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
|
||||
|
||||
Consider the following URL:
|
||||
|
||||
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
Normalizing with the `FlagsSafe` gives:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
With the `FlagsUsuallySafeGreedy`:
|
||||
|
||||
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
|
||||
|
||||
And with `FlagsUnsafeGreedy`:
|
||||
|
||||
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
|
||||
|
||||
## TODOs
|
||||
|
||||
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
|
||||
|
||||
## Thanks / Contributions
|
||||
|
||||
@rogpeppe
|
||||
@jehiah
|
||||
@opennota
|
||||
@pchristopher1275
|
||||
@zenovich
|
||||
|
||||
## License
|
||||
|
||||
The [BSD 3-Clause license][bsd].
|
||||
|
||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
||||
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
|
||||
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
|
||||
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
|
||||
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
|
||||
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
|
|
@ -0,0 +1,375 @@
|
|||
/*
|
||||
Package purell offers URL normalization as described on the wikipedia page:
|
||||
http://en.wikipedia.org/wiki/URL_normalization
|
||||
*/
|
||||
package purell
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/PuerkitoBio/urlesc"
|
||||
"golang.org/x/net/idna"
|
||||
"golang.org/x/text/secure/precis"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// A set of normalization flags determines how a URL will
|
||||
// be normalized.
|
||||
type NormalizationFlags uint
|
||||
|
||||
const (
|
||||
// Safe normalizations
|
||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
||||
FlagLowercaseHost // http://HOST -> http://host
|
||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
||||
|
||||
// Usually safe normalizations
|
||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
||||
|
||||
// Unsafe normalizations
|
||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
||||
FlagForceHTTP // https://host -> http://host
|
||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
||||
|
||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
||||
// submitted by jehiah
|
||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
||||
|
||||
// Convenience set of safe normalizations
|
||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
||||
|
||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
||||
|
||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
||||
|
||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
||||
|
||||
// Convenience set of all available flags
|
||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
||||
)
|
||||
|
||||
const (
|
||||
defaultHttpPort = ":80"
|
||||
defaultHttpsPort = ":443"
|
||||
)
|
||||
|
||||
// Regular expressions used by the normalizations
|
||||
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
||||
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
|
||||
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
|
||||
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
|
||||
var rxEmptyPort = regexp.MustCompile(`:+$`)
|
||||
|
||||
// Map of flags to implementation function.
|
||||
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
|
||||
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
|
||||
|
||||
// Since maps have undefined traversing order, make a slice of ordered keys
|
||||
var flagsOrder = []NormalizationFlags{
|
||||
FlagLowercaseScheme,
|
||||
FlagLowercaseHost,
|
||||
FlagRemoveDefaultPort,
|
||||
FlagRemoveDirectoryIndex,
|
||||
FlagRemoveDotSegments,
|
||||
FlagRemoveFragment,
|
||||
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
|
||||
FlagRemoveDuplicateSlashes,
|
||||
FlagRemoveWWW,
|
||||
FlagAddWWW,
|
||||
FlagSortQuery,
|
||||
FlagDecodeDWORDHost,
|
||||
FlagDecodeOctalHost,
|
||||
FlagDecodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
|
||||
FlagAddTrailingSlash,
|
||||
}
|
||||
|
||||
// ... and then the map, where order is unimportant
|
||||
var flags = map[NormalizationFlags]func(*url.URL){
|
||||
FlagLowercaseScheme: lowercaseScheme,
|
||||
FlagLowercaseHost: lowercaseHost,
|
||||
FlagRemoveDefaultPort: removeDefaultPort,
|
||||
FlagRemoveDirectoryIndex: removeDirectoryIndex,
|
||||
FlagRemoveDotSegments: removeDotSegments,
|
||||
FlagRemoveFragment: removeFragment,
|
||||
FlagForceHTTP: forceHTTP,
|
||||
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
|
||||
FlagRemoveWWW: removeWWW,
|
||||
FlagAddWWW: addWWW,
|
||||
FlagSortQuery: sortQuery,
|
||||
FlagDecodeDWORDHost: decodeDWORDHost,
|
||||
FlagDecodeOctalHost: decodeOctalHost,
|
||||
FlagDecodeHexHost: decodeHexHost,
|
||||
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
|
||||
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
|
||||
FlagRemoveTrailingSlash: removeTrailingSlash,
|
||||
FlagAddTrailingSlash: addTrailingSlash,
|
||||
}
|
||||
|
||||
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
||||
result, e := NormalizeURLString(u, f)
|
||||
if e != nil {
|
||||
panic(e)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
||||
// It takes an URL string as input, as well as the normalization flags.
|
||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
||||
if parsed, e := url.Parse(u); e != nil {
|
||||
return "", e
|
||||
} else {
|
||||
options := make([]precis.Option, 1, 3)
|
||||
options[0] = precis.IgnoreCase
|
||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
||||
options = append(options, precis.FoldCase())
|
||||
}
|
||||
options = append(options, precis.Norm(norm.NFC))
|
||||
profile := precis.NewFreeform(options...)
|
||||
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
|
||||
return "", e
|
||||
}
|
||||
return NormalizeURL(parsed, f), nil
|
||||
}
|
||||
panic("Unreachable code.")
|
||||
}
|
||||
|
||||
// NormalizeURL returns the normalized string.
|
||||
// It takes a parsed URL object as input, as well as the normalization flags.
|
||||
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
|
||||
for _, k := range flagsOrder {
|
||||
if f&k == k {
|
||||
flags[k](u)
|
||||
}
|
||||
}
|
||||
return urlesc.Escape(u)
|
||||
}
|
||||
|
||||
func lowercaseScheme(u *url.URL) {
|
||||
if len(u.Scheme) > 0 {
|
||||
u.Scheme = strings.ToLower(u.Scheme)
|
||||
}
|
||||
}
|
||||
|
||||
func lowercaseHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = strings.ToLower(u.Host)
|
||||
}
|
||||
}
|
||||
|
||||
func removeDefaultPort(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
scheme := strings.ToLower(u.Scheme)
|
||||
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
||||
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
||||
return ""
|
||||
}
|
||||
return val
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func removeTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if strings.HasSuffix(u.Path, "/") {
|
||||
u.Path = u.Path[:l-1]
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if strings.HasSuffix(u.Host, "/") {
|
||||
u.Host = u.Host[:l-1]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func addTrailingSlash(u *url.URL) {
|
||||
if l := len(u.Path); l > 0 {
|
||||
if !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
} else if l = len(u.Host); l > 0 {
|
||||
if !strings.HasSuffix(u.Host, "/") {
|
||||
u.Host += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDotSegments(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
var dotFree []string
|
||||
var lastIsDot bool
|
||||
|
||||
sections := strings.Split(u.Path, "/")
|
||||
for _, s := range sections {
|
||||
if s == ".." {
|
||||
if len(dotFree) > 0 {
|
||||
dotFree = dotFree[:len(dotFree)-1]
|
||||
}
|
||||
} else if s != "." {
|
||||
dotFree = append(dotFree, s)
|
||||
}
|
||||
lastIsDot = (s == "." || s == "..")
|
||||
}
|
||||
// Special case if host does not end with / and new path does not begin with /
|
||||
u.Path = strings.Join(dotFree, "/")
|
||||
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
|
||||
u.Path = "/" + u.Path
|
||||
}
|
||||
// Special case if the last segment was a dot, make sure the path ends with a slash
|
||||
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
|
||||
u.Path += "/"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeDirectoryIndex(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
|
||||
}
|
||||
}
|
||||
|
||||
func removeFragment(u *url.URL) {
|
||||
u.Fragment = ""
|
||||
}
|
||||
|
||||
func forceHTTP(u *url.URL) {
|
||||
if strings.ToLower(u.Scheme) == "https" {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
}
|
||||
|
||||
func removeDuplicateSlashes(u *url.URL) {
|
||||
if len(u.Path) > 0 {
|
||||
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
|
||||
}
|
||||
}
|
||||
|
||||
func removeWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = u.Host[4:]
|
||||
}
|
||||
}
|
||||
|
||||
func addWWW(u *url.URL) {
|
||||
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
||||
u.Host = "www." + u.Host
|
||||
}
|
||||
}
|
||||
|
||||
func sortQuery(u *url.URL) {
|
||||
q := u.Query()
|
||||
|
||||
if len(q) > 0 {
|
||||
arKeys := make([]string, len(q))
|
||||
i := 0
|
||||
for k, _ := range q {
|
||||
arKeys[i] = k
|
||||
i++
|
||||
}
|
||||
sort.Strings(arKeys)
|
||||
buf := new(bytes.Buffer)
|
||||
for _, k := range arKeys {
|
||||
sort.Strings(q[k])
|
||||
for _, v := range q[k] {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteRune('&')
|
||||
}
|
||||
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
|
||||
}
|
||||
}
|
||||
|
||||
// Rebuild the raw query string
|
||||
u.RawQuery = buf.String()
|
||||
}
|
||||
}
|
||||
|
||||
func decodeDWORDHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
var parts [4]int64
|
||||
|
||||
dword, _ := strconv.ParseInt(matches[1], 10, 0)
|
||||
for i, shift := range []uint{24, 16, 8, 0} {
|
||||
parts[i] = dword >> shift & 0xFF
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeOctalHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
|
||||
var parts [4]int64
|
||||
|
||||
for i := 1; i <= 4; i++ {
|
||||
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
|
||||
}
|
||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeHexHost(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
||||
// Conversion is safe because of regex validation
|
||||
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
|
||||
// Set host as DWORD (base 10) encoded host
|
||||
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
|
||||
// The rest is the same as decoding a DWORD host
|
||||
decodeDWORDHost(u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeUnncessaryHostDots(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
|
||||
// Trim the leading and trailing dots
|
||||
u.Host = strings.Trim(matches[1], ".")
|
||||
if len(matches) > 2 {
|
||||
u.Host += matches[2]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func removeEmptyPortSeparator(u *url.URL) {
|
||||
if len(u.Host) > 0 {
|
||||
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,16 @@
|
|||
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
|
||||
======
|
||||
|
||||
Package urlesc implements query escaping as per RFC 3986.
|
||||
|
||||
It contains some parts of the net/url package, modified so as to allow
|
||||
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
|
||||
|
||||
## Install
|
||||
|
||||
go get github.com/PuerkitoBio/urlesc
|
||||
|
||||
## License
|
||||
|
||||
Go license (BSD-3-Clause)
|
||||
|
|
@ -0,0 +1,180 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package urlesc implements query escaping as per RFC 3986.
|
||||
// It contains some parts of the net/url package, modified so as to allow
|
||||
// some reserved characters incorrectly escaped by net/url.
|
||||
// See https://github.com/golang/go/issues/5684
|
||||
package urlesc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type encoding int
|
||||
|
||||
const (
|
||||
encodePath encoding = 1 + iota
|
||||
encodeUserPassword
|
||||
encodeQueryComponent
|
||||
encodeFragment
|
||||
)
|
||||
|
||||
// Return true if the specified character should be escaped when
|
||||
// appearing in a URL string, according to RFC 3986.
|
||||
func shouldEscape(c byte, mode encoding) bool {
|
||||
// §2.3 Unreserved characters (alphanum)
|
||||
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
|
||||
return false
|
||||
}
|
||||
|
||||
switch c {
|
||||
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
|
||||
return false
|
||||
|
||||
// §2.2 Reserved characters (reserved)
|
||||
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
|
||||
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
|
||||
// Different sections of the URL allow a few of
|
||||
// the reserved characters to appear unescaped.
|
||||
switch mode {
|
||||
case encodePath: // §3.3
|
||||
// The RFC allows sub-delims and : @.
|
||||
// '/', '[' and ']' can be used to assign meaning to individual path
|
||||
// segments. This package only manipulates the path as a whole,
|
||||
// so we allow those as well. That leaves only ? and # to escape.
|
||||
return c == '?' || c == '#'
|
||||
|
||||
case encodeUserPassword: // §3.2.1
|
||||
// The RFC allows : and sub-delims in
|
||||
// userinfo. The parsing of userinfo treats ':' as special so we must escape
|
||||
// all the gen-delims.
|
||||
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
|
||||
|
||||
case encodeQueryComponent: // §3.4
|
||||
// The RFC allows / and ?.
|
||||
return c != '/' && c != '?'
|
||||
|
||||
case encodeFragment: // §4.1
|
||||
// The RFC text is silent but the grammar allows
|
||||
// everything, so escape nothing but #
|
||||
return c == '#'
|
||||
}
|
||||
}
|
||||
|
||||
// Everything else must be escaped.
|
||||
return true
|
||||
}
|
||||
|
||||
// QueryEscape escapes the string so it can be safely placed
|
||||
// inside a URL query.
|
||||
func QueryEscape(s string) string {
|
||||
return escape(s, encodeQueryComponent)
|
||||
}
|
||||
|
||||
func escape(s string, mode encoding) string {
|
||||
spaceCount, hexCount := 0, 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
c := s[i]
|
||||
if shouldEscape(c, mode) {
|
||||
if c == ' ' && mode == encodeQueryComponent {
|
||||
spaceCount++
|
||||
} else {
|
||||
hexCount++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if spaceCount == 0 && hexCount == 0 {
|
||||
return s
|
||||
}
|
||||
|
||||
t := make([]byte, len(s)+2*hexCount)
|
||||
j := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
switch c := s[i]; {
|
||||
case c == ' ' && mode == encodeQueryComponent:
|
||||
t[j] = '+'
|
||||
j++
|
||||
case shouldEscape(c, mode):
|
||||
t[j] = '%'
|
||||
t[j+1] = "0123456789ABCDEF"[c>>4]
|
||||
t[j+2] = "0123456789ABCDEF"[c&15]
|
||||
j += 3
|
||||
default:
|
||||
t[j] = s[i]
|
||||
j++
|
||||
}
|
||||
}
|
||||
return string(t)
|
||||
}
|
||||
|
||||
var uiReplacer = strings.NewReplacer(
|
||||
"%21", "!",
|
||||
"%27", "'",
|
||||
"%28", "(",
|
||||
"%29", ")",
|
||||
"%2A", "*",
|
||||
)
|
||||
|
||||
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
|
||||
func unescapeUserinfo(s string) string {
|
||||
return uiReplacer.Replace(s)
|
||||
}
|
||||
|
||||
// Escape reassembles the URL into a valid URL string.
|
||||
// The general form of the result is one of:
|
||||
//
|
||||
// scheme:opaque
|
||||
// scheme://userinfo@host/path?query#fragment
|
||||
//
|
||||
// If u.Opaque is non-empty, String uses the first form;
|
||||
// otherwise it uses the second form.
|
||||
//
|
||||
// In the second form, the following rules apply:
|
||||
// - if u.Scheme is empty, scheme: is omitted.
|
||||
// - if u.User is nil, userinfo@ is omitted.
|
||||
// - if u.Host is empty, host/ is omitted.
|
||||
// - if u.Scheme and u.Host are empty and u.User is nil,
|
||||
// the entire scheme://userinfo@host/ is omitted.
|
||||
// - if u.Host is non-empty and u.Path begins with a /,
|
||||
// the form host/path does not add its own /.
|
||||
// - if u.RawQuery is empty, ?query is omitted.
|
||||
// - if u.Fragment is empty, #fragment is omitted.
|
||||
func Escape(u *url.URL) string {
|
||||
var buf bytes.Buffer
|
||||
if u.Scheme != "" {
|
||||
buf.WriteString(u.Scheme)
|
||||
buf.WriteByte(':')
|
||||
}
|
||||
if u.Opaque != "" {
|
||||
buf.WriteString(u.Opaque)
|
||||
} else {
|
||||
if u.Scheme != "" || u.Host != "" || u.User != nil {
|
||||
buf.WriteString("//")
|
||||
if ui := u.User; ui != nil {
|
||||
buf.WriteString(unescapeUserinfo(ui.String()))
|
||||
buf.WriteByte('@')
|
||||
}
|
||||
if h := u.Host; h != "" {
|
||||
buf.WriteString(h)
|
||||
}
|
||||
}
|
||||
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
|
||||
buf.WriteByte('/')
|
||||
}
|
||||
buf.WriteString(escape(u.Path, encodePath))
|
||||
}
|
||||
if u.RawQuery != "" {
|
||||
buf.WriteByte('?')
|
||||
buf.WriteString(u.RawQuery)
|
||||
}
|
||||
if u.Fragment != "" {
|
||||
buf.WriteByte('#')
|
||||
buf.WriteString(escape(u.Fragment, encodeFragment))
|
||||
}
|
||||
return buf.String()
|
||||
}
|
|
@ -1558,25 +1558,67 @@ func (m *ConfState) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
|
||||
}
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRaft
|
||||
if wireType == 0 {
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRaft
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
m.Nodes = append(m.Nodes, v)
|
||||
} else if wireType == 2 {
|
||||
var packedLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRaft
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
packedLen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if packedLen < 0 {
|
||||
return ErrInvalidLengthRaft
|
||||
}
|
||||
postIndex := iNdEx + packedLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
for iNdEx < postIndex {
|
||||
var v uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRaft
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
v |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Nodes = append(m.Nodes, v)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
|
||||
}
|
||||
m.Nodes = append(m.Nodes, v)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRaft(dAtA[iNdEx:])
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
## About
|
||||
|
||||
This directory contains a collection of scripts used to build and manage this
|
||||
repository. If there are any issues regarding the intention of a particular
|
||||
script (or even part of a certain script), please reach out to us.
|
||||
It may help us either refine our current scripts, or add on new ones
|
||||
that are appropriate for a given use case.
|
||||
|
||||
## DinD (dind.sh)
|
||||
|
||||
DinD is a wrapper script which allows Docker to be run inside a Docker
|
||||
container. DinD requires the container to
|
||||
be run with privileged mode enabled.
|
||||
|
||||
## Generate Authors (generate-authors.sh)
|
||||
|
||||
Generates AUTHORS; a file with all the names and corresponding emails of
|
||||
individual contributors. AUTHORS can be found in the home directory of
|
||||
this repository.
|
||||
|
||||
## Make
|
||||
|
||||
There are two make files, each with different extensions. Neither are supposed
|
||||
to be called directly; only invoke `make`. Both scripts run inside a Docker
|
||||
container.
|
||||
|
||||
### make.ps1
|
||||
|
||||
- The Windows native build script that uses PowerShell semantics; it is limited
|
||||
unlike `hack\make.sh` since it does not provide support for the full set of
|
||||
operations provided by the Linux counterpart, `make.sh`. However, `make.ps1`
|
||||
does provide support for local Windows development and Windows to Windows CI.
|
||||
More information is found within `make.ps1` by the author, @jhowardmsft
|
||||
|
||||
### make.sh
|
||||
|
||||
- Referenced via `make test` when running tests on a local machine,
|
||||
or directly referenced when running tests inside a Docker development container.
|
||||
- When running on a local machine, `make test` to run all tests found in
|
||||
`test`, `test-unit`, `test-integration`, and `test-docker-py` on
|
||||
your local machine. The default timeout is set in `make.sh` to 60 minutes
|
||||
(`${TIMEOUT:=60m}`), since it currently takes up to an hour to run
|
||||
all of the tests.
|
||||
- When running inside a Docker development container, `hack/make.sh` does
|
||||
not have a single target that runs all the tests. You need to provide a
|
||||
single command line with multiple targets that performs the same thing.
|
||||
An example referenced from [Run targets inside a development container](https://docs.docker.com/opensource/project/test-and-docs/#run-targets-inside-a-development-container): `root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration test-docker-py`
|
||||
- For more information related to testing outside the scope of this README,
|
||||
refer to
|
||||
[Run tests and test documentation](https://docs.docker.com/opensource/project/test-and-docs/)
|
||||
|
||||
## Release (release.sh)
|
||||
|
||||
Releases any bundles built by `make` on a public AWS S3 bucket.
|
||||
For information regarding configuration, please view `release.sh`.
|
||||
|
||||
## Vendor (vendor.sh)
|
||||
|
||||
A shell script that is a wrapper around Vndr. For information on how to use
|
||||
this, please refer to [vndr's README](https://github.com/LK4D4/vndr/blob/master/README.md)
|
69
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
сгенерированный
поставляемый
Normal file
69
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/README.md
сгенерированный
поставляемый
Normal file
|
@ -0,0 +1,69 @@
|
|||
# Integration Testing on Swarm
|
||||
|
||||
IT on Swarm allows you to execute integration test in parallel across a Docker Swarm cluster
|
||||
|
||||
## Architecture
|
||||
|
||||
### Master service
|
||||
|
||||
- Works as a funker caller
|
||||
- Calls a worker funker (`-worker-service`) with a chunk of `-check.f` filter strings (passed as a file via `-input` flag, typically `/mnt/input`)
|
||||
|
||||
### Worker service
|
||||
|
||||
- Works as a funker callee
|
||||
- Executes an equivalent of `TESTFLAGS=-check.f TestFoo|TestBar|TestBaz ... make test-integration-cli` using the bind-mounted API socket (`docker.sock`)
|
||||
|
||||
### Client
|
||||
|
||||
- Controls master and workers via `docker stack`
|
||||
- No need to have a local daemon
|
||||
|
||||
Typically, the master and workers are supposed to be running on a cloud environment,
|
||||
while the client is supposed to be running on a laptop, e.g. Docker for Mac/Windows.
|
||||
|
||||
## Requirement
|
||||
|
||||
- Docker daemon 1.13 or later
|
||||
- Private registry for distributed execution with multiple nodes
|
||||
|
||||
## Usage
|
||||
|
||||
### Step 1: Prepare images
|
||||
|
||||
$ make build-integration-cli-on-swarm
|
||||
|
||||
Following environment variables are known to work in this step:
|
||||
|
||||
- `BUILDFLAGS`
|
||||
- `DOCKER_INCREMENTAL_BINARY`
|
||||
|
||||
Note: during the transition into Moby Project, you might need to create a symbolic link `$GOPATH/src/github.com/docker/docker` to `$GOPATH/src/github.com/moby/moby`.
|
||||
|
||||
### Step 2: Execute tests
|
||||
|
||||
$ ./hack/integration-cli-on-swarm/integration-cli-on-swarm -replicas 40 -push-worker-image YOUR_REGISTRY.EXAMPLE.COM/integration-cli-worker:latest
|
||||
|
||||
Following environment variables are known to work in this step:
|
||||
|
||||
- `DOCKER_GRAPHDRIVER`
|
||||
- `DOCKER_EXPERIMENTAL`
|
||||
|
||||
#### Flags
|
||||
|
||||
Basic flags:
|
||||
|
||||
- `-replicas N`: the number of worker service replicas. i.e. degree of parallelism.
|
||||
- `-chunks N`: the number of chunks. By default, `chunks` == `replicas`.
|
||||
- `-push-worker-image REGISTRY/IMAGE:TAG`: push the worker image to the registry. Note that if you have only single node and hence you do not need a private registry, you do not need to specify `-push-worker-image`.
|
||||
|
||||
Experimental flags for mitigating makespan nonuniformity:
|
||||
|
||||
- `-shuffle`: Shuffle the test filter strings
|
||||
|
||||
Flags for debugging IT on Swarm itself:
|
||||
|
||||
- `-rand-seed N`: the random seed. This flag is useful for deterministic replaying. By default(0), the timestamp is used.
|
||||
- `-filters-file FILE`: the file contains `-check.f` strings. By default, the file is automatically generated.
|
||||
- `-dry-run`: skip the actual workload
|
||||
- `keep-executor`: do not auto-remove executor containers, which is used for running privileged programs on Swarm
|
2
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
сгенерированный
поставляемый
Normal file
2
vendor/github.com/docker/docker/hack/integration-cli-on-swarm/agent/vendor.conf
сгенерированный
поставляемый
Normal file
|
@ -0,0 +1,2 @@
|
|||
# dependencies specific to worker (i.e. github.com/docker/docker/...) are not vendored here
|
||||
github.com/bfirsh/funker-go eaa0a2e06f30e72c9a0b7f858951e581e26ef773
|
97
vendor/github.com/docker/docker/pkg/archive/example_changes.go
сгенерированный
поставляемый
Normal file
97
vendor/github.com/docker/docker/pkg/archive/example_changes.go
сгенерированный
поставляемый
Normal file
|
@ -0,0 +1,97 @@
|
|||
// +build ignore
|
||||
|
||||
// Simple tool to create an archive stream from an old and new directory
|
||||
//
|
||||
// By default it will stream the comparison of two temporary directories with junk files
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
flDebug = flag.Bool("D", false, "debugging output")
|
||||
flNewDir = flag.String("newdir", "", "")
|
||||
flOldDir = flag.String("olddir", "", "")
|
||||
log = logrus.New()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() {
|
||||
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
|
||||
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
log.Out = os.Stderr
|
||||
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
var newDir, oldDir string
|
||||
|
||||
if len(*flNewDir) == 0 {
|
||||
var err error
|
||||
newDir, err = ioutil.TempDir("", "docker-test-newDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(newDir)
|
||||
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
newDir = *flNewDir
|
||||
}
|
||||
|
||||
if len(*flOldDir) == 0 {
|
||||
oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(oldDir)
|
||||
} else {
|
||||
oldDir = *flOldDir
|
||||
}
|
||||
|
||||
changes, err := archive.ChangesDirs(newDir, oldDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
a, err := archive.ExportChanges(newDir, changes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer a.Close()
|
||||
|
||||
i, err := io.Copy(os.Stdout, a)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
|
||||
}
|
||||
|
||||
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
|
||||
fileData := []byte("fooo")
|
||||
for n := 0; n < numberOfFiles; n++ {
|
||||
fileName := fmt.Sprintf("file-%d", n)
|
||||
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if makeLinks {
|
||||
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
totalSize := numberOfFiles * len(fileData)
|
||||
return totalSize, nil
|
||||
}
|
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2017 Ernest Micklei
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
83
vendor/github.com/emicklei/go-restful-swagger12/README.md
сгенерированный
поставляемый
Normal file
83
vendor/github.com/emicklei/go-restful-swagger12/README.md
сгенерированный
поставляемый
Normal file
|
@ -0,0 +1,83 @@
|
|||
# go-restful-swagger12
|
||||
|
||||
[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
|
||||
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
|
||||
|
||||
How to use Swagger UI with go-restful
|
||||
=
|
||||
|
||||
Get the Swagger UI sources (version 1.2 only)
|
||||
|
||||
git clone https://github.com/wordnik/swagger-ui.git
|
||||
|
||||
The project contains a "dist" folder.
|
||||
Its contents has all the Swagger UI files you need.
|
||||
|
||||
The `index.html` has an `url` set to `http://petstore.swagger.wordnik.com/api/api-docs`.
|
||||
You need to change that to match your WebService JSON endpoint e.g. `http://localhost:8080/apidocs.json`
|
||||
|
||||
Now, you can install the Swagger WebService for serving the Swagger specification in JSON.
|
||||
|
||||
config := swagger.Config{
|
||||
WebServices: restful.RegisteredWebServices(),
|
||||
ApiPath: "/apidocs.json",
|
||||
SwaggerPath: "/apidocs/",
|
||||
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
|
||||
swagger.InstallSwaggerService(config)
|
||||
|
||||
|
||||
Documenting Structs
|
||||
--
|
||||
|
||||
Currently there are 2 ways to document your structs in the go-restful Swagger.
|
||||
|
||||
###### By using struct tags
|
||||
- Use tag "description" to annotate a struct field with a description to show in the UI
|
||||
- Use tag "modelDescription" to annotate the struct itself with a description to show in the UI. The tag can be added in an field of the struct and in case that there are multiple definition, they will be appended with an empty line.
|
||||
|
||||
###### By using the SwaggerDoc method
|
||||
Here is an example with an `Address` struct and the documentation for each of the fields. The `""` is a special entry for **documenting the struct itself**.
|
||||
|
||||
type Address struct {
|
||||
Country string `json:"country,omitempty"`
|
||||
PostCode int `json:"postcode,omitempty"`
|
||||
}
|
||||
|
||||
func (Address) SwaggerDoc() map[string]string {
|
||||
return map[string]string{
|
||||
"": "Address doc",
|
||||
"country": "Country doc",
|
||||
"postcode": "PostCode doc",
|
||||
}
|
||||
}
|
||||
|
||||
This example will generate a JSON like this
|
||||
|
||||
{
|
||||
"Address": {
|
||||
"id": "Address",
|
||||
"description": "Address doc",
|
||||
"properties": {
|
||||
"country": {
|
||||
"type": "string",
|
||||
"description": "Country doc"
|
||||
},
|
||||
"postcode": {
|
||||
"type": "integer",
|
||||
"format": "int32",
|
||||
"description": "PostCode doc"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
**Very Important Notes:**
|
||||
- `SwaggerDoc()` is using a **NON-Pointer** receiver (e.g. func (Address) and not func (*Address))
|
||||
- The returned map should use as key the name of the field as defined in the JSON parameter (e.g. `"postcode"` and not `"PostCode"`)
|
||||
|
||||
Notes
|
||||
--
|
||||
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
|
||||
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
|
||||
|
||||
© 2017, ernestmicklei.com. MIT License. Contributions welcome.
|
64
vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
сгенерированный
поставляемый
Normal file
64
vendor/github.com/emicklei/go-restful-swagger12/api_declaration_list.go
сгенерированный
поставляемый
Normal file
|
@ -0,0 +1,64 @@
|
|||
package swagger
|
||||
|
||||
// Copyright 2015 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// ApiDeclarationList maintains an ordered list of ApiDeclaration.
|
||||
type ApiDeclarationList struct {
|
||||
List []ApiDeclaration
|
||||
}
|
||||
|
||||
// At returns the ApiDeclaration by its path unless absent, then ok is false
|
||||
func (l *ApiDeclarationList) At(path string) (a ApiDeclaration, ok bool) {
|
||||
for _, each := range l.List {
|
||||
if each.ResourcePath == path {
|
||||
return each, true
|
||||
}
|
||||
}
|
||||
return a, false
|
||||
}
|
||||
|
||||
// Put adds or replaces a ApiDeclaration with this name
|
||||
func (l *ApiDeclarationList) Put(path string, a ApiDeclaration) {
|
||||
// maybe replace existing
|
||||
for i, each := range l.List {
|
||||
if each.ResourcePath == path {
|
||||
// replace
|
||||
l.List[i] = a
|
||||
return
|
||||
}
|
||||
}
|
||||
// add
|
||||
l.List = append(l.List, a)
|
||||
}
|
||||
|
||||
// Do enumerates all the properties, each with its assigned name
|
||||
func (l *ApiDeclarationList) Do(block func(path string, decl ApiDeclaration)) {
|
||||
for _, each := range l.List {
|
||||
block(each.ResourcePath, each)
|
||||
}
|
||||
}
|
||||
|
||||
// MarshalJSON writes the ModelPropertyList as if it was a map[string]ModelProperty
|
||||
func (l ApiDeclarationList) MarshalJSON() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
encoder := json.NewEncoder(&buf)
|
||||
buf.WriteString("{\n")
|
||||
for i, each := range l.List {
|
||||
buf.WriteString("\"")
|
||||
buf.WriteString(each.ResourcePath)
|
||||
buf.WriteString("\": ")
|
||||
encoder.Encode(each)
|
||||
if i < len(l.List)-1 {
|
||||
buf.WriteString(",\n")
|
||||
}
|
||||
}
|
||||
buf.WriteString("}")
|
||||
return buf.Bytes(), nil
|
||||
}
|
Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше
Загрузка…
Ссылка в новой задаче