Mirror of https://github.com/microsoft/docker.git
Merge pull request #8295 from crosbymichael/type-exec-context
Strongly type exec driver context
Commit 595b5bf06e
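The diff below swaps the untyped Config map[string][]string bag that populateCommand used to fill for dedicated, typed fields on execdriver.Command (ProcessLabel, MountLabel, LxcConfig). As a rough illustration only, here is a minimal, self-contained Go sketch of that shape change; the stand-in structs and placeholder label values are not the real execdriver types:

package main

import "fmt"

// oldCommand stands in for the previous shape: driver settings travel in an
// untyped map keyed by strings such as "process_label", "mount_label" and "lxc".
type oldCommand struct {
    ID     string
    Config map[string][]string
}

// newCommand stands in for the shape after this commit: the same settings are
// dedicated, typed fields, so drivers no longer index into a map.
type newCommand struct {
    ID           string
    ProcessLabel string
    MountLabel   string
    LxcConfig    []string
}

func main() {
    before := oldCommand{
        ID: "1",
        Config: map[string][]string{
            "process_label": {"example-process-label"},
            "mount_label":   {"example-mount-label"},
            "lxc":           {"utsname = docker"},
        },
    }
    // Callers had to know the key and trust that the slice was non-empty.
    fmt.Println(before.Config["process_label"][0])

    after := newCommand{
        ID:           "1",
        ProcessLabel: "example-process-label",
        MountLabel:   "example-mount-label",
        LxcConfig:    []string{"utsname = docker"},
    }
    // The same value is now a plain field access checked by the compiler.
    fmt.Println(after.ProcessLabel)
}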
@@ -195,14 +195,7 @@ func (container *Container) getRootResourcePath(path string) (string, error) {
}

func populateCommand(c *Container, env []string) error {
    var (
        en *execdriver.Network
        context = make(map[string][]string)
    )
    context["process_label"] = []string{c.GetProcessLabel()}
    context["mount_label"] = []string{c.GetMountLabel()}

    en = &execdriver.Network{
    en := &execdriver.Network{
        Mtu: c.daemon.config.Mtu,
        Interface: nil,
    }
@@ -247,7 +240,7 @@ func populateCommand(c *Container, env []string) error {
    autoCreatedDevices := append(devices.DefaultAutoCreatedDevices, userSpecifiedDevices...)

    // TODO: this can be removed after lxc-conf is fully deprecated
    mergeLxcConfIntoOptions(c.hostConfig, context)
    lxcConfig := mergeLxcConfIntoOptions(c.hostConfig)

    resources := &execdriver.Resources{
        Memory: c.Config.Memory,
@@ -263,21 +256,25 @@ func populateCommand(c *Container, env []string) error {
        Tty: c.Config.Tty,
        User: c.Config.User,
    }

    processConfig.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
    processConfig.Env = env

    c.command = &execdriver.Command{
        ID: c.ID,
        Rootfs: c.RootfsPath(),
        InitPath: "/.dockerinit",
        WorkingDir: c.Config.WorkingDir,
        Network: en,
        Config: context,
        Resources: resources,
        AllowedDevices: allowedDevices,
        AutoCreatedDevices: autoCreatedDevices,
        CapAdd: c.hostConfig.CapAdd,
        CapDrop: c.hostConfig.CapDrop,
        ProcessConfig: processConfig,
        ProcessLabel: c.GetProcessLabel(),
        MountLabel: c.GetMountLabel(),
        LxcConfig: lxcConfig,
    }

    return nil
@@ -99,19 +99,21 @@ type ProcessConfig struct {

// Process wrapps an os/exec.Cmd to add more metadata
type Command struct {
    ID string `json:"id"`
    Rootfs string `json:"rootfs"` // root fs of the container
    InitPath string `json:"initpath"` // dockerinit
    WorkingDir string `json:"working_dir"`
    ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
    Network *Network `json:"network"`
    Config map[string][]string `json:"config"` // generic values that specific drivers can consume
    Resources *Resources `json:"resources"`
    Mounts []Mount `json:"mounts"`
    AllowedDevices []*devices.Device `json:"allowed_devices"`
    AutoCreatedDevices []*devices.Device `json:"autocreated_devices"`
    CapAdd []string `json:"cap_add"`
    CapDrop []string `json:"cap_drop"`
    ContainerPid int `json:"container_pid"` // the pid for the process inside a container
    ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container.
    ID string `json:"id"`
    Rootfs string `json:"rootfs"` // root fs of the container
    InitPath string `json:"initpath"` // dockerinit
    WorkingDir string `json:"working_dir"`
    ConfigPath string `json:"config_path"` // this should be able to be removed when the lxc template is moved into the driver
    Network *Network `json:"network"`
    Resources *Resources `json:"resources"`
    Mounts []Mount `json:"mounts"`
    AllowedDevices []*devices.Device `json:"allowed_devices"`
    AutoCreatedDevices []*devices.Device `json:"autocreated_devices"`
    CapAdd []string `json:"cap_add"`
    CapDrop []string `json:"cap_drop"`
    ContainerPid int `json:"container_pid"` // the pid for the process inside a container
    ProcessConfig ProcessConfig `json:"process_config"` // Describes the init process of the container.
    ProcessLabel string `json:"process_label"`
    MountLabel string `json:"mount_label"`
    LxcConfig []string `json:"lxc_config"`
}
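Because the exec driver Command is serialized with the json struct tags shown above, the new typed fields travel under the process_label, mount_label and lxc_config keys. A hedged sketch of that encoding with a trimmed stand-in struct (not the real execdriver.Command):

package main

import (
    "encoding/json"
    "fmt"
)

// command is a trimmed stand-in carrying only the fields this commit adds,
// with the same json tags as the diff above.
type command struct {
    ID           string   `json:"id"`
    ProcessLabel string   `json:"process_label"`
    MountLabel   string   `json:"mount_label"`
    LxcConfig    []string `json:"lxc_config"`
}

func main() {
    c := command{
        ID:           "1",
        ProcessLabel: "example-process-label",
        MountLabel:   "example-mount-label",
        LxcConfig:    []string{"utsname = docker"},
    }
    out, err := json.MarshalIndent(c, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
    // Prints keys "id", "process_label", "mount_label" and "lxc_config".
}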
@@ -22,7 +22,6 @@ import (
    "github.com/docker/docker/pkg/term"
    "github.com/docker/docker/utils"
    "github.com/docker/libcontainer/cgroups"
    "github.com/docker/libcontainer/label"
    "github.com/docker/libcontainer/mount/nodes"
)

@@ -410,37 +409,24 @@ func rootIsShared() bool {
}

func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
    var (
        process, mount string
        root = path.Join(d.root, "containers", c.ID, "config.lxc")
        labels = c.Config["label"]
    )
    root := path.Join(d.root, "containers", c.ID, "config.lxc")

    fo, err := os.Create(root)
    if err != nil {
        return "", err
    }
    defer fo.Close()

    if len(labels) > 0 {
        process, mount, err = label.GenLabels(labels[0])
        if err != nil {
            return "", err
        }
    }

    if err := LxcTemplateCompiled.Execute(fo, struct {
        *execdriver.Command
        AppArmor bool
        ProcessLabel string
        MountLabel string
        AppArmor bool
    }{
        Command: c,
        AppArmor: d.apparmor,
        ProcessLabel: process,
        MountLabel: mount,
        Command: c,
        AppArmor: d.apparmor,
    }); err != nil {
        return "", err
    }

    return root, nil
}
@@ -34,10 +34,6 @@ lxc.pts = 1024

# disable the main console
lxc.console = none
{{if .ProcessLabel}}
lxc.se_context = {{ .ProcessLabel}}
{{end}}
{{$MOUNTLABEL := .MountLabel}}

# no controlling tty at all
lxc.tty = 1

@@ -70,8 +66,8 @@ lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noe
lxc.mount.entry = {{.ProcessConfig.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
{{end}}

lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" $MOUNTLABEL}} 0 0
lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" $MOUNTLABEL}} 0 0
lxc.mount.entry = devpts {{escapeFstabSpaces $ROOTFS}}/dev/pts devpts {{formatMountLabel "newinstance,ptmxmode=0666,nosuid,noexec" ""}} 0 0
lxc.mount.entry = shm {{escapeFstabSpaces $ROOTFS}}/dev/shm tmpfs {{formatMountLabel "size=65536k,nosuid,nodev,noexec" ""}} 0 0

{{range $value := .Mounts}}
{{if $value.Writable}}

@@ -106,8 +102,8 @@ lxc.cgroup.cpuset.cpus = {{.Resources.Cpuset}}
{{end}}
{{end}}

{{if .Config.lxc}}
{{range $value := .Config.lxc}}
{{if .LxcConfig}}
{{range $value := .LxcConfig}}
lxc.{{$value}}
{{end}}
{{end}}
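The LXC driver executes this template against a value that embeds *execdriver.Command, so a field such as .LxcConfig resolves through the embedded command while AppArmor is supplied by the driver. A minimal sketch of that embedding pattern with text/template and stand-in types, assuming a cut-down template:

package main

import (
    "os"
    "text/template"
)

// command stands in for the embedded execdriver.Command; only the field the
// template below reads is included.
type command struct {
    LxcConfig []string
}

// templateData mirrors the anonymous struct passed to LxcTemplateCompiled:
// the command is embedded, AppArmor is supplied by the driver.
type templateData struct {
    *command
    AppArmor bool
}

// A cut-down template using the same range-over-LxcConfig idiom as the diff.
var lxcTemplate = template.Must(template.New("lxc").Parse(
    "{{if .LxcConfig}}{{range $value := .LxcConfig}}lxc.{{$value}}\n{{end}}{{end}}" +
        "# apparmor enabled: {{.AppArmor}}\n"))

func main() {
    data := templateData{
        command:  &command{LxcConfig: []string{"utsname = docker", "cgroup.cpuset.cpus = 0,1"}},
        AppArmor: true,
    }
    if err := lxcTemplate.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
    // Output:
    // lxc.utsname = docker
    // lxc.cgroup.cpuset.cpus = 0,1
    // # apparmor enabled: true
}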
@@ -83,11 +83,9 @@ func TestCustomLxcConfig(t *testing.T) {
    }
    command := &execdriver.Command{
        ID: "1",
        Config: map[string][]string{
            "lxc": {
                "lxc.utsname = docker",
                "lxc.cgroup.cpuset.cpus = 0,1",
            },
        LxcConfig: []string{
            "lxc.utsname = docker",
            "lxc.cgroup.cpuset.cpus = 0,1",
        },
        Network: &execdriver.Network{
            Mtu: 1500,
@@ -1,188 +0,0 @@
package configuration

import (
    "fmt"
    "os/exec"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/docker/docker/pkg/units"
    "github.com/docker/libcontainer"
)

type Action func(*libcontainer.Config, interface{}, string) error

var actions = map[string]Action{
    "cap.add": addCap, // add a cap
    "cap.drop": dropCap, // drop a cap

    "ns.add": addNamespace, // add a namespace
    "ns.drop": dropNamespace, // drop a namespace when cloning

    "net.join": joinNetNamespace, // join another containers net namespace

    "cgroups.cpu_shares": cpuShares, // set the cpu shares
    "cgroups.memory": memory, // set the memory limit
    "cgroups.memory_reservation": memoryReservation, // set the memory reservation
    "cgroups.memory_swap": memorySwap, // set the memory swap limit
    "cgroups.cpuset.cpus": cpusetCpus, // set the cpus used

    "systemd.slice": systemdSlice, // set parent Slice used for systemd unit

    "apparmor_profile": apparmorProfile, // set the apparmor profile to apply

    "fs.readonly": readonlyFs, // make the rootfs of the container read only
}

func cpusetCpus(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set cgroups when they are disabled")
    }
    container.Cgroups.CpusetCpus = value

    return nil
}

func systemdSlice(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set slice when cgroups are disabled")
    }
    container.Cgroups.Slice = value

    return nil
}

func apparmorProfile(container *libcontainer.Config, context interface{}, value string) error {
    container.AppArmorProfile = value
    return nil
}

func cpuShares(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set cgroups when they are disabled")
    }
    v, err := strconv.ParseInt(value, 10, 0)
    if err != nil {
        return err
    }
    container.Cgroups.CpuShares = v
    return nil
}

func memory(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set cgroups when they are disabled")
    }

    v, err := units.RAMInBytes(value)
    if err != nil {
        return err
    }
    container.Cgroups.Memory = v
    return nil
}

func memoryReservation(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set cgroups when they are disabled")
    }

    v, err := units.RAMInBytes(value)
    if err != nil {
        return err
    }
    container.Cgroups.MemoryReservation = v
    return nil
}

func memorySwap(container *libcontainer.Config, context interface{}, value string) error {
    if container.Cgroups == nil {
        return fmt.Errorf("cannot set cgroups when they are disabled")
    }
    v, err := strconv.ParseInt(value, 0, 64)
    if err != nil {
        return err
    }
    container.Cgroups.MemorySwap = v
    return nil
}

func addCap(container *libcontainer.Config, context interface{}, value string) error {
    container.Capabilities = append(container.Capabilities, value)
    return nil
}

func dropCap(container *libcontainer.Config, context interface{}, value string) error {
    // If the capability is specified multiple times, remove all instances.
    for i, capability := range container.Capabilities {
        if capability == value {
            container.Capabilities = append(container.Capabilities[:i], container.Capabilities[i+1:]...)
        }
    }

    // The capability wasn't found so we will drop it anyways.
    return nil
}

func addNamespace(container *libcontainer.Config, context interface{}, value string) error {
    container.Namespaces[value] = true
    return nil
}

func dropNamespace(container *libcontainer.Config, context interface{}, value string) error {
    container.Namespaces[value] = false
    return nil
}

func readonlyFs(container *libcontainer.Config, context interface{}, value string) error {
    switch value {
    case "1", "true":
        container.MountConfig.ReadonlyFs = true
    default:
        container.MountConfig.ReadonlyFs = false
    }
    return nil
}

func joinNetNamespace(container *libcontainer.Config, context interface{}, value string) error {
    var (
        running = context.(map[string]*exec.Cmd)
        cmd = running[value]
    )

    if cmd == nil || cmd.Process == nil {
        return fmt.Errorf("%s is not a valid running container to join", value)
    }

    nspath := filepath.Join("/proc", fmt.Sprint(cmd.Process.Pid), "ns", "net")
    container.Networks = append(container.Networks, &libcontainer.Network{
        Type: "netns",
        NsPath: nspath,
    })

    return nil
}

// configureCustomOptions takes string commands from the user and allows modification of the
// container's default configuration.
//
// TODO: this can be moved to a general utils or parser in pkg
func ParseConfiguration(container *libcontainer.Config, running map[string]*exec.Cmd, opts []string) error {
    for _, opt := range opts {
        kv := strings.SplitN(opt, "=", 2)
        if len(kv) < 2 {
            return fmt.Errorf("invalid format for %s", opt)
        }

        action, exists := actions[kv[0]]
        if !exists {
            return fmt.Errorf("%s is not a valid option for the native driver", kv[0])
        }

        if err := action(container, running, kv[1]); err != nil {
            return err
        }
    }
    return nil
}
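The removed configuration package dispatched dotted option strings such as cgroups.memory=500m to per-key handler functions through the actions map above. For reference, a condensed, self-contained sketch of that dispatch pattern; the config struct and the simplified handlers here are illustrative stand-ins for libcontainer.Config and the original parsers:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// config is a simplified stand-in for libcontainer.Config.
type config struct {
    Memory     int64
    ReadonlyFs bool
}

type action func(c *config, value string) error

var actions = map[string]action{
    "cgroups.memory": func(c *config, value string) error {
        // Simplified: accepts raw bytes instead of the original "500m"-style units.
        v, err := strconv.ParseInt(value, 10, 64)
        if err != nil {
            return err
        }
        c.Memory = v
        return nil
    },
    "fs.readonly": func(c *config, value string) error {
        c.ReadonlyFs = value == "1" || value == "true"
        return nil
    },
}

// parseConfiguration mirrors the shape of the removed ParseConfiguration:
// split each opt on "=", look up the handler, apply it.
func parseConfiguration(c *config, opts []string) error {
    for _, opt := range opts {
        kv := strings.SplitN(opt, "=", 2)
        if len(kv) < 2 {
            return fmt.Errorf("invalid format for %s", opt)
        }
        act, exists := actions[kv[0]]
        if !exists {
            return fmt.Errorf("%s is not a valid option for the native driver", kv[0])
        }
        if err := act(c, kv[1]); err != nil {
            return err
        }
    }
    return nil
}

func main() {
    var c config
    if err := parseConfiguration(&c, []string{"cgroups.memory=524288000", "fs.readonly=true"}); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", c)
}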
@@ -1,193 +0,0 @@
package configuration

import (
    "testing"

    "github.com/docker/docker/daemon/execdriver/native/template"
    "github.com/docker/libcontainer/security/capabilities"
)

// Checks whether the expected capability is specified in the capabilities.
func hasCapability(expected string, capabilities []string) bool {
    for _, capability := range capabilities {
        if capability == expected {
            return true
        }
    }
    return false
}

func TestSetReadonlyRootFs(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "fs.readonly=true",
        }
    )

    if container.MountConfig.ReadonlyFs {
        t.Fatal("container should not have a readonly rootfs by default")
    }
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if !container.MountConfig.ReadonlyFs {
        t.Fatal("container should have a readonly rootfs")
    }
}

func TestConfigurationsDoNotConflict(t *testing.T) {
    var (
        container1 = template.New()
        container2 = template.New()
        opts = []string{
            "cap.add=NET_ADMIN",
        }
    )

    if err := ParseConfiguration(container1, nil, opts); err != nil {
        t.Fatal(err)
    }

    if !hasCapability("NET_ADMIN", container1.Capabilities) {
        t.Fatal("container one should have NET_ADMIN enabled")
    }
    if hasCapability("NET_ADMIN", container2.Capabilities) {
        t.Fatal("container two should not have NET_ADMIN enabled")
    }
}

func TestCpusetCpus(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cgroups.cpuset.cpus=1,2",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if expected := "1,2"; container.Cgroups.CpusetCpus != expected {
        t.Fatalf("expected %s got %s for cpuset.cpus", expected, container.Cgroups.CpusetCpus)
    }
}

func TestAppArmorProfile(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "apparmor_profile=koye-the-protector",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if expected := "koye-the-protector"; container.AppArmorProfile != expected {
        t.Fatalf("expected profile %s got %s", expected, container.AppArmorProfile)
    }
}

func TestCpuShares(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cgroups.cpu_shares=1048",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if expected := int64(1048); container.Cgroups.CpuShares != expected {
        t.Fatalf("expected cpu shares %d got %d", expected, container.Cgroups.CpuShares)
    }
}

func TestMemory(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cgroups.memory=500m",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if expected := int64(500 * 1024 * 1024); container.Cgroups.Memory != expected {
        t.Fatalf("expected memory %d got %d", expected, container.Cgroups.Memory)
    }
}

func TestMemoryReservation(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cgroups.memory_reservation=500m",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if expected := int64(500 * 1024 * 1024); container.Cgroups.MemoryReservation != expected {
        t.Fatalf("expected memory reservation %d got %d", expected, container.Cgroups.MemoryReservation)
    }
}

func TestAddCap(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cap.add=MKNOD",
            "cap.add=SYS_ADMIN",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if !hasCapability("MKNOD", container.Capabilities) {
        t.Fatal("container should have MKNOD enabled")
    }
    if !hasCapability("SYS_ADMIN", container.Capabilities) {
        t.Fatal("container should have SYS_ADMIN enabled")
    }
}

func TestDropCap(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "cap.drop=MKNOD",
        }
    )
    // enabled all caps like in privileged mode
    container.Capabilities = capabilities.GetAllCapabilities()
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if hasCapability("MKNOD", container.Capabilities) {
        t.Fatal("container should not have MKNOD enabled")
    }
}

func TestDropNamespace(t *testing.T) {
    var (
        container = template.New()
        opts = []string{
            "ns.drop=NEWNET",
        }
    )
    if err := ParseConfiguration(container, nil, opts); err != nil {
        t.Fatal(err)
    }

    if container.Namespaces["NEWNET"] {
        t.Fatal("container should not have NEWNET enabled")
    }
}
@@ -9,7 +9,6 @@ import (
    "path/filepath"

    "github.com/docker/docker/daemon/execdriver"
    "github.com/docker/docker/daemon/execdriver/native/configuration"
    "github.com/docker/docker/daemon/execdriver/native/template"
    "github.com/docker/libcontainer"
    "github.com/docker/libcontainer/apparmor"

@@ -69,10 +68,6 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Config, e
    }
    d.Unlock()

    if err := configuration.ParseConfiguration(container, cmds, c.Config["native"]); err != nil {
        return nil, err
    }

    return container, nil
}

@@ -175,8 +170,8 @@ func (d *driver) setupMounts(container *libcontainer.Config, c *execdriver.Comma
}

func (d *driver) setupLabels(container *libcontainer.Config, c *execdriver.Command) error {
    container.ProcessLabel = c.Config["process_label"][0]
    container.MountConfig.MountLabel = c.Config["mount_label"][0]
    container.ProcessLabel = c.ProcessLabel
    container.MountConfig.MountLabel = c.MountLabel

    return nil
}
@@ -32,20 +32,22 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon
    return nil
}

func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[string][]string) {
func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig) []string {
    if hostConfig == nil {
        return
        return nil
    }

    out := []string{}

    // merge in the lxc conf options into the generic config map
    if lxcConf := hostConfig.LxcConf; lxcConf != nil {
        lxc := driverConfig["lxc"]
        for _, pair := range lxcConf {
            // because lxc conf gets the driver name lxc.XXXX we need to trim it off
            // and let the lxc driver add it back later if needed
            parts := strings.SplitN(pair.Key, ".", 2)
            lxc = append(lxc, fmt.Sprintf("%s=%s", parts[1], pair.Value))
            out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value))
        }
        driverConfig["lxc"] = lxc
    }

    return out
}
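With the new signature, mergeLxcConfIntoOptions returns a plain []string that populateCommand assigns to Command.LxcConfig instead of mutating a shared driver config map. A rough, self-contained sketch of that call shape; the keyValuePair and hostConfig types stand in for utils.KeyValuePair and runconfig.HostConfig:

package main

import (
    "fmt"
    "strings"
)

// keyValuePair and hostConfig stand in for utils.KeyValuePair and
// runconfig.HostConfig from the diff above.
type keyValuePair struct {
    Key   string
    Value string
}

type hostConfig struct {
    LxcConf []keyValuePair
}

// mergeLxcConfIntoOptions follows the new signature: it returns the flattened
// lxc options rather than writing them into a map[string][]string.
func mergeLxcConfIntoOptions(hc *hostConfig) []string {
    if hc == nil {
        return nil
    }
    out := []string{}
    for _, pair := range hc.LxcConf {
        // Trim the leading "lxc." so the lxc driver can add it back when rendering.
        parts := strings.SplitN(pair.Key, ".", 2)
        out = append(out, fmt.Sprintf("%s=%s", parts[1], pair.Value))
    }
    return out
}

func main() {
    hc := &hostConfig{LxcConf: []keyValuePair{{Key: "lxc.cgroups.cpuset", Value: "1,2"}}}
    lxcConfig := mergeLxcConfIntoOptions(hc)
    fmt.Println(lxcConfig) // [cgroups.cpuset=1,2]
}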
@@ -8,21 +8,15 @@ import (
)

func TestMergeLxcConfig(t *testing.T) {
    var (
        hostConfig = &runconfig.HostConfig{
            LxcConf: []utils.KeyValuePair{
                {Key: "lxc.cgroups.cpuset", Value: "1,2"},
            },
        }
        driverConfig = make(map[string][]string)
    )

    mergeLxcConfIntoOptions(hostConfig, driverConfig)
    if l := len(driverConfig["lxc"]); l > 1 {
        t.Fatalf("expected lxc options len of 1 got %d", l)
    hostConfig := &runconfig.HostConfig{
        LxcConf: []utils.KeyValuePair{
            {Key: "lxc.cgroups.cpuset", Value: "1,2"},
        },
    }

    cpuset := driverConfig["lxc"][0]
    out := mergeLxcConfIntoOptions(hostConfig)

    cpuset := out[0]
    if expected := "cgroups.cpuset=1,2"; cpuset != expected {
        t.Fatalf("expected %s got %s", expected, cpuset)
    }