Mirror of https://github.com/microsoft/docker.git
Add unit test for lxc conf merge and native opts
Docker-DCO-1.1-Signed-off-by: Michael Crosby <michael@crosbymichael.com> (github: crosbymichael)
Parent: 9a7be1b015
Commit: 10fdbc0467
@@ -383,14 +383,8 @@ func populateCommand(c *Container) {
 		}
 	}
-	// merge in the lxc conf options into the generic config map
-	if lxcConf := c.hostConfig.LxcConf; lxcConf != nil {
-		lxc := driverConfig["lxc"]
-		for _, pair := range lxcConf {
-			lxc = append(lxc, fmt.Sprintf("%s = %s", pair.Key, pair.Value))
-		}
-		driverConfig["lxc"] = lxc
-	}
+	// TODO: this can be removed after lxc-conf is fully deprecated
+	mergeLxcConfIntoOptions(c.hostConfig, driverConfig)
 
 	resources := &execdriver.Resources{
 		Memory: c.Config.Memory,
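The merge logic that used to live inline here is what carries lxc-conf overrides from the CLI into the driver config; the hunk above simply delegates it to the new mergeLxcConfIntoOptions helper further down in this diff. As a rough illustration of the kind of input that path handles (the flag spelling is from the docker run documentation of this era and may differ between releases):

    # pass a raw LXC setting through to the lxc exec driver (illustrative)
    docker run -lxc-conf="lxc.cgroup.cpuset.cpus=0,1" -d ubuntu /bin/sh

Each such key/value pair lands in c.hostConfig.LxcConf and is folded into driverConfig["lxc"] by the helper.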
@@ -120,7 +120,7 @@ lxc.cgroup.cpu.shares = {{.Resources.CpuShares}}
 
 {{if .Config.lxc}}
 {{range $value := .Config.lxc}}
-{{$value}}
+lxc.{{$value}}
 {{end}}
 {{end}}
 `
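This template change pairs with the key trimming introduced later in the diff: mergeLxcConfIntoOptions strips the leading "lxc." from each pair, and the lxc template re-adds it when it renders the config. A minimal, self-contained sketch of that rendering behaviour (the data shape here is illustrative, not the driver's actual Config type):

    package main

    import (
    	"os"
    	"text/template"
    )

    func main() {
    	// Values arrive without the "lxc." prefix (it was trimmed earlier)
    	// and get the prefix re-added at render time, as in the hunk above.
    	tmpl := template.Must(template.New("lxc").Parse(
    		"{{range $value := .lxc}}lxc.{{$value}}\n{{end}}"))
    	data := map[string][]string{"lxc": {"cgroup.cpuset.cpus = 0,1"}}
    	if err := tmpl.Execute(os.Stdout, data); err != nil {
    		panic(err)
    	}
    	// Output: lxc.cgroup.cpuset.cpus = 0,1
    }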
@@ -31,20 +31,6 @@ var actions = map[string]Action{
 	"fs.readonly": readonlyFs, // make the rootfs of the container read only
 }
 
-// GetSupportedActions returns a list of all the avaliable actions supported by the driver
-// TODO: this should return a description also
-func GetSupportedActions() []string {
-	var (
-		i   int
-		out = make([]string, len(actions))
-	)
-	for k := range actions {
-		out[i] = k
-		i++
-	}
-	return out
-}
-
 func cpusetCpus(container *libcontainer.Container, context interface{}, value string) error {
 	if container.Cgroups == nil {
 		return fmt.Errorf("cannot set cgroups when they are disabled")
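For readers unfamiliar with this package, the actions map above drives a simple "key=value" dispatch: each native opt string is split once and routed to a handler such as readonlyFs or cpusetCpus. The following stand-alone sketch approximates that pattern; it is not the package's real ParseConfiguration, and the handler shown is illustrative only:

    package main

    // Sketch of the dispatch style implied by the actions map: split each
    // "key=value" opt and route the value to a registered handler function.

    import (
    	"fmt"
    	"strings"
    )

    type action func(value string) error

    var handlers = map[string]action{
    	"fs.readonly": func(v string) error { fmt.Println("readonly rootfs:", v); return nil },
    }

    func parse(opts []string) error {
    	for _, opt := range opts {
    		kv := strings.SplitN(opt, "=", 2)
    		if len(kv) != 2 {
    			return fmt.Errorf("invalid option %q", opt)
    		}
    		handler, exists := handlers[kv[0]]
    		if !exists {
    			return fmt.Errorf("%s is not a valid option", kv[0])
    		}
    		if err := handler(kv[1]); err != nil {
    			return err
    		}
    	}
    	return nil
    }

    func main() {
    	if err := parse([]string{"fs.readonly=true"}); err != nil {
    		panic(err)
    	}
    }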
@@ -0,0 +1,166 @@
package configuration

import (
	"github.com/dotcloud/docker/runtime/execdriver/native/template"
	"testing"
)

func TestSetReadonlyRootFs(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"fs.readonly=true",
		}
	)

	if container.ReadonlyFs {
		t.Fatal("container should not have a readonly rootfs by default")
	}
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if !container.ReadonlyFs {
		t.Fatal("container should have a readonly rootfs")
	}
}

func TestConfigurationsDoNotConflict(t *testing.T) {
	var (
		container1 = template.New()
		container2 = template.New()
		opts       = []string{
			"cap.add=NET_ADMIN",
		}
	)

	if err := ParseConfiguration(container1, nil, opts); err != nil {
		t.Fatal(err)
	}

	if !container1.CapabilitiesMask.Get("NET_ADMIN").Enabled {
		t.Fatal("container one should have NET_ADMIN enabled")
	}
	if container2.CapabilitiesMask.Get("NET_ADMIN").Enabled {
		t.Fatal("container two should not have NET_ADMIN enabled")
	}
}

func TestCpusetCpus(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"cgroups.cpuset.cpus=1,2",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if expected := "1,2"; container.Cgroups.CpusetCpus != expected {
		t.Fatalf("expected %s got %s for cpuset.cpus", expected, container.Cgroups.CpusetCpus)
	}
}

func TestAppArmorProfile(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"apparmor_profile=koye-the-protector",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}
	if expected := "koye-the-protector"; container.Context["apparmor_profile"] != expected {
		t.Fatalf("expected profile %s got %s", expected, container.Context["apparmor_profile"])
	}
}

func TestCpuShares(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"cgroups.cpu_shares=1048",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if expected := int64(1048); container.Cgroups.CpuShares != expected {
		t.Fatalf("expected cpu shares %d got %d", expected, container.Cgroups.CpuShares)
	}
}

func TestCgroupMemory(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"cgroups.memory=500m",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if expected := int64(500 * 1024 * 1024); container.Cgroups.Memory != expected {
		t.Fatalf("expected memory %d got %d", expected, container.Cgroups.Memory)
	}
}

func TestAddCap(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"cap.add=MKNOD",
			"cap.add=SYS_ADMIN",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if !container.CapabilitiesMask.Get("MKNOD").Enabled {
		t.Fatal("container should have MKNOD enabled")
	}
	if !container.CapabilitiesMask.Get("SYS_ADMIN").Enabled {
		t.Fatal("container should have SYS_ADMIN enabled")
	}
}

func TestDropCap(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"cap.drop=MKNOD",
		}
	)
	// enabled all caps like in privileged mode
	for _, c := range container.CapabilitiesMask {
		c.Enabled = true
	}
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if container.CapabilitiesMask.Get("MKNOD").Enabled {
		t.Fatal("container should not have MKNOD enabled")
	}
}

func TestDropNamespace(t *testing.T) {
	var (
		container = template.New()
		opts      = []string{
			"ns.drop=NEWNET",
		}
	)
	if err := ParseConfiguration(container, nil, opts); err != nil {
		t.Fatal(err)
	}

	if container.Namespaces.Get("NEWNET").Enabled {
		t.Fatal("container should not have NEWNET enabled")
	}
}
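With the new test file in place, these tests can be exercised on their own. Docker's own harness at the time was hack/make.sh, but from a normal GOPATH checkout a plain go test invocation along these lines should work:

    go test -v ./runtime/execdriver/native/configuration/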
@@ -5,30 +5,53 @@ import (
 	"github.com/dotcloud/docker/pkg/libcontainer"
 	"github.com/dotcloud/docker/runtime/execdriver"
 	"github.com/dotcloud/docker/runtime/execdriver/native/configuration"
+	"github.com/dotcloud/docker/runtime/execdriver/native/template"
 	"os"
 )
 
 // createContainer populates and configures the container type with the
 // data provided by the execdriver.Command
 func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container, error) {
-	container := getDefaultTemplate()
+	container := template.New()
 
 	container.Hostname = getEnv("HOSTNAME", c.Env)
 	container.Tty = c.Tty
 	container.User = c.User
 	container.WorkingDir = c.WorkingDir
 	container.Env = c.Env
+	container.Cgroups.Name = c.ID
+	// check to see if we are running in ramdisk to disable pivot root
+	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
 
-	loopbackNetwork := libcontainer.Network{
+	if err := d.createNetwork(container, c); err != nil {
+		return nil, err
+	}
+	if c.Privileged {
+		if err := d.setPrivileged(container); err != nil {
+			return nil, err
+		}
+	}
+	if err := d.setupCgroups(container, c); err != nil {
+		return nil, err
+	}
+	if err := d.setupMounts(container, c); err != nil {
+		return nil, err
+	}
+	if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil {
+		return nil, err
+	}
+	return container, nil
+}
+
+func (d *driver) createNetwork(container *libcontainer.Container, c *execdriver.Command) error {
+	container.Networks = []*libcontainer.Network{
+		{
 		Mtu:     c.Network.Mtu,
 		Address: fmt.Sprintf("%s/%d", "127.0.0.1", 0),
 		Gateway: "localhost",
 		Type:    "loopback",
 		Context: libcontainer.Context{},
-	}
-
-	container.Networks = []*libcontainer.Network{
-		&loopbackNetwork,
+		},
 	}
 
 	if c.Network.Interface != nil {
@@ -44,27 +67,30 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
 		}
 		container.Networks = append(container.Networks, &vethNetwork)
 	}
+	return nil
+}
 
-	container.Cgroups.Name = c.ID
-	if c.Privileged {
-		container.CapabilitiesMask = nil
+func (d *driver) setPrivileged(container *libcontainer.Container) error {
+	for _, c := range container.CapabilitiesMask {
+		c.Enabled = true
+	}
 	container.Cgroups.DeviceAccess = true
 	container.Context["apparmor_profile"] = "unconfined"
-	}
+	return nil
+}
 
+func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.Command) error {
 	if c.Resources != nil {
 		container.Cgroups.CpuShares = c.Resources.CpuShares
 		container.Cgroups.Memory = c.Resources.Memory
 		container.Cgroups.MemorySwap = c.Resources.MemorySwap
 	}
-	// check to see if we are running in ramdisk to disable pivot root
-	container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
+	return nil
+}
 
+func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
 	for _, m := range c.Mounts {
 		container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private})
 	}
-
-	if err := configuration.ParseConfiguration(container, d.activeContainers, c.Config["native"]); err != nil {
-		return nil, err
-	}
-	return container, nil
+	return nil
 }
@@ -1,13 +1,12 @@
-package native
+package template
 
 import (
	"github.com/dotcloud/docker/pkg/cgroups"
	"github.com/dotcloud/docker/pkg/libcontainer"
 )
 
-// getDefaultTemplate returns the docker default for
-// the libcontainer configuration file
-func getDefaultTemplate() *libcontainer.Container {
+// New returns the docker default configuration for libcontainer
+func New() *libcontainer.Container {
 	return &libcontainer.Container{
 		CapabilitiesMask: libcontainer.Capabilities{
 			libcontainer.GetCapability("SETPCAP"),
@@ -1,9 +1,11 @@
 package runtime
 
 import (
+	"fmt"
 	"github.com/dotcloud/docker/nat"
 	"github.com/dotcloud/docker/pkg/namesgenerator"
 	"github.com/dotcloud/docker/runconfig"
+	"strings"
 )
 
 func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostConfig) error {
@@ -30,6 +32,24 @@ func migratePortMappings(config *runconfig.Config, hostConfig *runconfig.HostCon
 	return nil
 }
 
+func mergeLxcConfIntoOptions(hostConfig *runconfig.HostConfig, driverConfig map[string][]string) {
+	if hostConfig == nil {
+		return
+	}
+
+	// merge in the lxc conf options into the generic config map
+	if lxcConf := hostConfig.LxcConf; lxcConf != nil {
+		lxc := driverConfig["lxc"]
+		for _, pair := range lxcConf {
+			// because lxc conf gets the driver name lxc.XXXX we need to trim it off
+			// and let the lxc driver add it back later if needed
+			parts := strings.SplitN(pair.Key, ".", 2)
+			lxc = append(lxc, fmt.Sprintf("%s=%s", parts[1], pair.Value))
+		}
+		driverConfig["lxc"] = lxc
+	}
+}
+
 type checker struct {
 	runtime *Runtime
 }
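One detail worth spelling out about the SplitN call above: with n set to 2 it cuts at the first "." only, so the "lxc" driver prefix is dropped while the rest of the dotted key is preserved. A tiny self-contained sketch of that behaviour:

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// n=2 yields at most two pieces: everything after the first "." stays intact.
    	parts := strings.SplitN("lxc.cgroups.cpuset", ".", 2)
    	fmt.Println(parts[0]) // lxc
    	fmt.Println(parts[1]) // cgroups.cpuset
    }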
@@ -0,0 +1,27 @@
package runtime

import (
	"github.com/dotcloud/docker/runconfig"
	"testing"
)

func TestMergeLxcConfig(t *testing.T) {
	var (
		hostConfig = &runconfig.HostConfig{
			LxcConf: []runconfig.KeyValuePair{
				{Key: "lxc.cgroups.cpuset", Value: "1,2"},
			},
		}
		driverConfig = make(map[string][]string)
	)

	mergeLxcConfIntoOptions(hostConfig, driverConfig)
	if l := len(driverConfig["lxc"]); l > 1 {
		t.Fatalf("expected lxc options len of 1 got %d", l)
	}

	cpuset := driverConfig["lxc"][0]
	if expected := "cgroups.cpuset=1,2"; cpuset != expected {
		t.Fatalf("expected %s got %s", expected, cpuset)
	}
}
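Taken together with the lxc template hunk earlier, the expected value here shows the round trip: the "lxc." prefix stripped by mergeLxcConfIntoOptions is re-added by the template at render time, so this entry would come out in the generated config as lxc.cgroups.cpuset=1,2. To run just this test, a standard filtered invocation should work:

    go test -v -run TestMergeLxcConfig ./runtime/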