Merge branch 'master' into libcontainer-fixes

Conflicts:
	pkg/libcontainer/README.md
	pkg/libcontainer/container.json

Docker-DCO-1.1-Signed-off-by: Rohit Jnagal <jnagal@google.com> (github: rjnagal)
This commit is contained in:
Rohit Jnagal 2014-04-28 23:04:04 +00:00
Parents 8cdb720d26 af72ca199d
Commit c44c51e3ce
60 changed files with 1828 additions and 811 deletions

View file

@ -654,9 +654,12 @@ func (container *Container) Kill() error {
// 2. Wait for the process to die, in last resort, try to kill the process directly
if err := container.WaitTimeout(10 * time.Second); err != nil {
log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
if err := syscall.Kill(container.State.Pid, 9); err != nil {
return err
// Ensure that we don't kill ourselves
if pid := container.State.Pid; pid != 0 {
log.Printf("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", utils.TruncateID(container.ID))
if err := syscall.Kill(pid, 9); err != nil {
return err
}
}
}
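
As an aside, a minimal self-contained Go sketch of the pattern this hunk converges on: wait for exit with a timeout, then fall back to a direct SIGKILL only when the recorded PID is non-zero, so a stale zero PID can never signal the daemon's own process group. The helper names and the short timeout below are illustrative stand-ins, not Docker's real API.

```go
// A sketch only: waitTimeout and the hard-coded delay stand in for
// container.WaitTimeout; they are not Docker's real API.
package main

import (
	"fmt"
	"syscall"
	"time"
)

// waitTimeout simulates waiting for a process that never exits.
func waitTimeout(d time.Duration) error {
	time.Sleep(d)
	return fmt.Errorf("timed out after %s", d)
}

// killWithFallback mirrors the guarded fallback in the hunk above.
func killWithFallback(pid int) error {
	if err := waitTimeout(10 * time.Millisecond); err != nil {
		// Ensure that we don't kill ourselves: pid 0 would signal the
		// caller's own process group, so it is skipped entirely.
		if pid != 0 {
			fmt.Printf("process %d failed to exit - trying direct SIGKILL\n", pid)
			if err := syscall.Kill(pid, syscall.SIGKILL); err != nil {
				return err
			}
		}
	}
	return nil
}

func main() {
	// A zero PID exercises the guard without sending any signal.
	if err := killWithFallback(0); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println("done")
}
```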

View file

@ -59,9 +59,10 @@ func init() {
}
type driver struct {
root string // root path for the driver to use
apparmor bool
sharedRoot bool
root string // root path for the driver to use
apparmor bool
sharedRoot bool
restrictionPath string
}
func NewDriver(root string, apparmor bool) (*driver, error) {
@ -69,10 +70,15 @@ func NewDriver(root string, apparmor bool) (*driver, error) {
if err := linkLxcStart(root); err != nil {
return nil, err
}
restrictionPath := filepath.Join(root, "empty")
if err := os.MkdirAll(restrictionPath, 0700); err != nil {
return nil, err
}
return &driver{
apparmor: apparmor,
root: root,
sharedRoot: rootIsShared(),
apparmor: apparmor,
root: root,
sharedRoot: rootIsShared(),
restrictionPath: restrictionPath,
}, nil
}
@ -403,14 +409,16 @@ func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
if err := LxcTemplateCompiled.Execute(fo, struct {
*execdriver.Command
AppArmor bool
ProcessLabel string
MountLabel string
AppArmor bool
ProcessLabel string
MountLabel string
RestrictionSource string
}{
Command: c,
AppArmor: d.apparmor,
ProcessLabel: process,
MountLabel: mount,
Command: c,
AppArmor: d.apparmor,
ProcessLabel: process,
MountLabel: mount,
RestrictionSource: d.restrictionPath,
}); err != nil {
return "", err
}
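
The `empty` directory created in NewDriver is the source for the read-only bind mounts that mask /proc paths in non-privileged containers (see the template hunk that follows). A rough, Linux-only illustration of that masking mechanic, not libcontainer's actual restrict code: the paths below are hypothetical and the mount itself needs root.

```go
// restrict_sketch.go: bind an empty directory over a target path and remount
// the bind read-only. This is an illustration under assumed paths, not the
// libcontainer implementation; running the mount requires root.
package main

import (
	"fmt"
	"os"
	"syscall"
)

func maskPath(emptyDir, target string) error {
	// First bind the empty directory over the target...
	if err := syscall.Mount(emptyDir, target, "", syscall.MS_BIND, ""); err != nil {
		return fmt.Errorf("bind %s over %s: %v", emptyDir, target, err)
	}
	// ...then remount the bind read-only; MS_RDONLY on the initial bind call
	// is not reliably honored, so a remount pass is the usual second step.
	if err := syscall.Mount(emptyDir, target, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil {
		return fmt.Errorf("remount %s read-only: %v", target, err)
	}
	return nil
}

func main() {
	emptyDir := "/tmp/docker-empty" // stand-in for filepath.Join(root, "empty")
	if err := os.MkdirAll(emptyDir, 0700); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if os.Geteuid() != 0 {
		fmt.Println("not root: skipping the actual mount, directory prepared at", emptyDir)
		return
	}
	if err := maskPath(emptyDir, "/proc/sys"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("/proc/sys is now masked read-only by", emptyDir)
}
```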

View file

@ -88,7 +88,9 @@ lxc.mount.entry = proc {{escapeFstabSpaces $ROOTFS}}/proc proc nosuid,nodev,noex
# WARNING: sysfs is a known attack vector and should probably be disabled
# if your userspace allows it. eg. see http://bit.ly/T9CkqJ
{{if .Privileged}}
lxc.mount.entry = sysfs {{escapeFstabSpaces $ROOTFS}}/sys sysfs nosuid,nodev,noexec 0 0
{{end}}
{{if .Tty}}
lxc.mount.entry = {{.Console}} {{escapeFstabSpaces $ROOTFS}}/dev/console none bind,rw 0 0
@ -109,8 +111,15 @@ lxc.mount.entry = {{$value.Source}} {{escapeFstabSpaces $ROOTFS}}/{{escapeFstabS
{{if .AppArmor}}
lxc.aa_profile = unconfined
{{else}}
#lxc.aa_profile = unconfined
# not unconfined
{{end}}
{{else}}
# restrict access to proc
lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/sys none bind,ro 0 0
lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/irq none bind,ro 0 0
lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces $ROOTFS}}/proc/acpi none bind,ro 0 0
lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/sysrq-trigger none bind,ro 0 0
lxc.mount.entry = {{escapeFstabSpaces $ROOTFS}}/dev/null {{escapeFstabSpaces $ROOTFS}}/proc/kcore none bind,ro 0 0
{{end}}
# limits
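
For reference, a trimmed-down sketch of how RestrictionSource reaches these rendered lines through the driver's text/template pipeline. The template text, struct fields, and paths below are stand-ins, not the real lxc_template.go.

```go
// template_sketch.go: shows the plumbing from driver-side fields to rendered
// lxc.conf lines. escapeFstabSpaces mirrors the intent of the real helper
// (fstab fields cannot contain literal spaces); the data values are made up.
package main

import (
	"os"
	"strings"
	"text/template"
)

const lxcTemplate = `{{if not .Privileged}}
# restrict access to proc
lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces .Rootfs}}/proc/sys none bind,ro 0 0
lxc.mount.entry = {{.RestrictionSource}} {{escapeFstabSpaces .Rootfs}}/proc/irq none bind,ro 0 0
{{end}}`

func escapeFstabSpaces(field string) string {
	return strings.Replace(field, " ", "\\040", -1)
}

func main() {
	tmpl := template.Must(template.New("lxc").
		Funcs(template.FuncMap{"escapeFstabSpaces": escapeFstabSpaces}).
		Parse(lxcTemplate))

	// Hypothetical values: the real driver derives RestrictionSource from
	// filepath.Join(root, "empty") created in NewDriver.
	data := struct {
		Privileged        bool
		Rootfs            string
		RestrictionSource string
	}{
		Privileged:        false,
		Rootfs:            "/var/lib/docker/containers/abc/root fs",
		RestrictionSource: "/var/lib/docker/execdriver/lxc/empty",
	}
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```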

View file

@ -25,6 +25,7 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
container.Cgroups.Name = c.ID
// check to see if we are running in ramdisk to disable pivot root
container.NoPivotRoot = os.Getenv("DOCKER_RAMDISK") != ""
container.Context["restriction_path"] = d.restrictionPath
if err := d.createNetwork(container, c); err != nil {
return nil, err
@ -33,6 +34,8 @@ func (d *driver) createContainer(c *execdriver.Command) (*libcontainer.Container
if err := d.setPrivileged(container); err != nil {
return nil, err
}
} else {
container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "devtmpfs"})
}
if err := d.setupCgroups(container, c); err != nil {
return nil, err
@ -81,6 +84,11 @@ func (d *driver) setPrivileged(container *libcontainer.Container) error {
c.Enabled = true
}
container.Cgroups.DeviceAccess = true
// add sysfs as a mount for privileged containers
container.Mounts = append(container.Mounts, libcontainer.Mount{Type: "sysfs"})
delete(container.Context, "restriction_path")
if apparmor.IsEnabled() {
container.Context["apparmor_profile"] = "unconfined"
}
@ -99,7 +107,13 @@ func (d *driver) setupCgroups(container *libcontainer.Container, c *execdriver.C
func (d *driver) setupMounts(container *libcontainer.Container, c *execdriver.Command) error {
for _, m := range c.Mounts {
container.Mounts = append(container.Mounts, libcontainer.Mount{m.Source, m.Destination, m.Writable, m.Private})
container.Mounts = append(container.Mounts, libcontainer.Mount{
Type: "bind",
Source: m.Source,
Destination: m.Destination,
Writable: m.Writable,
Private: m.Private,
})
}
return nil
}
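
The keyed struct literal introduced here is what allows libcontainer.Mount to gain a Type field without every positional call site breaking silently. A small standalone sketch, using a simplified stand-in for the Mount type, of the resulting mount list for a non-privileged container:

```go
// mounts_sketch.go: keyed literals keep compiling and stay readable as the
// struct grows. Mount here is a simplified stand-in for libcontainer.Mount,
// and the example paths are hypothetical.
package main

import "fmt"

type Mount struct {
	Type        string
	Source      string
	Destination string
	Writable    bool
	Private     bool
}

func main() {
	mounts := []Mount{}

	// Keyed literal: unaffected if more fields are appended to Mount later.
	mounts = append(mounts, Mount{
		Type:        "bind",
		Source:      "/var/lib/docker/volumes/data",
		Destination: "/data",
		Writable:    true,
		Private:     true,
	})

	// Non-privileged containers additionally get a devtmpfs entry,
	// mirroring the createContainer change above.
	mounts = append(mounts, Mount{Type: "devtmpfs"})

	for _, m := range mounts {
		fmt.Printf("%-9s %s -> %s (writable=%v private=%v)\n",
			m.Type, m.Source, m.Destination, m.Writable, m.Private)
	}
}
```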

View file

@ -23,7 +23,7 @@ import (
const (
DriverName = "native"
Version = "0.1"
Version = "0.2"
BackupApparmorProfilePath = "apparmor/docker.back" // relative to docker root
)
@ -62,6 +62,7 @@ type driver struct {
root string
initPath string
activeContainers map[string]*exec.Cmd
restrictionPath string
}
func NewDriver(root, initPath string) (*driver, error) {
@ -72,8 +73,14 @@ func NewDriver(root, initPath string) (*driver, error) {
if err := apparmor.InstallDefaultProfile(filepath.Join(root, "../..", BackupApparmorProfilePath)); err != nil {
return nil, err
}
restrictionPath := filepath.Join(root, "empty")
if err := os.MkdirAll(restrictionPath, 0700); err != nil {
return nil, err
}
return &driver{
root: root,
restrictionPath: restrictionPath,
initPath: initPath,
activeContainers: make(map[string]*exec.Cmd),
}, nil

View file

@ -246,22 +246,22 @@ func createVolumes(container *Container) error {
if err := archive.CopyWithTar(rootVolPath, srcPath); err != nil {
return err
}
}
}
var stat syscall.Stat_t
if err := syscall.Stat(rootVolPath, &stat); err != nil {
return err
}
var srcStat syscall.Stat_t
if err := syscall.Stat(srcPath, &srcStat); err != nil {
return err
}
// Change the source volume's ownership if it differs from the root
// files that were just copied
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
var stat syscall.Stat_t
if err := syscall.Stat(rootVolPath, &stat); err != nil {
return err
}
var srcStat syscall.Stat_t
if err := syscall.Stat(srcPath, &srcStat); err != nil {
return err
}
// Change the source volume's ownership if it differs from the root
// files that were just copied
if stat.Uid != srcStat.Uid || stat.Gid != srcStat.Gid {
if err := os.Chown(srcPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}
}
}

View file

@ -4,74 +4,70 @@ Docker Documentation
Overview
--------
The source for Docker documentation is here under ``sources/`` and uses
The source for Docker documentation is here under `sources/` and uses
extended Markdown, as implemented by [mkdocs](http://mkdocs.org).
The HTML files are built and hosted on https://docs.docker.io, and update
automatically after each change to the master or release branch of the
[docker files on GitHub](https://github.com/dotcloud/docker) thanks to
post-commit hooks. The "release" branch maps to the "latest"
documentation and the "master" (unreleased development) branch maps to the "master"
documentation.
The HTML files are built and hosted on `https://docs.docker.io`, and
update automatically after each change to the master or release branch
of [Docker on GitHub](https://github.com/dotcloud/docker)
thanks to post-commit hooks. The "docs" branch maps to the "latest"
documentation and the "master" (unreleased development) branch maps to
the "master" documentation.
## Branches
**There are two branches related to editing docs**: ``master`` and a
``docs`` branch. You should always edit
docs on a local branch of the ``master`` branch, and send a PR against ``master``.
That way your fixes
will automatically get included in later releases, and docs maintainers
can easily cherry-pick your changes into the ``docs`` release branch.
In the rare case where your change is not forward-compatible,
you may need to base your changes on the ``docs`` branch.
**There are two branches related to editing docs**: `master` and a
`docs` branch. You should always edit documentation on a local branch
of the `master` branch, and send a PR against `master`.
Now that we have a ``docs`` branch, we can keep the [http://docs.docker.io](http://docs.docker.io) docs
up to date with any bugs found between ``docker`` code releases.
That way your fixes will automatically get included in later releases,
and docs maintainers can easily cherry-pick your changes into the
`docs` release branch. In the rare case where your change is not
forward-compatible, you may need to base your changes on the `docs`
branch.
**Warning**: When *reading* the docs, the [http://beta-docs.docker.io](http://beta-docs.docker.io) documentation may
include features not yet part of any official docker
release. The ``beta-docs`` site should be used only for understanding
bleeding-edge development and ``docs.docker.io`` (which points to the ``docs``
branch``) should be used for the latest official release.
Also, now that we have a `docs` branch, we can keep the
[http://docs.docker.io](http://docs.docker.io) docs up to date with any
bugs found between `docker` code releases.
**Warning**: When *reading* the docs, the
[http://beta-docs.docker.io](http://beta-docs.docker.io) documentation
may include features not yet part of any official docker release. The
`beta-docs` site should be used only for understanding bleeding-edge
development and `docs.docker.io` (which points to the `docs`
branch`) should be used for the latest official release.
Getting Started
---------------
Docker documentation builds are done in a docker container, which installs all
the required tools, adds the local ``docs/`` directory and builds the HTML
docs. It then starts a HTTP server on port 8000 so that you can connect
and see your changes.
Docker documentation builds are done in a Docker container, which
installs all the required tools, adds the local `docs/` directory and
builds the HTML docs. It then starts a HTTP server on port 8000 so that
you can connect and see your changes.
In the ``docker`` source directory, run:
```make docs```
In the root of the `docker` source directory:
If you have any issues you need to debug, you can use ``make docs-shell`` and
then run ``mkdocs serve``
cd docker
Run:
make docs
If you have any issues you need to debug, you can use `make docs-shell` and
then run `mkdocs serve`
# Contributing
## Normal Case:
* Follow the contribution guidelines ([see
``../CONTRIBUTING.md``](../CONTRIBUTING.md)).
`../CONTRIBUTING.md`](../CONTRIBUTING.md)).
* [Remember to sign your work!](../CONTRIBUTING.md#sign-your-work)
* Work in your own fork of the code, we accept pull requests.
* Change the ``.md`` files with your favorite editor -- try to keep the
lines short (80 chars) and respect Markdown conventions.
* Run ``make clean docs`` to clean up old files and generate new ones,
or just ``make docs`` to update after small changes.
* Your static website can now be found in the ``_build`` directory.
* To preview what you have generated run ``make server`` and open
http://localhost:8000/ in your favorite browser.
``make clean docs`` must complete without any warnings or errors.
Working using GitHub's file editor
----------------------------------
Alternatively, for small changes and typos you might want to use
GitHub's built in file editor. It allows you to preview your changes
right online (though there can be some differences between GitHub
right on-line (though there can be some differences between GitHub
Markdown and mkdocs Markdown). Just be careful not to create many commits.
And you must still [sign your work!](../CONTRIBUTING.md#sign-your-work)
@ -79,26 +75,24 @@ Images
------
When you need to add images, try to make them as small as possible
(e.g. as gif). Usually images should go in the same directory as the
.md file which references them, or in a subdirectory if one already
(e.g. as gifs). Usually images should go in the same directory as the
`.md` file which references them, or in a subdirectory if one already
exists.
Publishing Documentation
------------------------
To publish a copy of the documentation you need a ``docs/awsconfig``
file containing AWS settings to deploy to. The release script will
To publish a copy of the documentation you need a `docs/awsconfig`
file containing AWS settings to deploy to. The release script will
create an s3 if needed, and will then push the files to it.
```
[profile dowideit-docs]
aws_access_key_id = IHOIUAHSIDH234rwf....
aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
region = ap-southeast-2
```
[profile dowideit-docs]
aws_access_key_id = IHOIUAHSIDH234rwf....
aws_secret_access_key = OIUYSADJHLKUHQWIUHE......
region = ap-southeast-2
The ``profile`` name must be the same as the name of the bucket you are
deploying to - which you call from the docker directory:
The `profile` name must be the same as the name of the bucket you are
deploying to - which you call from the `docker` directory:
``make AWS_S3_BUCKET=dowideit-docs docs-release``
make AWS_S3_BUCKET=dowideit-docs docs-release

View file

@ -106,6 +106,8 @@ pages:
- ['reference/api/docker_remote_api_v1.10.md', 'Reference', 'Docker Remote API v1.10']
- ['reference/api/docker_remote_api_v1.9.md', 'Reference', 'Docker Remote API v1.9']
- ['reference/api/remote_api_client_libraries.md', 'Reference', 'Docker Remote API Client Libraries']
- ['reference/api/docker_io_oauth_api.md', 'Reference', 'Docker IO OAuth API']
- ['reference/api/docker_io_accounts_api.md', 'Reference', 'Docker IO Accounts API']
# Contribute:
- ['contributing/index.md', '**HIDDEN**']

View file

@ -14,7 +14,7 @@ to need a [Docker IO](https://www.docker.io) account.
### Registration for a Docker IO Account
You can get a Docker IO account by [signing up for one here](
https://index.docker.io/account/signup/). A valid email address is required to
https://www.docker.io/account/signup/). A valid email address is required to
register, which you will need to verify for account activation.
### Email activation process
@ -22,7 +22,7 @@ register, which you will need to verify for account activation.
You need to have at least one verified email address to be able to use your
Docker IO account. If you can't find the validation email, you can request
another by visiting the [Resend Email Confirmation](
https://index.docker.io/account/resend-email-confirmation/) page.
https://www.docker.io/account/resend-email-confirmation/) page.
### Password reset process

View file

@ -4,10 +4,6 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant,
# Ubuntu
> **Warning**:
> These instructions have changed for 0.6. If you are upgrading from an
> earlier version, you will need to follow them again.
> **Note**:
> Docker is still under heavy development! We don't recommend using it in
> production yet, but we're getting closer with each release. Please see
@ -16,6 +12,7 @@ page_keywords: Docker, Docker documentation, requirements, virtualbox, vagrant,
Docker is supported on the following versions of Ubuntu:
- [*Ubuntu Trusty 14.04 (LTS) (64-bit)*](#ubuntu-trusty-1404-lts-64-bit)
- [*Ubuntu Precise 12.04 (LTS) (64-bit)*](#ubuntu-precise-1204-lts-64-bit)
- [*Ubuntu Raring 13.04 and Saucy 13.10 (64
bit)*](#ubuntu-raring-1304-and-saucy-1310-64-bit)
@ -23,6 +20,30 @@ Docker is supported on the following versions of Ubuntu:
Please read [*Docker and UFW*](#docker-and-ufw), if you plan to use [UFW
(Uncomplicated Firewall)](https://help.ubuntu.com/community/UFW)
## Ubuntu Trusty 14.04 (LTS) (64-bit)
Ubuntu Trusty comes with a 3.13.0 Linux kernel, and a `docker.io` package which
installs all its prerequisites from Ubuntu's repository.
> **Note**:
> Ubuntu (and Debian) contain a much older KDE3/GNOME2 package called ``docker``, so the
> package and the executable are called ``docker.io``.
### Installation
To install the latest Ubuntu package (may not be the latest Docker release):
sudo apt-get update
sudo apt-get install docker.io
sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker
To verify that everything has worked as expected:
sudo docker run -i -t ubuntu /bin/bash
Which should download the `ubuntu` image, and then start `bash` in a container.
## Ubuntu Precise 12.04 (LTS) (64-bit)
This installation path should work at all times.

View file

@ -87,14 +87,14 @@ to you.
*Docker is made for humans.*
It's easy to get started and easy to build and deploy applications with
Docker: or as we say "*dockerise*" them! As much of Docker as possible
Docker: or as we say "*dockerize*" them! As much of Docker as possible
uses plain English for commands and tries to be as lightweight and
transparent as possible. We want to get out of the way so you can build
and deploy your applications.
### Docker is Portable
*Dockerise And Go!*
*Dockerize And Go!*
Docker containers are highly portable. Docker provides a standard
container format to hold your applications:

View file

@ -454,6 +454,7 @@ Kill a container
HTTP/1.1 204 OK
:query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit.
:statuscode 204: no error
:statuscode 404: no such container
:statuscode 500: server error

View file

@ -482,6 +482,7 @@ Kill a container
HTTP/1.1 204 OK
:query signal: Signal to send to the container (integer). When not set, SIGKILL is assumed and the call will waits for the container to exit.
:statuscode 204: no error
:statuscode 404: no such container
:statuscode 500: server error

View file

@ -237,78 +237,7 @@ automatically sent.
"primary": false
}
### 1.5 Update an email address for a user
`PATCH /api/v1.1/users/:username/emails/`
Update an email address for the specified user to either verify an
email address or set it as the primary email for the user. You
cannot use this endpoint to un-verify an email address. You cannot
use this endpoint to unset the primary email, only set another as
the primary.
Parameters:
- **username** – username of the user whose email info is being
updated.
Json Parameters:
 
- **email** (*string*) – the email address to be updated.
- **verified** (*boolean*) – (optional) whether the email address
is verified, must be `true` or absent.
- **primary** (*boolean*) – (optional) whether to set the email
address as the primary email, must be `true`
or absent.
Request Headers:
 
- **Authorization** – required authentication credentials of
either type HTTP Basic or OAuth Bearer Token.
- **Content-Type** – MIME Type of post data. JSON, url-encoded
form data, etc.
Status Codes:
- **200** – success, user's email updated.
- **400** – data validation error.
- **401** – authentication error.
- **403** – permission error, authenticated user must be the user
whose data is being updated, OAuth access tokens must have
`email_write` scope.
- **404** – the specified username or email address does not
exist.
**Example request**:
Once you have independently verified an email address.
PATCH /api/v1.1/users/janedoe/emails/ HTTP/1.1
Host: www.docker.io
Accept: application/json
Authorization: Basic dXNlcm5hbWU6cGFzc3dvcmQ=
{
"email": "jane.doe+other@example.com",
"verified": true,
}
**Example response**:
HTTP/1.1 200 OK
Content-Type: application/json
{
"email": "jane.doe+other@example.com",
"verified": true,
"primary": false
}
### 1.6 Delete email address for a user
### 1.5 Delete email address for a user
`DELETE /api/v1.1/users/:username/emails/`

View file

@ -127,83 +127,83 @@ entry for each repo/tag on an image, each image is only represented
once, with a nested attribute indicating the repo/tags that apply to
that image.
Instead of:
Instead of:
HTTP/1.1 200 OK
Content-Type: application/json
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "12.04",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "latest",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "precise",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "12.10",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "quantal",
"Repository": "ubuntu"
}
]
[
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "12.04",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "latest",
"Repository": "ubuntu"
},
{
"VirtualSize": 131506275,
"Size": 131506275,
"Created": 1365714795,
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Tag": "precise",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "12.10",
"Repository": "ubuntu"
},
{
"VirtualSize": 180116135,
"Size": 24653,
"Created": 1364102658,
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Tag": "quantal",
"Repository": "ubuntu"
}
]
The returned json looks like this:
The returned json looks like this:
HTTP/1.1 200 OK
Content-Type: application/json
HTTP/1.1 200 OK
Content-Type: application/json
[
{
"RepoTags": [
"ubuntu:12.04",
"ubuntu:precise",
"ubuntu:latest"
],
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Created": 1365714795,
"Size": 131506275,
"VirtualSize": 131506275
},
{
"RepoTags": [
"ubuntu:12.10",
"ubuntu:quantal"
],
"ParentId": "27cf784147099545",
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Created": 1364102658,
"Size": 24653,
"VirtualSize": 180116135
}
]
[
{
"RepoTags": [
"ubuntu:12.04",
"ubuntu:precise",
"ubuntu:latest"
],
"Id": "8dbd9e392a964056420e5d58ca5cc376ef18e2de93b5cc90e868a1bbc8318c1c",
"Created": 1365714795,
"Size": 131506275,
"VirtualSize": 131506275
},
{
"RepoTags": [
"ubuntu:12.10",
"ubuntu:quantal"
],
"ParentId": "27cf784147099545",
"Id": "b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc",
"Created": 1364102658,
"Size": 24653,
"VirtualSize": 180116135
}
]
`GET /images/viz`

View file

@ -459,6 +459,11 @@ Kill the container `id`
HTTP/1.1 204 OK
Query Parameters
- **signal** - Signal to send to the container: integer or string like "SIGINT".
When not set, SIGKILL is assumed and the call will waits for the container to exit.
Status Codes:
- **204** – no error

View file

@ -466,6 +466,7 @@ Kill a container
HTTP/1.1 204 OK
:query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit.
:statuscode 204: no error
:statuscode 404: no such container
:statuscode 500: server error

View file

@ -461,6 +461,11 @@ Kill the container `id`
HTTP/1.1 204 OK
Query Parameters
- **signal** - Signal to send to the container: integer or string like "SIGINT".
When not set, SIGKILL is assumed and the call will waits for the container to exit.
Status Codes:
- **204** – no error

View file

@ -468,6 +468,7 @@ Kill a container
HTTP/1.1 204 OK
:query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit.
:statuscode 204: no error
:statuscode 404: no such container
:statuscode 500: server error

View file

@ -482,6 +482,11 @@ Kill the container `id`
HTTP/1.1 204 OK
Query Parameters
- **signal** - Signal to send to the container: integer or string like "SIGINT".
When not set, SIGKILL is assumed and the call will waits for the container to exit.
Status Codes:
- **204** – no error

View file

@ -480,6 +480,7 @@ Kill a container
HTTP/1.1 204 OK
:query signal: Signal to send to the container, must be integer or string (i.e. SIGINT). When not set, SIGKILL is assumed and the call will waits for the container to exit.
:statuscode 204: no error
:statuscode 404: no such container
:statuscode 500: server error

View file

@ -43,6 +43,7 @@ func unregister(name string) {
// containers by executing *jobs*.
type Engine struct {
handlers map[string]Handler
catchall Handler
hack Hack // data for temporary hackery (see hack.go)
id string
Stdout io.Writer
@ -60,6 +61,10 @@ func (eng *Engine) Register(name string, handler Handler) error {
return nil
}
func (eng *Engine) RegisterCatchall(catchall Handler) {
eng.catchall = catchall
}
// New initializes a new engine.
func New() *Engine {
eng := &Engine{
@ -113,9 +118,13 @@ func (eng *Engine) Job(name string, args ...string) *Job {
if eng.Logging {
job.Stderr.Add(utils.NopWriteCloser(eng.Stderr))
}
handler, exists := eng.handlers[name]
if exists {
job.handler = handler
if eng.catchall != nil {
job.handler = eng.catchall
} else {
handler, exists := eng.handlers[name]
if exists {
job.handler = handler
}
}
return job
}
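
A compact mock of the dispatch change in this hunk, with simplified stand-ins for engine.Engine and engine.Job rather than the real types: once a catch-all handler is registered, every job is routed through it, which is what lets the beam Sender further down forward arbitrary commands to a remote engine.

```go
// Simplified stand-ins: the real Engine stores engine.Handler values and
// Job carries streams and a status; only the routing decision is modeled.
package main

import "fmt"

type Job struct {
	Name    string
	handler func(*Job)
}

func (j *Job) Run() { j.handler(j) }

type Engine struct {
	handlers map[string]func(*Job)
	catchall func(*Job)
}

func New() *Engine { return &Engine{handlers: make(map[string]func(*Job))} }

func (eng *Engine) Register(name string, h func(*Job)) { eng.handlers[name] = h }
func (eng *Engine) RegisterCatchall(h func(*Job))      { eng.catchall = h }

func (eng *Engine) Job(name string) *Job {
	job := &Job{Name: name}
	if eng.catchall != nil {
		// The catch-all takes precedence over any named handler.
		job.handler = eng.catchall
	} else if h, exists := eng.handlers[name]; exists {
		job.handler = h
	}
	return job
}

func main() {
	eng := New()
	eng.Register("echo", func(j *Job) { fmt.Println("named handler ran:", j.Name) })

	eng.Job("echo").Run() // -> named handler ran: echo

	// Installing a catch-all (as a remote Sender would) reroutes everything.
	eng.RegisterCatchall(func(j *Job) { fmt.Println("forwarded:", j.Name) })
	eng.Job("echo").Run()     // -> forwarded: echo
	eng.Job("anything").Run() // also handled, even though never registered
}
```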

View file

@ -36,6 +36,13 @@ func (env *Env) Exists(key string) bool {
return exists
}
// Len returns the number of keys in the environment.
// Note that len(env) might be different from env.Len(),
// because the same key might be set multiple times.
func (env *Env) Len() int {
return len(env.Map())
}
func (env *Env) Init(src *Env) {
(*env) = make([]string, 0, len(*src))
for _, val := range *src {

View file

@ -4,6 +4,34 @@ import (
"testing"
)
func TestEnvLenZero(t *testing.T) {
env := &Env{}
if env.Len() != 0 {
t.Fatalf("%d", env.Len())
}
}
func TestEnvLenNotZero(t *testing.T) {
env := &Env{}
env.Set("foo", "bar")
env.Set("ga", "bu")
if env.Len() != 2 {
t.Fatalf("%d", env.Len())
}
}
func TestEnvLenDup(t *testing.T) {
env := &Env{
"foo=bar",
"foo=baz",
"a=b",
}
// len(env) != env.Len()
if env.Len() != 2 {
t.Fatalf("%d", env.Len())
}
}
func TestNewJob(t *testing.T) {
job := mkJob(t, "dummy", "--level=awesome")
if job.Name != "dummy" {

View file

@ -208,3 +208,7 @@ func (job *Job) Error(err error) Status {
fmt.Fprintf(job.Stderr, "%s\n", err)
return StatusErr
}
func (job *Job) StatusCode() int {
return int(job.status)
}

engine/remote.go (new file, 120 lines)
View file

@ -0,0 +1,120 @@
package engine
import (
"fmt"
"github.com/dotcloud/docker/pkg/beam"
"github.com/dotcloud/docker/pkg/beam/data"
"io"
"os"
"strconv"
"sync"
)
type Sender struct {
beam.Sender
}
func NewSender(s beam.Sender) *Sender {
return &Sender{s}
}
func (s *Sender) Install(eng *Engine) error {
// FIXME: this doesn't exist yet.
eng.RegisterCatchall(s.Handle)
return nil
}
func (s *Sender) Handle(job *Job) Status {
msg := data.Empty().Set("cmd", append([]string{job.Name}, job.Args...)...)
peer, err := beam.SendConn(s, msg.Bytes())
if err != nil {
return job.Errorf("beamsend: %v", err)
}
defer peer.Close()
var tasks sync.WaitGroup
defer tasks.Wait()
r := beam.NewRouter(nil)
r.NewRoute().KeyStartsWith("cmd", "log", "stdout").HasAttachment().Handler(func(p []byte, stdout *os.File) error {
tasks.Add(1)
io.Copy(job.Stdout, stdout)
tasks.Done()
return nil
})
r.NewRoute().KeyStartsWith("cmd", "log", "stderr").HasAttachment().Handler(func(p []byte, stderr *os.File) error {
tasks.Add(1)
io.Copy(job.Stderr, stderr)
tasks.Done()
return nil
})
r.NewRoute().KeyStartsWith("cmd", "log", "stdin").HasAttachment().Handler(func(p []byte, stdin *os.File) error {
tasks.Add(1)
io.Copy(stdin, job.Stdin)
tasks.Done()
return nil
})
var status int
r.NewRoute().KeyStartsWith("cmd", "status").Handler(func(p []byte, f *os.File) error {
cmd := data.Message(p).Get("cmd")
if len(cmd) != 2 {
return fmt.Errorf("usage: %s <0-127>", cmd[0])
}
s, err := strconv.ParseUint(cmd[1], 10, 8)
if err != nil {
return fmt.Errorf("usage: %s <0-127>", cmd[0])
}
status = int(s)
return nil
})
if _, err := beam.Copy(r, peer); err != nil {
return job.Errorf("%v", err)
}
return Status(status)
}
type Receiver struct {
*Engine
peer beam.Receiver
}
func NewReceiver(peer beam.Receiver) *Receiver {
return &Receiver{Engine: New(), peer: peer}
}
func (rcv *Receiver) Run() error {
r := beam.NewRouter(nil)
r.NewRoute().KeyExists("cmd").Handler(func(p []byte, f *os.File) error {
// Use the attachment as a beam return channel
peer, err := beam.FileConn(f)
if err != nil {
f.Close()
return err
}
cmd := data.Message(p).Get("cmd")
job := rcv.Engine.Job(cmd[0], cmd[1:]...)
stdout, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdout").Bytes())
if err != nil {
return err
}
job.Stdout.Add(stdout)
stderr, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stderr").Bytes())
if err != nil {
return err
}
job.Stderr.Add(stderr)
stdin, err := beam.SendPipe(peer, data.Empty().Set("cmd", "log", "stdin").Bytes())
if err != nil {
return err
}
job.Stdin.Add(stdin)
// ignore error because we pass the raw status
job.Run()
err = peer.Send(data.Empty().Set("cmd", "status", fmt.Sprintf("%d", job.status)).Bytes(), nil)
if err != nil {
return err
}
return nil
})
_, err := beam.Copy(r, rcv.peer)
return err
}

engine/remote_test.go (new file, 3 lines)
View file

@ -0,0 +1,3 @@
package engine
import ()

engine/rengine/main.go (new file, 43 lines)
View file

@ -0,0 +1,43 @@
package main
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/beam"
"net"
"os"
)
func main() {
eng := engine.New()
c, err := net.Dial("unix", "beam.sock")
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
defer c.Close()
f, err := c.(*net.UnixConn).File()
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
child, err := beam.FileConn(f)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
return
}
defer child.Close()
sender := engine.NewSender(child)
sender.Install(eng)
cmd := eng.Job(os.Args[1], os.Args[2:]...)
cmd.Stdout.Add(os.Stdout)
cmd.Stderr.Add(os.Stderr)
if err := cmd.Run(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}

engine/spawn/spawn.go (new file, 119 lines)
View file

@ -0,0 +1,119 @@
package spawn
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/pkg/beam"
"github.com/dotcloud/docker/utils"
"os"
"os/exec"
)
var initCalled bool
// Init checks if the current process has been created by Spawn.
//
// If no, it returns nil and the original program can continue
// unmodified.
//
// If no, it hijacks the process to run as a child worker controlled
// by its parent over a beam connection, with f exposed as a remote
// service. In this case Init never returns.
//
// The hijacking process takes place as follows:
// - Open file descriptor 3 as a beam endpoint. If this fails,
// terminate the current process.
// - Start a new engine.
// - Call f.Install on the engine. Any handlers registered
// will be available for remote invocation by the parent.
// - Listen for beam messages from the parent and pass them to
// the handlers.
// - When the beam endpoint is closed by the parent, terminate
// the current process.
//
// NOTE: Init must be called at the beginning of the same program
// calling Spawn. This is because Spawn approximates a "fork" by
// re-executing the current binary - where it expects spawn.Init
// to intercept the control flow and execute the worker code.
func Init(f engine.Installer) error {
initCalled = true
if os.Getenv("ENGINESPAWN") != "1" {
return nil
}
fmt.Printf("[%d child]\n", os.Getpid())
// Hijack the process
childErr := func() error {
fd3 := os.NewFile(3, "beam-introspect")
introsp, err := beam.FileConn(fd3)
if err != nil {
return fmt.Errorf("beam introspection error: %v", err)
}
fd3.Close()
defer introsp.Close()
eng := engine.NewReceiver(introsp)
if err := f.Install(eng.Engine); err != nil {
return err
}
if err := eng.Run(); err != nil {
return err
}
return nil
}()
if childErr != nil {
os.Exit(1)
}
os.Exit(0)
return nil // Never reached
}
// Spawn starts a new Engine in a child process and returns
// a proxy Engine through which it can be controlled.
//
// The commands available on the child engine are determined
// by an earlier call to Init. It is important that Init be
// called at the very beginning of the current program - this
// allows it to be called as a re-execution hook in the child
// process.
//
// Long story short, if you want to expose `myservice` in a child
// process, do this:
//
// func main() {
// spawn.Init(myservice)
// [..]
// child, err := spawn.Spawn()
// [..]
// child.Job("dosomething").Run()
// }
func Spawn() (*engine.Engine, error) {
if !initCalled {
return nil, fmt.Errorf("spawn.Init must be called at the top of the main() function")
}
cmd := exec.Command(utils.SelfPath())
cmd.Env = append(cmd.Env, "ENGINESPAWN=1")
local, remote, err := beam.SocketPair()
if err != nil {
return nil, err
}
child, err := beam.FileConn(local)
if err != nil {
local.Close()
remote.Close()
return nil, err
}
local.Close()
cmd.ExtraFiles = append(cmd.ExtraFiles, remote)
// FIXME: the beam/engine glue has no way to inform the caller
// of the child's termination. The next call will simply return
// an error.
if err := cmd.Start(); err != nil {
child.Close()
return nil, err
}
eng := engine.New()
if err := engine.NewSender(child).Install(eng); err != nil {
child.Close()
return nil, err
}
return eng, nil
}

View file

@ -0,0 +1,61 @@
package main
import (
"fmt"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/engine/spawn"
"log"
"os"
"os/exec"
"strings"
)
func main() {
fmt.Printf("[%d] MAIN\n", os.Getpid())
spawn.Init(&Worker{})
fmt.Printf("[%d parent] spawning\n", os.Getpid())
eng, err := spawn.Spawn()
if err != nil {
log.Fatal(err)
}
fmt.Printf("[parent] spawned\n")
job := eng.Job(os.Args[1], os.Args[2:]...)
job.Stdout.Add(os.Stdout)
job.Stderr.Add(os.Stderr)
job.Run()
// FIXME: use the job's status code
os.Exit(0)
}
type Worker struct {
}
func (w *Worker) Install(eng *engine.Engine) error {
eng.Register("exec", w.Exec)
eng.Register("cd", w.Cd)
eng.Register("echo", w.Echo)
return nil
}
func (w *Worker) Exec(job *engine.Job) engine.Status {
fmt.Printf("--> %v\n", job.Args)
cmd := exec.Command(job.Args[0], job.Args[1:]...)
cmd.Stdout = job.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return job.Errorf("%v\n", err)
}
return engine.StatusOK
}
func (w *Worker) Cd(job *engine.Job) engine.Status {
if err := os.Chdir(job.Args[0]); err != nil {
return job.Errorf("%v\n", err)
}
return engine.StatusOK
}
func (w *Worker) Echo(job *engine.Job) engine.Status {
fmt.Fprintf(job.Stdout, "%s\n", strings.Join(job.Args, " "))
return engine.StatusOK
}

View file

@ -665,3 +665,25 @@ func TestUnPrivilegedCannotMount(t *testing.T) {
logDone("run - test un-privileged cannot mount")
}
func TestSysNotAvaliableInNonPrivilegedContainers(t *testing.T) {
cmd := exec.Command(dockerBinary, "run", "busybox", "ls", "/sys/kernel")
if code, err := runCommand(cmd); err == nil || code == 0 {
t.Fatal("sys should not be available in a non privileged container")
}
deleteAllContainers()
logDone("run - sys not avaliable in non privileged container")
}
func TestSysAvaliableInPrivilegedContainers(t *testing.T) {
cmd := exec.Command(dockerBinary, "run", "--privileged", "busybox", "ls", "/sys/kernel")
if code, err := runCommand(cmd); err != nil || code != 0 {
t.Fatalf("sys should be available in privileged container")
}
deleteAllContainers()
logDone("run - sys avaliable in privileged container")
}

integration/MAINTAINERS (new file, 4 lines)
View file

@ -0,0 +1,4 @@
Solomon Hykes <s@docker.com>
# WE ARE LOOKING FOR VOLUNTEERS TO HELP CLEAN THIS UP.
# TO VOLUNTEER PLEASE OPEN A PULL REQUEST ADDING YOURSELF TO THIS FILE.
# WE WILL HELP YOU GET STARTED. THANKS!

integration/README.md (new file, 23 lines)
View file

@ -0,0 +1,23 @@
## Legacy integration tests
`./integration` contains Docker's legacy integration tests.
It is DEPRECATED and will eventually be removed.
### If you are a *CONTRIBUTOR* and want to add a test:
* Consider mocking out side effects and contributing a *unit test* in the subsystem
you're modifying. For example, the remote API has unit tests in `./api/server/server_unit_tests.go`.
The events subsystem has unit tests in `./events/events_test.go`. And so on.
* For end-to-end integration tests, please contribute to `./integration-cli`.
### If you are a *MAINTAINER*
Please don't allow patches adding new tests to `./integration`.
### If you are *LOOKING FOR A WAY TO HELP*
Please consider porting tests away from `./integration` and into either unit tests or CLI tests.
Any help will be greatly appreciated!

View file

@ -407,7 +407,7 @@ func TestCopyVolumeUidGid(t *testing.T) {
defer r.Nuke()
// Add directory not owned by root
container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test.txt && chown daemon.daemon /hello"}, t)
container1, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && touch /hello/test && chown daemon.daemon /hello"}, t)
defer r.Destroy(container1)
if container1.State.IsRunning() {
@ -432,6 +432,32 @@ func TestCopyVolumeUidGid(t *testing.T) {
if !strings.Contains(stdout1, "daemon daemon") {
t.Fatal("Container failed to transfer uid and gid to volume")
}
container2, _, _ := mkContainer(r, []string{"_", "/bin/sh", "-c", "mkdir -p /hello && chown daemon.daemon /hello"}, t)
defer r.Destroy(container1)
if container2.State.IsRunning() {
t.Errorf("Container shouldn't be running")
}
if err := container2.Run(); err != nil {
t.Fatal(err)
}
if container2.State.IsRunning() {
t.Errorf("Container shouldn't be running")
}
img2, err := r.Commit(container2, "", "", "unit test commited image", "", nil)
if err != nil {
t.Error(err)
}
// Test that the uid and gid is copied from the image to the volume
tmpDir2 := tempDir(t)
defer os.RemoveAll(tmpDir2)
stdout2, _ := runContainer(eng, r, []string{"-v", "/hello", img2.ID, "stat", "-c", "%U %G", "/hello"}, t)
if !strings.Contains(stdout2, "daemon daemon") {
t.Fatal("Container failed to transfer uid and gid to volume")
}
}
// Test for #1582

View file

@ -0,0 +1,169 @@
package fs
import (
"testing"
)
const (
sectorsRecursiveContents = `8:0 1024`
serviceBytesRecursiveContents = `8:0 Read 100
8:0 Write 400
8:0 Sync 200
8:0 Async 300
8:0 Total 500
Total 500`
servicedRecursiveContents = `8:0 Read 10
8:0 Write 40
8:0 Sync 20
8:0 Async 30
8:0 Total 50
Total 50`
queuedRecursiveContents = `8:0 Read 1
8:0 Write 4
8:0 Sync 2
8:0 Async 3
8:0 Total 5
Total 5`
)
func TestBlkioStats(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
stats, err := blkio.Stats(helper.CgroupData)
if err != nil {
t.Fatal(err)
}
// Verify expected stats.
expectedStats := map[string]float64{
"blkio.sectors_recursive:8:0": 1024.0,
// Serviced bytes.
"io_service_bytes_recursive:8:0:Read": 100.0,
"io_service_bytes_recursive:8:0:Write": 400.0,
"io_service_bytes_recursive:8:0:Sync": 200.0,
"io_service_bytes_recursive:8:0:Async": 300.0,
"io_service_bytes_recursive:8:0:Total": 500.0,
// Serviced requests.
"io_serviced_recursive:8:0:Read": 10.0,
"io_serviced_recursive:8:0:Write": 40.0,
"io_serviced_recursive:8:0:Sync": 20.0,
"io_serviced_recursive:8:0:Async": 30.0,
"io_serviced_recursive:8:0:Total": 50.0,
// Queued requests.
"io_queued_recursive:8:0:Read": 1.0,
"io_queued_recursive:8:0:Write": 4.0,
"io_queued_recursive:8:0:Sync": 2.0,
"io_queued_recursive:8:0:Async": 3.0,
"io_queued_recursive:8:0:Total": 5.0,
}
expectStats(t, expectedStats, stats)
}
func TestBlkioStatsNoSectorsFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsNoServiceBytesFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsNoServicedFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsNoQueuedFile(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": serviceBytesRecursiveContents,
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsUnexpectedNumberOfFields(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": "8:0 Read 100 100",
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}
func TestBlkioStatsUnexpectedFieldType(t *testing.T) {
helper := NewCgroupTestUtil("blkio", t)
defer helper.cleanup()
helper.writeFileContents(map[string]string{
"blkio.io_service_bytes_recursive": "8:0 Read Write",
"blkio.io_serviced_recursive": servicedRecursiveContents,
"blkio.io_queued_recursive": queuedRecursiveContents,
"blkio.sectors_recursive": sectorsRecursiveContents,
})
blkio := &blkioGroup{}
_, err := blkio.Stats(helper.CgroupData)
if err == nil {
t.Fatal("Expected to fail, but did not")
}
}

View file

@ -2,6 +2,7 @@ package fs
import (
"bufio"
"fmt"
"os"
"path/filepath"
"strconv"
@ -56,13 +57,14 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) {
return nil, err
}
f, err := os.Open(filepath.Join(path, "memory.stat"))
// Set stats from memory.stat.
statsFile, err := os.Open(filepath.Join(path, "memory.stat"))
if err != nil {
return nil, err
}
defer f.Close()
defer statsFile.Close()
sc := bufio.NewScanner(f)
sc := bufio.NewScanner(statsFile)
for sc.Scan() {
t, v, err := getCgroupParamKeyValue(sc.Text())
if err != nil {
@ -70,5 +72,19 @@ func (s *memoryGroup) Stats(d *data) (map[string]float64, error) {
}
paramData[t] = v
}
// Set memory usage and max historical usage.
params := []string{
"usage_in_bytes",
"max_usage_in_bytes",
}
for _, param := range params {
value, err := getCgroupParamFloat64(path, fmt.Sprintf("memory.%s", param))
if err != nil {
return nil, err
}
paramData[param] = value
}
return paramData, nil
}

View file

@ -0,0 +1,75 @@
/*
Utility for testing cgroup operations.
Creates a mock of the cgroup filesystem for the duration of the test.
*/
package fs
import (
"fmt"
"io/ioutil"
"log"
"os"
"testing"
)
type cgroupTestUtil struct {
// data to use in tests.
CgroupData *data
// Path to the mock cgroup directory.
CgroupPath string
// Temporary directory to store mock cgroup filesystem.
tempDir string
t *testing.T
}
// Creates a new test util for the specified subsystem
func NewCgroupTestUtil(subsystem string, t *testing.T) *cgroupTestUtil {
d := &data{}
tempDir, err := ioutil.TempDir("", fmt.Sprintf("%s_cgroup_test", subsystem))
if err != nil {
t.Fatal(err)
}
d.root = tempDir
testCgroupPath, err := d.path(subsystem)
if err != nil {
t.Fatal(err)
}
// Ensure the full mock cgroup path exists.
err = os.MkdirAll(testCgroupPath, 0755)
if err != nil {
t.Fatal(err)
}
return &cgroupTestUtil{CgroupData: d, CgroupPath: testCgroupPath, tempDir: tempDir, t: t}
}
func (c *cgroupTestUtil) cleanup() {
os.RemoveAll(c.tempDir)
}
// Write the specified contents on the mock of the specified cgroup files.
func (c *cgroupTestUtil) writeFileContents(fileContents map[string]string) {
for file, contents := range fileContents {
err := writeFile(c.CgroupPath, file, contents)
if err != nil {
c.t.Fatal(err)
}
}
}
// Expect the specified stats.
func expectStats(t *testing.T, expected, actual map[string]float64) {
for stat, expectedValue := range expected {
actualValue, ok := actual[stat]
if !ok {
log.Printf("Expected stat %s to exist: %s", stat, actual)
t.Fail()
} else if actualValue != expectedValue {
log.Printf("Expected stats %s to have value %f but had %f instead", stat, expectedValue, actualValue)
t.Fail()
}
}
}

View file

@ -3,6 +3,8 @@ package fs
import (
"errors"
"fmt"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
@ -27,3 +29,12 @@ func getCgroupParamKeyValue(t string) (string, float64, error) {
return "", 0.0, ErrNotValidFormat
}
}
// Gets a single float64 value from the specified cgroup file.
func getCgroupParamFloat64(cgroupPath, cgroupFile string) (float64, error) {
contents, err := ioutil.ReadFile(filepath.Join(cgroupPath, cgroupFile))
if err != nil {
return -1.0, err
}
return strconv.ParseFloat(strings.TrimSpace(string(contents)), 64)
}

View file

@ -0,0 +1,68 @@
package fs
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
)
const (
cgroupFile = "cgroup.file"
floatValue = 2048.0
floatString = "2048"
)
func TestGetCgroupParamsFloat64(t *testing.T) {
// Setup tempdir.
tempDir, err := ioutil.TempDir("", "cgroup_utils_test")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tempDir)
tempFile := filepath.Join(tempDir, cgroupFile)
// Success.
err = ioutil.WriteFile(tempFile, []byte(floatString), 0755)
if err != nil {
t.Fatal(err)
}
value, err := getCgroupParamFloat64(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != floatValue {
t.Fatalf("Expected %f to equal %f", value, floatValue)
}
// Success with new line.
err = ioutil.WriteFile(tempFile, []byte(floatString+"\n"), 0755)
if err != nil {
t.Fatal(err)
}
value, err = getCgroupParamFloat64(tempDir, cgroupFile)
if err != nil {
t.Fatal(err)
} else if value != floatValue {
t.Fatalf("Expected %f to equal %f", value, floatValue)
}
// Not a float.
err = ioutil.WriteFile(tempFile, []byte("not-a-float"), 0755)
if err != nil {
t.Fatal(err)
}
_, err = getCgroupParamFloat64(tempDir, cgroupFile)
if err == nil {
t.Fatal("Expecting error, got none")
}
// Unknown file.
err = os.Remove(tempFile)
if err != nil {
t.Fatal(err)
}
_, err = getCgroupParamFloat64(tempDir, cgroupFile)
if err == nil {
t.Fatal("Expecting error, got none")
}
}

View file

@ -16,135 +16,148 @@ process are specified in this file. The configuration is used for each process
Sample `container.json` file:
```json
{
"mounts" : [
{
"type" : "devtmpfs"
}
],
"tty" : true,
"environment" : [
"HOME=/",
"PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
"container=docker",
"TERM=xterm-256color"
],
"hostname" : "koye",
"cgroups" : {
"parent" : "docker",
"name" : "docker-koye"
},
"capabilities_mask" : [
{
"value" : 8,
"key" : "SETPCAP",
"enabled" : false
},
{
"enabled" : false,
"value" : 16,
"key" : "SYS_MODULE"
},
{
"value" : 17,
"key" : "SYS_RAWIO",
"enabled" : false
},
{
"key" : "SYS_PACCT",
"value" : 20,
"enabled" : false
},
{
"value" : 21,
"key" : "SYS_ADMIN",
"enabled" : false
},
{
"value" : 23,
"key" : "SYS_NICE",
"enabled" : false
},
{
"value" : 24,
"key" : "SYS_RESOURCE",
"enabled" : false
},
{
"key" : "SYS_TIME",
"value" : 25,
"enabled" : false
},
{
"enabled" : false,
"value" : 26,
"key" : "SYS_TTY_CONFIG"
},
{
"key" : "AUDIT_WRITE",
"value" : 29,
"enabled" : false
},
{
"value" : 30,
"key" : "AUDIT_CONTROL",
"enabled" : false
},
{
"enabled" : false,
"key" : "MAC_OVERRIDE",
"value" : 32
},
{
"enabled" : false,
"key" : "MAC_ADMIN",
"value" : 33
},
{
"key" : "NET_ADMIN",
"value" : 12,
"enabled" : false
},
{
"value" : 27,
"key" : "MKNOD",
"enabled" : true
}
],
"networks" : [
{
"gateway" : "172.17.42.1",
"mtu" : 1500,
"address" : "127.0.0.1/0",
"type" : "loopback",
"gateway" : "localhost"
},
{
"mtu" : 1500,
"address" : "172.17.42.2/16",
"type" : "veth",
"context" : {
"bridge" : "docker0",
"prefix" : "veth"
},
"address" : "172.17.0.2/16",
"type" : "veth",
"mtu" : 1500
}
],
"cgroups" : {
"parent" : "docker",
"name" : "11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620"
},
"tty" : true,
"environment" : [
"HOME=/",
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
"HOSTNAME=11bb30683fb0",
"TERM=xterm"
],
"capabilities_mask" : [
{
"key": "SETPCAP",
"enabled": false
},
{
"key": "SYS_MODULE",
"enabled": false
},
{
"key": "SYS_RAWIO",
"enabled": false
},
{
"key": "SYS_PACCT",
"enabled": false
},
{
"key": "SYS_ADMIN",
"enabled": false
},
{
"key": "SYS_NICE",
"enabled": false
},
{
"key": "SYS_RESOURCE",
"enabled": false
},
{
"key": "SYS_TIME",
"enabled": false
},
{
"key": "SYS_TTY_CONFIG",
"enabled": false
},
{
"key": "MKNOD",
"enabled": true
},
{
"key": "AUDIT_WRITE",
"enabled": false
},
{
"key": "AUDIT_CONTROL",
"enabled": false
},
{
"key": "MAC_OVERRIDE",
"enabled": false
},
{
"key": "MAC_ADMIN",
"enabled": false
},
{
"key": "NET_ADMIN",
"enabled": false
}
],
"context" : {
"apparmor_profile" : "docker-default"
},
"mounts" : [
{
"source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/resolv.conf",
"writable" : false,
"destination" : "/etc/resolv.conf",
"private" : true
},
{
"source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hostname",
"writable" : false,
"destination" : "/etc/hostname",
"private" : true
},
{
"source" : "/var/lib/docker/containers/11bb30683fb0bdd57fab4d3a8238877f1e4395a2cfc7320ea359f7a02c1a5620/hosts",
"writable" : false,
"destination" : "/etc/hosts",
"private" : true
"gateway" : "172.17.42.1"
}
],
"namespaces" : [
{
"key": "NEWNS",
"enabled": true
"key" : "NEWNS",
"value" : 131072,
"enabled" : true,
"file" : "mnt"
},
{
"key": "NEWUTS",
"enabled": true
"key" : "NEWUTS",
"value" : 67108864,
"enabled" : true,
"file" : "uts"
},
{
"key": "NEWIPC",
"enabled": true
"enabled" : true,
"file" : "ipc",
"key" : "NEWIPC",
"value" : 134217728
},
{
"key": "NEWPID",
"enabled": true
"file" : "pid",
"enabled" : true,
"value" : 536870912,
"key" : "NEWPID"
},
{
"key": "NEWNET",
"enabled": true
"enabled" : true,
"file" : "net",
"key" : "NEWNET",
"value" : 1073741824
}
]
}

View file

@ -0,0 +1,60 @@
// +build linux
package console
import (
"fmt"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/system"
"os"
"path/filepath"
"syscall"
)
// Setup initializes the proper /dev/console inside the rootfs path
func Setup(rootfs, consolePath, mountLabel string) error {
oldMask := system.Umask(0000)
defer system.Umask(oldMask)
stat, err := os.Stat(consolePath)
if err != nil {
return fmt.Errorf("stat console %s %s", consolePath, err)
}
var (
st = stat.Sys().(*syscall.Stat_t)
dest = filepath.Join(rootfs, "dev/console")
)
if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove %s %s", dest, err)
}
if err := os.Chmod(consolePath, 0600); err != nil {
return err
}
if err := os.Chown(consolePath, 0, 0); err != nil {
return err
}
if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {
return fmt.Errorf("mknod %s %s", dest, err)
}
if err := label.SetFileLabel(consolePath, mountLabel); err != nil {
return fmt.Errorf("set file label %s %s", dest, err)
}
if err := system.Mount(consolePath, dest, "bind", syscall.MS_BIND, ""); err != nil {
return fmt.Errorf("bind %s to %s %s", consolePath, dest, err)
}
return nil
}
func OpenAndDup(consolePath string) error {
slave, err := system.OpenTerminal(consolePath, syscall.O_RDWR)
if err != nil {
return fmt.Errorf("open terminal %s", err)
}
if err := system.Dup2(slave.Fd(), 0); err != nil {
return err
}
if err := system.Dup2(slave.Fd(), 1); err != nil {
return err
}
return system.Dup2(slave.Fd(), 2)
}

View file

@ -23,7 +23,7 @@ type Container struct {
Networks []*Network `json:"networks,omitempty"` // nil for host's network stack
Cgroups *cgroups.Cgroup `json:"cgroups,omitempty"` // cgroups
Context Context `json:"context,omitempty"` // generic context for specific options (apparmor, selinux)
Mounts []Mount `json:"mounts,omitempty"`
Mounts Mounts `json:"mounts,omitempty"`
}
// Network defines configuration for a container's networking stack
@ -37,12 +37,3 @@ type Network struct {
Gateway string `json:"gateway,omitempty"`
Mtu int `json:"mtu,omitempty"`
}
// Bind mounts from the host system to the container
//
type Mount struct {
Source string `json:"source"` // Source path, in the host namespace
Destination string `json:"destination"` // Destination path, in the container
Writable bool `json:"writable"`
Private bool `json:"private"`
}

View file

@ -1,129 +1,146 @@
{
"hostname": "koye",
"tty": true,
"environment": [
"HOME=/",
"PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
"container=docker",
"TERM=xterm-256color"
],
"namespaces": [
{
"file": "ipc",
"value": 134217728,
"enabled": true,
"key": "NEWIPC"
},
{
"file": "mnt",
"value": 131072,
"enabled": true,
"key": "NEWNS"
},
{
"file": "pid",
"value": 536870912,
"enabled": true,
"key": "NEWPID"
},
{
"file": "uts",
"value": 67108864,
"enabled": true,
"key": "NEWUTS"
},
{
"file": "net",
"value": 1073741824,
"enabled": true,
"key": "NEWNET"
}
],
"capabilities_mask": [
{
"key": "SETPCAP",
"enabled": false
},
{
"key": "SYS_MODULE",
"enabled": false
},
{
"key": "SYS_RAWIO",
"enabled": false
},
{
"key": "SYS_PACCT",
"enabled": false
},
{
"key": "SYS_ADMIN",
"enabled": false
},
{
"key": "SYS_NICE",
"enabled": false
},
{
"key": "SYS_RESOURCE",
"enabled": false
},
{
"key": "SYS_TIME",
"enabled": false
},
{
"key": "SYS_TTY_CONFIG",
"enabled": false
},
{
"key": "MKNOD",
"enabled": true
},
{
"key": "AUDIT_WRITE",
"enabled": false
},
{
"key": "AUDIT_CONTROL",
"enabled": false
},
{
"key": "MAC_OVERRIDE",
"enabled": false
},
{
"key": "MAC_ADMIN",
"enabled": false
},
{
"key": "NET_ADMIN",
"enabled": false
}
],
"networks": [
{
"type": "loopback",
"gateway": "localhost",
"address": "127.0.0.1/0",
"mtu": 1500
},
{
"type": "veth",
"gateway": "172.17.42.1",
"address": "172.17.0.4/16",
"context": {
"prefix": "dock",
"bridge": "docker0"
},
"mtu": 1500
}
],
"cgroups": {
"name": "docker-koye",
"parent": "docker",
"memory": 5248000,
"memory_swap": -1,
"cpu_shares": 1024
}
"mounts" : [
{
"type" : "devtmpfs"
}
],
"tty" : true,
"environment" : [
"HOME=/",
"PATH=PATH=$PATH:/bin:/usr/bin:/sbin:/usr/sbin",
"container=docker",
"TERM=xterm-256color"
],
"hostname" : "koye",
"cgroups" : {
"parent" : "docker",
"name" : "docker-koye"
},
"capabilities_mask" : [
{
"value" : 8,
"key" : "SETPCAP",
"enabled" : false
},
{
"enabled" : false,
"value" : 16,
"key" : "SYS_MODULE"
},
{
"value" : 17,
"key" : "SYS_RAWIO",
"enabled" : false
},
{
"key" : "SYS_PACCT",
"value" : 20,
"enabled" : false
},
{
"value" : 21,
"key" : "SYS_ADMIN",
"enabled" : false
},
{
"value" : 23,
"key" : "SYS_NICE",
"enabled" : false
},
{
"value" : 24,
"key" : "SYS_RESOURCE",
"enabled" : false
},
{
"key" : "SYS_TIME",
"value" : 25,
"enabled" : false
},
{
"enabled" : false,
"value" : 26,
"key" : "SYS_TTY_CONFIG"
},
{
"key" : "AUDIT_WRITE",
"value" : 29,
"enabled" : false
},
{
"value" : 30,
"key" : "AUDIT_CONTROL",
"enabled" : false
},
{
"enabled" : false,
"key" : "MAC_OVERRIDE",
"value" : 32
},
{
"enabled" : false,
"key" : "MAC_ADMIN",
"value" : 33
},
{
"key" : "NET_ADMIN",
"value" : 12,
"enabled" : false
},
{
"value" : 27,
"key" : "MKNOD",
"enabled" : true
}
],
"networks" : [
{
"mtu" : 1500,
"address" : "127.0.0.1/0",
"type" : "loopback",
"gateway" : "localhost"
},
{
"mtu" : 1500,
"address" : "172.17.42.2/16",
"type" : "veth",
"context" : {
"bridge" : "docker0",
"prefix" : "veth"
},
"gateway" : "172.17.42.1"
}
],
"namespaces" : [
{
"key" : "NEWNS",
"value" : 131072,
"enabled" : true,
"file" : "mnt"
},
{
"key" : "NEWUTS",
"value" : 67108864,
"enabled" : true,
"file" : "uts"
},
{
"enabled" : true,
"file" : "ipc",
"key" : "NEWIPC",
"value" : 134217728
},
{
"file" : "pid",
"enabled" : true,
"value" : 536870912,
"key" : "NEWPID"
},
{
"enabled" : true,
"file" : "net",
"key" : "NEWNET",
"value" : 1073741824
}
]
}
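As a rough, hypothetical usage sketch (not part of this commit), a config like the sample above can be decoded with encoding/json; the snippet only assumes the libcontainer.Container fields and the Mounts.OfType helper that appear elsewhere in this diff:

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"

	"github.com/dotcloud/docker/pkg/libcontainer"
)

func main() {
	f, err := os.Open("container.json")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var container libcontainer.Container
	if err := json.NewDecoder(f).Decode(&container); err != nil {
		log.Fatal(err)
	}
	// for the sample above this prints the hostname and the single devtmpfs mount request
	fmt.Println(container.Hostname, len(container.Mounts.OfType("devtmpfs")))
}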


@ -56,14 +56,4 @@ func TestContainerJsonFormat(t *testing.T) {
t.Log("capabilities mask should not contain SYS_CHROOT")
t.Fail()
}
if container.Cgroups.CpuShares != 1024 {
t.Log("cpu shares not set correctly")
t.Fail()
}
if container.Cgroups.Memory != 5248000 {
t.Log("memory limit not set correctly")
t.Fail()
}
}


@ -0,0 +1,143 @@
// +build linux
package mount
import (
"fmt"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/mount/nodes"
"github.com/dotcloud/docker/pkg/libcontainer/security/restrict"
"github.com/dotcloud/docker/pkg/system"
"os"
"path/filepath"
"syscall"
)
// default mount point flags
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
type mount struct {
source string
path string
device string
flags int
data string
}
// InitializeMountNamespace sets up the devices, mount points, and filesystems for use inside a
// new mount namespace
func InitializeMountNamespace(rootfs, console string, container *libcontainer.Container) error {
var (
err error
flag = syscall.MS_PRIVATE
)
if container.NoPivotRoot {
flag = syscall.MS_SLAVE
}
if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
return fmt.Errorf("mounting / as slave %s", err)
}
if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
}
if err := mountSystem(rootfs, container); err != nil {
return fmt.Errorf("mount system %s", err)
}
if err := setupBindmounts(rootfs, container.Mounts); err != nil {
return fmt.Errorf("bind mounts %s", err)
}
if err := nodes.CopyN(rootfs, nodes.DefaultNodes); err != nil {
return fmt.Errorf("copy dev nodes %s", err)
}
if restrictionPath := container.Context["restriction_path"]; restrictionPath != "" {
if err := restrict.Restrict(rootfs, restrictionPath); err != nil {
return fmt.Errorf("restrict %s", err)
}
}
if err := SetupPtmx(rootfs, console, container.Context["mount_label"]); err != nil {
return err
}
if err := system.Chdir(rootfs); err != nil {
return fmt.Errorf("chdir into %s %s", rootfs, err)
}
if container.NoPivotRoot {
err = MsMoveRoot(rootfs)
} else {
err = PivotRoot(rootfs)
}
if err != nil {
return err
}
if container.ReadonlyFs {
if err := SetReadonly(); err != nil {
return fmt.Errorf("set readonly %s", err)
}
}
system.Umask(0022)
return nil
}
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
// inside the mount namespace
func mountSystem(rootfs string, container *libcontainer.Container) error {
for _, m := range newSystemMounts(rootfs, container.Context["mount_label"], container.Mounts) {
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
return fmt.Errorf("mkdirall %s %s", m.path, err)
}
if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
}
}
return nil
}
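// setupBindmounts applies the container's bind mounts inside the rootfs, honoring the writable and private options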
func setupBindmounts(rootfs string, bindMounts libcontainer.Mounts) error {
for _, m := range bindMounts.OfType("bind") {
var (
flags = syscall.MS_BIND | syscall.MS_REC
dest = filepath.Join(rootfs, m.Destination)
)
if !m.Writable {
flags = flags | syscall.MS_RDONLY
}
if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
}
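// MS_RDONLY is not honored on the initial bind mount, so the remount below is
// what actually makes the mount read-only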
if !m.Writable {
if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
}
}
if m.Private {
if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
return fmt.Errorf("mounting %s private %s", dest, err)
}
}
}
return nil
}
// TODO: this should be cleaned up with a better way of handling system and
// standard bind mounts, allowing them to be more dynamic
func newSystemMounts(rootfs, mountLabel string, mounts libcontainer.Mounts) []mount {
systemMounts := []mount{
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
}
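// mount a fresh tmpfs over /dev only when the config requests a devtmpfs mount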
if len(mounts.OfType("devtmpfs")) == 1 {
systemMounts = append(systemMounts, mount{source: "tmpfs", path: filepath.Join(rootfs, "dev"), device: "tmpfs", flags: syscall.MS_NOSUID | syscall.MS_STRICTATIME, data: "mode=755"})
}
systemMounts = append(systemMounts,
mount{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
mount{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)})
if len(mounts.OfType("sysfs")) == 1 {
systemMounts = append(systemMounts, mount{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags})
}
return systemMounts
}


@ -0,0 +1,19 @@
// +build linux
package mount
import (
"fmt"
"github.com/dotcloud/docker/pkg/system"
"syscall"
)
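// MsMoveRoot uses MS_MOVE and chroot to switch the root filesystem to rootfs without pivot_root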
func MsMoveRoot(rootfs string) error {
if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
return fmt.Errorf("mount move %s into / %s", rootfs, err)
}
if err := system.Chroot("."); err != nil {
return fmt.Errorf("chroot . %s", err)
}
return system.Chdir("/")
}


@ -0,0 +1,49 @@
// +build linux
package nodes
import (
"fmt"
"github.com/dotcloud/docker/pkg/system"
"os"
"path/filepath"
"syscall"
)
// Default list of device nodes to copy
var DefaultNodes = []string{
"null",
"zero",
"full",
"random",
"urandom",
"tty",
}
// CopyN copies the device node from the host into the rootfs
func CopyN(rootfs string, nodesToCopy []string) error {
oldMask := system.Umask(0000)
defer system.Umask(oldMask)
for _, node := range nodesToCopy {
if err := Copy(rootfs, node); err != nil {
return err
}
}
return nil
}
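// Copy mknods a single device node from the host's /dev into the rootfs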
func Copy(rootfs, node string) error {
stat, err := os.Stat(filepath.Join("/dev", node))
if err != nil {
return err
}
var (
dest = filepath.Join(rootfs, "dev", node)
st = stat.Sys().(*syscall.Stat_t)
)
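	// recreate the node inside the rootfs with the host node's mode and device numbers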
if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {
return fmt.Errorf("copy %s %s", node, err)
}
return nil
}


@ -0,0 +1,31 @@
// +build linux
package mount
import (
"fmt"
"github.com/dotcloud/docker/pkg/system"
"io/ioutil"
"os"
"path/filepath"
"syscall"
)
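// PivotRoot uses pivot_root to make rootfs the new root and then detaches the old root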
func PivotRoot(rootfs string) error {
pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root")
if err != nil {
return fmt.Errorf("can't create pivot_root dir %s", pivotDir, err)
}
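	// pivotDir, created inside rootfs, is where pivot_root parks the old root until it is unmounted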
if err := system.Pivotroot(rootfs, pivotDir); err != nil {
return fmt.Errorf("pivot_root %s", err)
}
if err := system.Chdir("/"); err != nil {
return fmt.Errorf("chdir / %s", err)
}
// path to pivot dir now changed, update
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
return fmt.Errorf("unmount pivot_root dir %s", err)
}
return os.Remove(pivotDir)
}


@ -0,0 +1,26 @@
// +build linux
package mount
import (
"fmt"
"github.com/dotcloud/docker/pkg/libcontainer/console"
"os"
"path/filepath"
)
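// SetupPtmx adds a symlink to pts/ptmx for /dev/ptmx and finishes setting up /dev/console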
func SetupPtmx(rootfs, consolePath, mountLabel string) error {
ptmx := filepath.Join(rootfs, "dev/ptmx")
if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
return err
}
if err := os.Symlink("pts/ptmx", ptmx); err != nil {
return fmt.Errorf("symlink dev ptmx %s", err)
}
if consolePath != "" {
if err := console.Setup(rootfs, consolePath, mountLabel); err != nil {
return err
}
}
return nil
}


@ -0,0 +1,12 @@
// +build linux
package mount
import (
"github.com/dotcloud/docker/pkg/system"
"syscall"
)
func SetReadonly() error {
return system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, "")
}


@ -0,0 +1,31 @@
// +build linux
package mount
import (
"github.com/dotcloud/docker/pkg/system"
"syscall"
)
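// RemountProc detaches and remounts the proc filesystem; commonly needed when
// running a new process inside an existing container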
func RemountProc() error {
if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil {
return err
}
if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
return err
}
return nil
}
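// RemountSys detaches and remounts sysfs; EINVAL from the unmount means /sys was
// not mounted in this namespace and is treated as a no-op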
func RemountSys() error {
if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil {
if err != syscall.EINVAL {
return err
}
} else {
if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
return err
}
}
return nil
}


@ -6,6 +6,7 @@ import (
"fmt"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/mount"
"github.com/dotcloud/docker/pkg/system"
"os"
"path/filepath"
@ -63,10 +64,10 @@ func (ns *linuxNs) ExecIn(container *libcontainer.Container, nspid int, args []s
if err := system.Unshare(syscall.CLONE_NEWNS); err != nil {
return -1, err
}
if err := remountProc(); err != nil {
if err := mount.RemountProc(); err != nil {
return -1, fmt.Errorf("remount proc %s", err)
}
if err := remountSys(); err != nil {
if err := mount.RemountSys(); err != nil {
return -1, fmt.Errorf("remount sys %s", err)
}
goto dropAndExec


@ -11,8 +11,10 @@ import (
"github.com/dotcloud/docker/pkg/apparmor"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/libcontainer/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/console"
"github.com/dotcloud/docker/pkg/libcontainer/mount"
"github.com/dotcloud/docker/pkg/libcontainer/network"
"github.com/dotcloud/docker/pkg/libcontainer/security/capabilities"
"github.com/dotcloud/docker/pkg/libcontainer/utils"
"github.com/dotcloud/docker/pkg/system"
"github.com/dotcloud/docker/pkg/user"
@ -20,7 +22,7 @@ import (
// Init is the init process that first runs inside a new namespace to setup mounts, users, networking,
// and other options required for the new container.
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, console string, syncPipe *SyncPipe, args []string) error {
func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consolePath string, syncPipe *SyncPipe, args []string) error {
rootfs, err := utils.ResolveRootfs(uncleanRootfs)
if err != nil {
return err
@ -36,20 +38,16 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
ns.logger.Println("received context from parent")
syncPipe.Close()
if console != "" {
ns.logger.Printf("setting up %s as console\n", console)
slave, err := system.OpenTerminal(console, syscall.O_RDWR)
if err != nil {
return fmt.Errorf("open terminal %s", err)
}
if err := dupSlave(slave); err != nil {
return fmt.Errorf("dup2 slave %s", err)
if consolePath != "" {
ns.logger.Printf("setting up %s as console\n", consolePath)
if err := console.OpenAndDup(consolePath); err != nil {
return err
}
}
if _, err := system.Setsid(); err != nil {
return fmt.Errorf("setsid %s", err)
}
if console != "" {
if consolePath != "" {
if err := system.Setctty(); err != nil {
return fmt.Errorf("setctty %s", err)
}
@ -60,7 +58,7 @@ func (ns *linuxNs) Init(container *libcontainer.Container, uncleanRootfs, consol
label.Init()
ns.logger.Println("setup mount namespace")
if err := setupNewMountNamespace(rootfs, container.Mounts, console, container.ReadonlyFs, container.NoPivotRoot, container.Context["mount_label"]); err != nil {
if err := mount.InitializeMountNamespace(rootfs, consolePath, container); err != nil {
return fmt.Errorf("setup mount namespace %s", err)
}
if err := system.Sethostname(container.Hostname); err != nil {
@ -114,21 +112,6 @@ func setupUser(container *libcontainer.Container) error {
return nil
}
// dupSlave dup2s the pty slave's fd onto stdin, stdout, and stderr (fds 0, 1, and 2)
func dupSlave(slave *os.File) error {
if err := system.Dup2(slave.Fd(), 0); err != nil {
return err
}
if err := system.Dup2(slave.Fd(), 1); err != nil {
return err
}
if err := system.Dup2(slave.Fd(), 2); err != nil {
return err
}
return nil
}
// setupVethNetwork uses the Network config if it is not nil to initialize
// the new veth interface inside the container for use by changing the name to eth0
// setting the MTU and IP address along with the default gateway


@ -1,265 +0,0 @@
// +build linux
package nsinit
import (
"fmt"
"github.com/dotcloud/docker/pkg/label"
"github.com/dotcloud/docker/pkg/libcontainer"
"github.com/dotcloud/docker/pkg/system"
"io/ioutil"
"os"
"path/filepath"
"syscall"
)
// default mount point flags
const defaultMountFlags = syscall.MS_NOEXEC | syscall.MS_NOSUID | syscall.MS_NODEV
// setupNewMountNamespace is used to initialize a new mount namespace for a new
// container in the rootfs that is specified.
//
// There is no need to unmount the new mounts because as soon as the mount namespace
// is no longer in use, the mounts will be removed automatically
func setupNewMountNamespace(rootfs string, bindMounts []libcontainer.Mount, console string, readonly, noPivotRoot bool, mountLabel string) error {
flag := syscall.MS_PRIVATE
if noPivotRoot {
flag = syscall.MS_SLAVE
}
if err := system.Mount("", "/", "", uintptr(flag|syscall.MS_REC), ""); err != nil {
return fmt.Errorf("mounting / as slave %s", err)
}
if err := system.Mount(rootfs, rootfs, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("mouting %s as bind %s", rootfs, err)
}
if err := mountSystem(rootfs, mountLabel); err != nil {
return fmt.Errorf("mount system %s", err)
}
for _, m := range bindMounts {
var (
flags = syscall.MS_BIND | syscall.MS_REC
dest = filepath.Join(rootfs, m.Destination)
)
if !m.Writable {
flags = flags | syscall.MS_RDONLY
}
if err := system.Mount(m.Source, dest, "bind", uintptr(flags), ""); err != nil {
return fmt.Errorf("mounting %s into %s %s", m.Source, dest, err)
}
if !m.Writable {
if err := system.Mount(m.Source, dest, "bind", uintptr(flags|syscall.MS_REMOUNT), ""); err != nil {
return fmt.Errorf("remounting %s into %s %s", m.Source, dest, err)
}
}
if m.Private {
if err := system.Mount("", dest, "none", uintptr(syscall.MS_PRIVATE), ""); err != nil {
return fmt.Errorf("mounting %s private %s", dest, err)
}
}
}
if err := copyDevNodes(rootfs); err != nil {
return fmt.Errorf("copy dev nodes %s", err)
}
if err := setupPtmx(rootfs, console, mountLabel); err != nil {
return err
}
if err := system.Chdir(rootfs); err != nil {
return fmt.Errorf("chdir into %s %s", rootfs, err)
}
if noPivotRoot {
if err := rootMsMove(rootfs); err != nil {
return err
}
} else {
if err := rootPivot(rootfs); err != nil {
return err
}
}
if readonly {
if err := system.Mount("/", "/", "bind", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_REC, ""); err != nil {
return fmt.Errorf("mounting %s as readonly %s", rootfs, err)
}
}
system.Umask(0022)
return nil
}
// use a pivot root to setup the rootfs
func rootPivot(rootfs string) error {
pivotDir, err := ioutil.TempDir(rootfs, ".pivot_root")
if err != nil {
return fmt.Errorf("can't create pivot_root dir %s", pivotDir, err)
}
if err := system.Pivotroot(rootfs, pivotDir); err != nil {
return fmt.Errorf("pivot_root %s", err)
}
if err := system.Chdir("/"); err != nil {
return fmt.Errorf("chdir / %s", err)
}
// path to pivot dir now changed, update
pivotDir = filepath.Join("/", filepath.Base(pivotDir))
if err := system.Unmount(pivotDir, syscall.MNT_DETACH); err != nil {
return fmt.Errorf("unmount pivot_root dir %s", err)
}
if err := os.Remove(pivotDir); err != nil {
return fmt.Errorf("remove pivot_root dir %s", err)
}
return nil
}
// use MS_MOVE and chroot to setup the rootfs
func rootMsMove(rootfs string) error {
if err := system.Mount(rootfs, "/", "", syscall.MS_MOVE, ""); err != nil {
return fmt.Errorf("mount move %s into / %s", rootfs, err)
}
if err := system.Chroot("."); err != nil {
return fmt.Errorf("chroot . %s", err)
}
if err := system.Chdir("/"); err != nil {
return fmt.Errorf("chdir / %s", err)
}
return nil
}
// copyDevNodes mknods the hosts devices so the new container has access to them
func copyDevNodes(rootfs string) error {
oldMask := system.Umask(0000)
defer system.Umask(oldMask)
for _, node := range []string{
"null",
"zero",
"full",
"random",
"urandom",
"tty",
} {
if err := copyDevNode(rootfs, node); err != nil {
return err
}
}
return nil
}
func copyDevNode(rootfs, node string) error {
stat, err := os.Stat(filepath.Join("/dev", node))
if err != nil {
return err
}
var (
dest = filepath.Join(rootfs, "dev", node)
st = stat.Sys().(*syscall.Stat_t)
)
if err := system.Mknod(dest, st.Mode, int(st.Rdev)); err != nil && !os.IsExist(err) {
return fmt.Errorf("copy %s %s", node, err)
}
return nil
}
// setupConsole ensures that the container has a proper /dev/console setup
func setupConsole(rootfs, console string, mountLabel string) error {
oldMask := system.Umask(0000)
defer system.Umask(oldMask)
stat, err := os.Stat(console)
if err != nil {
return fmt.Errorf("stat console %s %s", console, err)
}
var (
st = stat.Sys().(*syscall.Stat_t)
dest = filepath.Join(rootfs, "dev/console")
)
if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove %s %s", dest, err)
}
if err := os.Chmod(console, 0600); err != nil {
return err
}
if err := os.Chown(console, 0, 0); err != nil {
return err
}
if err := system.Mknod(dest, (st.Mode&^07777)|0600, int(st.Rdev)); err != nil {
return fmt.Errorf("mknod %s %s", dest, err)
}
if err := label.SetFileLabel(console, mountLabel); err != nil {
return fmt.Errorf("SetFileLabel Failed %s %s", dest, err)
}
if err := system.Mount(console, dest, "bind", syscall.MS_BIND, ""); err != nil {
return fmt.Errorf("bind %s to %s %s", console, dest, err)
}
return nil
}
// mountSystem sets up linux specific system mounts like sys, proc, shm, and devpts
// inside the mount namespace
func mountSystem(rootfs string, mountLabel string) error {
for _, m := range []struct {
source string
path string
device string
flags int
data string
}{
{source: "proc", path: filepath.Join(rootfs, "proc"), device: "proc", flags: defaultMountFlags},
{source: "sysfs", path: filepath.Join(rootfs, "sys"), device: "sysfs", flags: defaultMountFlags},
{source: "shm", path: filepath.Join(rootfs, "dev", "shm"), device: "tmpfs", flags: defaultMountFlags, data: label.FormatMountLabel("mode=1777,size=65536k", mountLabel)},
{source: "devpts", path: filepath.Join(rootfs, "dev", "pts"), device: "devpts", flags: syscall.MS_NOSUID | syscall.MS_NOEXEC, data: label.FormatMountLabel("newinstance,ptmxmode=0666,mode=620,gid=5", mountLabel)},
} {
if err := os.MkdirAll(m.path, 0755); err != nil && !os.IsExist(err) {
return fmt.Errorf("mkdirall %s %s", m.path, err)
}
if err := system.Mount(m.source, m.path, m.device, uintptr(m.flags), m.data); err != nil {
return fmt.Errorf("mounting %s into %s %s", m.source, m.path, err)
}
}
return nil
}
// setupPtmx adds a symlink to pts/ptmx for /dev/ptmx and
// finishes setting up /dev/console
func setupPtmx(rootfs, console string, mountLabel string) error {
ptmx := filepath.Join(rootfs, "dev/ptmx")
if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
return err
}
if err := os.Symlink("pts/ptmx", ptmx); err != nil {
return fmt.Errorf("symlink dev ptmx %s", err)
}
if console != "" {
if err := setupConsole(rootfs, console, mountLabel); err != nil {
return err
}
}
return nil
}
// remountProc is used to detach and remount the proc filesystem,
// commonly needed when running a new process inside an existing container
func remountProc() error {
if err := system.Unmount("/proc", syscall.MNT_DETACH); err != nil {
return err
}
if err := system.Mount("proc", "/proc", "proc", uintptr(defaultMountFlags), ""); err != nil {
return err
}
return nil
}
func remountSys() error {
if err := system.Unmount("/sys", syscall.MNT_DETACH); err != nil {
if err != syscall.EINVAL {
return err
}
} else {
if err := system.Mount("sysfs", "/sys", "sysfs", uintptr(defaultMountFlags), ""); err != nil {
return err
}
}
return nil
}


@ -0,0 +1,51 @@
package restrict
import (
"fmt"
"os"
"path/filepath"
"syscall"
"github.com/dotcloud/docker/pkg/system"
)
const flags = syscall.MS_BIND | syscall.MS_REC | syscall.MS_RDONLY
var restrictions = map[string]string{
// dirs
"/proc/sys": "",
"/proc/irq": "",
"/proc/acpi": "",
// files
"/proc/sysrq-trigger": "/dev/null",
"/proc/kcore": "/dev/null",
}
// Restrict locks down access to many areas of proc,
// relying on the assumption that the user does not have the mount capabilities
// needed to revert the changes made here
func Restrict(rootfs, empty string) error {
for dest, source := range restrictions {
dest = filepath.Join(rootfs, dest)
// there is no "/dev/null" equivalent for directories, so the caller passes an
// empty dir for us to bind mount over them
switch source {
case "":
source = empty
default:
source = filepath.Join(rootfs, source)
}
if err := system.Mount(source, dest, "bind", flags, ""); err != nil {
if os.IsNotExist(err) {
continue
}
return fmt.Errorf("unable to mount %s over %s %s", source, dest, err)
}
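// remount to apply MS_RDONLY; the read-only flag is not enforced by the initial bind mount alone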
if err := system.Mount("", dest, "bind", flags|syscall.MS_REMOUNT, ""); err != nil {
return fmt.Errorf("unable to mount %s over %s %s", source, dest, err)
}
}
return nil
}


@ -11,6 +11,26 @@ var (
ErrUnsupported = errors.New("Unsupported method")
)
type Mounts []Mount
func (s Mounts) OfType(t string) Mounts {
out := Mounts{}
for _, m := range s {
if m.Type == t {
out = append(out, m)
}
}
return out
}
type Mount struct {
Type string `json:"type,omitempty"`
Source string `json:"source,omitempty"` // Source path, in the host namespace
Destination string `json:"destination,omitempty"` // Destination path, in the container
Writable bool `json:"writable,omitempty"`
Private bool `json:"private,omitempty"`
}
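A minimal, hypothetical sketch of filtering with OfType (written as if inside package libcontainer, with fmt imported; the mount values are invented):

func ExampleMounts_OfType() {
	mounts := Mounts{
		{Type: "bind", Source: "/var/lib/app", Destination: "/app", Writable: true},
		{Type: "devtmpfs"},
	}
	for _, m := range mounts.OfType("bind") {
		fmt.Printf("%s -> %s writable=%v\n", m.Source, m.Destination, m.Writable)
	}
	// Output: /var/lib/app -> /app writable=true
}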
// namespaceList is used to convert the libcontainer types
// into the names of the files located in /proc/<pid>/ns/* for
// each namespace


@ -292,6 +292,25 @@ func (r *Registry) GetRemoteTags(registries []string, repository string, token [
return nil, fmt.Errorf("Could not reach any registry endpoint")
}
func buildEndpointsList(headers []string, indexEp string) ([]string, error) {
var endpoints []string
parsedUrl, err := url.Parse(indexEp)
if err != nil {
return nil, err
}
var urlScheme = parsedUrl.Scheme
// The Registry's URL scheme has to match the Index's
for _, ep := range headers {
epList := strings.Split(ep, ",")
for _, epListElement := range epList {
endpoints = append(
endpoints,
fmt.Sprintf("%s://%s/v1/", urlScheme, strings.TrimSpace(epListElement)))
}
}
return endpoints, nil
}
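A hedged illustration of how buildEndpointsList normalizes the header values (the hosts are invented; written as an example test inside package registry, which already imports fmt):

func Example_buildEndpointsList() {
	eps, err := buildEndpointsList(
		[]string{"reg1.example.com , reg2.example.com"},
		"https://index.docker.io/v1/",
	)
	if err != nil {
		panic(err)
	}
	fmt.Println(eps)
	// Output: [https://reg1.example.com/v1/ https://reg2.example.com/v1/]
}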
func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
indexEp := r.indexEndpoint
repositoryTarget := fmt.Sprintf("%srepositories/%s/images", indexEp, remote)
@ -327,11 +346,10 @@ func (r *Registry) GetRepositoryData(remote string) (*RepositoryData, error) {
}
var endpoints []string
var urlScheme = indexEp[:strings.Index(indexEp, ":")]
if res.Header.Get("X-Docker-Endpoints") != "" {
// The Registry's URL scheme has to match the Index'
for _, ep := range res.Header["X-Docker-Endpoints"] {
endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep))
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("Index response didn't contain any endpoints")
@ -560,7 +578,6 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
}
var tokens, endpoints []string
var urlScheme = indexEp[:strings.Index(indexEp, ":")]
if !validate {
if res.StatusCode != 200 && res.StatusCode != 201 {
errBody, err := ioutil.ReadAll(res.Body)
@ -577,9 +594,9 @@ func (r *Registry) PushImageJSONIndex(remote string, imgList []*ImgData, validat
}
if res.Header.Get("X-Docker-Endpoints") != "" {
// The Registry's URL scheme has to match the Index'
for _, ep := range res.Header["X-Docker-Endpoints"] {
endpoints = append(endpoints, fmt.Sprintf("%s://%s/v1/", urlScheme, ep))
endpoints, err = buildEndpointsList(res.Header["X-Docker-Endpoints"], indexEp)
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("Index response didn't contain any endpoints")


@ -291,7 +291,7 @@ func handlerUsers(w http.ResponseWriter, r *http.Request) {
func handlerImages(w http.ResponseWriter, r *http.Request) {
u, _ := url.Parse(testHttpServer.URL)
w.Header().Add("X-Docker-Endpoints", u.Host)
w.Header().Add("X-Docker-Endpoints", fmt.Sprintf("%s , %s ", u.Host, "test.example.com"))
w.Header().Add("X-Docker-Token", fmt.Sprintf("FAKE-SESSION-%d", time.Now().UnixNano()))
if r.Method == "PUT" {
if strings.HasSuffix(r.URL.Path, "images") {


@ -1,7 +1,9 @@
package registry
import (
"fmt"
"github.com/dotcloud/docker/utils"
"net/url"
"strings"
"testing"
)
@ -99,12 +101,23 @@ func TestGetRemoteTags(t *testing.T) {
func TestGetRepositoryData(t *testing.T) {
r := spawnTestRegistry(t)
parsedUrl, err := url.Parse(makeURL("/v1/"))
if err != nil {
t.Fatal(err)
}
host := "http://" + parsedUrl.Host + "/v1/"
data, err := r.GetRepositoryData("foo42/bar")
if err != nil {
t.Fatal(err)
}
assertEqual(t, len(data.ImgList), 2, "Expected 2 images in ImgList")
assertEqual(t, len(data.Endpoints), 1, "Expected one endpoint in Endpoints")
assertEqual(t, len(data.Endpoints), 2,
fmt.Sprintf("Expected 2 endpoints in Endpoints, found %d instead", len(data.Endpoints)))
assertEqual(t, data.Endpoints[0], host,
fmt.Sprintf("Expected first endpoint to be %s but found %s instead", host, data.Endpoints[0]))
assertEqual(t, data.Endpoints[1], "http://test.example.com/v1/",
fmt.Sprintf("Expected second endpoint to be http://test.example.com/v1/ but found %s instead", data.Endpoints[1]))
}
func TestPushImageJSONRegistry(t *testing.T) {