Merge pull request #24199 from tonistiigi/update-swarm

Update swarmkit, add support for external CA
This commit is contained in:
Sebastiaan van Stijn 2016-07-01 00:50:32 -07:00 committed by GitHub
Parent 2358d5425e fa147591ed
Commit 972c6a7113
84 changed files with 3203 additions and 788 deletions
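Functionally, the change wires a new `--external-ca` flag and matching engine-api types end to end. A rough sketch of driving the new API surface programmatically (a sketch only: the listen address and CA URL are illustrative, and a real caller would populate the rest of the spec the way the CLI's `ToSpec` does):

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	spec := swarm.Spec{}
	// Forward node certificate signing to an external CFSSL endpoint
	// (the URL is an assumption for illustration).
	spec.CAConfig.ExternalCAs = []*swarm.ExternalCA{{
		Protocol: swarm.ExternalCAProtocolCFSSL,
		URL:      "https://ca.example.com/api/v1/cfssl/sign",
	}}
	nodeID, err := cli.SwarmInit(context.Background(), swarm.InitRequest{
		ListenAddr: "0.0.0.0:2377",
		Spec:       spec,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("initialized swarm, node:", nodeID)
}
```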


@ -28,7 +28,7 @@ func New(client client.APIClient, noResolve bool) *IDResolver {
func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) {
switch t.(type) {
case swarm.Node:
node, err := r.client.NodeInspect(ctx, id)
node, _, err := r.client.NodeInspectWithRaw(ctx, id)
if err != nil {
return id, nil
}


@ -49,7 +49,7 @@ func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error {
if err != nil {
return nil, nil, err
}
node, err := client.NodeInspect(ctx, nodeRef)
node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
return node, nil, err
}


@ -48,7 +48,7 @@ func runTasks(dockerCli *client.DockerCli, opts tasksOptions) error {
if err != nil {
return nil
}
node, err := client.NodeInspect(ctx, nodeRef)
node, _, err := client.NodeInspectWithRaw(ctx, nodeRef)
if err != nil {
return err
}


@ -42,7 +42,7 @@ func updateNodes(dockerCli *client.DockerCli, nodes []string, mergeNode func(nod
ctx := context.Background()
for _, nodeID := range nodes {
node, err := client.NodeInspect(ctx, nodeID)
node, _, err := client.NodeInspectWithRaw(ctx, nodeID)
if err != nil {
return err
}


@ -1,6 +1,8 @@
package swarm
import (
"encoding/csv"
"errors"
"fmt"
"strings"
"time"
@ -23,6 +25,7 @@ const (
flagListenAddr = "listen-addr"
flagSecret = "secret"
flagTaskHistoryLimit = "task-history-limit"
flagExternalCA = "external-ca"
)
var (
@ -38,6 +41,7 @@ type swarmOptions struct {
taskHistoryLimit int64
dispatcherHeartbeat time.Duration
nodeCertExpiry time.Duration
externalCA ExternalCAOption
}
// NodeAddrOption is a pflag.Value for listen and remote addresses
@ -142,12 +146,102 @@ func NewAutoAcceptOption() AutoAcceptOption {
return AutoAcceptOption{values: make(map[string]bool)}
}
// ExternalCAOption is a Value type for parsing external CA specifications.
type ExternalCAOption struct {
values []*swarm.ExternalCA
}
// Set parses an external CA option.
func (m *ExternalCAOption) Set(value string) error {
parsed, err := parseExternalCA(value)
if err != nil {
return err
}
m.values = append(m.values, parsed)
return nil
}
// Type returns the type of this option.
func (m *ExternalCAOption) Type() string {
return "external-ca"
}
// String returns a string representation of this option.
func (m *ExternalCAOption) String() string {
externalCAs := []string{}
for _, externalCA := range m.values {
repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL)
externalCAs = append(externalCAs, repr)
}
return strings.Join(externalCAs, ", ")
}
// Value returns the external CAs
func (m *ExternalCAOption) Value() []*swarm.ExternalCA {
return m.values
}
// parseExternalCA parses an external CA specification from the command line,
// such as protocol=cfssl,url=https://example.com.
func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) {
csvReader := csv.NewReader(strings.NewReader(caSpec))
fields, err := csvReader.Read()
if err != nil {
return nil, err
}
externalCA := swarm.ExternalCA{
Options: make(map[string]string),
}
var (
hasProtocol bool
hasURL bool
)
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
key, value := parts[0], parts[1]
switch strings.ToLower(key) {
case "protocol":
hasProtocol = true
if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) {
externalCA.Protocol = swarm.ExternalCAProtocolCFSSL
} else {
return nil, fmt.Errorf("unrecognized external CA protocol %s", value)
}
case "url":
hasURL = true
externalCA.URL = value
default:
externalCA.Options[key] = value
}
}
if !hasProtocol {
return nil, errors.New("the external-ca option needs a protocol= parameter")
}
if !hasURL {
return nil, errors.New("the external-ca option needs a url= parameter")
}
return &externalCA, nil
}
func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) {
flags.Var(&opts.autoAccept, flagAutoAccept, "Auto acceptance policy (worker, manager or none)")
flags.StringVar(&opts.secret, flagSecret, "", "Set secret value needed to accept nodes into cluster")
flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 10, "Task history retention limit")
flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period")
flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates")
flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints")
}
func (opts *swarmOptions) ToSpec() swarm.Spec {
@ -160,5 +254,6 @@ func (opts *swarmOptions) ToSpec() swarm.Spec {
spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit
spec.Dispatcher.HeartbeatPeriod = uint64(opts.dispatcherHeartbeat.Nanoseconds())
spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry
spec.CAConfig.ExternalCAs = opts.externalCA.Value()
return spec
}
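`parseExternalCA` above splits the spec with `encoding/csv` rather than a plain `strings.Split`, so quoted values can carry embedded commas. A minimal standalone illustration of the same splitting technique (the quoted `label` field is an invented example, not from the commit):

```go
package main

import (
	"encoding/csv"
	"fmt"
	"strings"
)

func main() {
	// One CSV record, each field a key=value pair; quotes protect commas.
	spec := `protocol=cfssl,url=https://ca.example.com/api/v1/cfssl/sign,"label=a,b"`
	fields, err := csv.NewReader(strings.NewReader(spec)).Read()
	if err != nil {
		panic(err)
	}
	for _, f := range fields {
		kv := strings.SplitN(f, "=", 2)
		fmt.Printf("%s => %s\n", kv[0], kv[1])
	}
}
```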


@ -85,5 +85,10 @@ func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error {
}
}
if flags.Changed(flagExternalCA) {
value := flags.Lookup(flagExternalCA).Value.(*ExternalCAOption)
spec.CAConfig.ExternalCAs = value.Value()
}
return nil
}


@ -35,6 +35,14 @@ func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm {
swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry)
for _, ca := range c.Spec.CAConfig.ExternalCAs {
swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{
Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())),
URL: ca.URL,
Options: ca.Options,
})
}
// Meta
swarm.Version.Index = c.Meta.Version.Index
swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt)
@ -84,6 +92,18 @@ func SwarmSpecToGRPCandMerge(s types.Spec, existingSpec *swarmapi.ClusterSpec) (
},
}
for _, ca := range s.CAConfig.ExternalCAs {
protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))]
if !ok {
return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol)
}
spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{
Protocol: swarmapi.ExternalCA_CAProtocol(protocol),
URL: ca.URL,
Options: ca.Options,
})
}
if err := SwarmSpecUpdateAcceptancePolicy(&spec, s.AcceptancePolicy, existingSpec); err != nil {
return swarmapi.ClusterSpec{}, err
}


@ -117,8 +117,8 @@ func (c *containerConfig) config() *enginecontainer.Config {
// If Command is provided, we replace the whole invocation with Command
// by replacing Entrypoint and specifying Cmd. Args is ignored in this
// case.
config.Entrypoint = append(config.Entrypoint, c.spec().Command[0])
config.Cmd = append(config.Cmd, c.spec().Command[1:]...)
config.Entrypoint = append(config.Entrypoint, c.spec().Command...)
config.Cmd = append(config.Cmd, c.spec().Args...)
} else if len(c.spec().Args) > 0 {
// In this case, we assume the image has an Entrypoint and Args
// specifies the arguments for that entrypoint.


@ -5,7 +5,6 @@ import (
"github.com/docker/docker/container"
"github.com/docker/docker/libcontainerd"
"github.com/docker/engine-api/types"
)
func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) {
@ -13,7 +12,7 @@ func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Contain
// Ensure a runtime has been assigned to this container
if container.HostConfig.Runtime == "" {
container.HostConfig.Runtime = types.DefaultRuntimeName
container.HostConfig.Runtime = stockRuntimeName
container.ToDisk()
}


@ -3644,8 +3644,15 @@ JSON Parameters:
election.
- **Dispatcher** – Configuration settings for the task dispatcher.
- **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
- **CAConfig** – CA configuration.
- **CAConfig** – Certificate authority configuration.
- **NodeCertExpiry** – Automatic expiry for node certificates.
- **ExternalCA** - Configuration for forwarding signing requests to an external
certificate authority.
- **Protocol** - Protocol for communication with the external CA
(currently only "cfssl" is supported).
- **URL** - URL where certificate signing requests should be sent.
- **Options** - An object with key/value pairs that are interpreted
as protocol-specific options for the external CA driver.
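For illustration, the `CAConfig` portion of an init request, serialized from the engine-api types added in this commit, might look as follows; note the wire key is `ExternalCAs` (per the struct's JSON tag), and the URL and option values below are assumptions:

```
"CAConfig": {
  "NodeCertExpiry": 7776000000000000,
  "ExternalCAs": [
    {
      "Protocol": "cfssl",
      "URL": "https://ca.example.com/api/v1/cfssl/sign",
      "Options": {"label": "default"}
    }
  ]
}
```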
### Join an existing Swarm
@ -3792,6 +3799,13 @@ JSON Parameters:
- **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
- **CAConfig** – CA configuration.
- **NodeCertExpiry** – Automatic expiry for node certificates.
- **ExternalCA** - Configuration for forwarding signing requests to an external
certificate authority.
- **Protocol** - Protocol for communication with the external CA
(currently only "cfssl" is supported).
- **URL** - URL where certificate signing requests should be sent.
- **Options** - An object with key/value pairs that are interpreted
as protocol-specific options for the external CA driver.
## 3.8 Services


@ -3645,8 +3645,15 @@ JSON Parameters:
election.
- **Dispatcher** – Configuration settings for the task dispatcher.
- **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
- **CAConfig** – CA configuration.
- **CAConfig** – Certificate authority configuration.
- **NodeCertExpiry** – Automatic expiry for node certificates.
- **ExternalCA** - Configuration for forwarding signing requests to an external
certificate authority.
- **Protocol** - Protocol for communication with the external CA
(currently only "cfssl" is supported).
- **URL** - URL where certificate signing requests should be sent.
- **Options** - An object with key/value pairs that are interpreted
as protocol-specific options for the external CA driver.
### Join an existing Swarm
@ -3793,6 +3800,13 @@ JSON Parameters:
- **HeartbeatPeriod** – The delay for an agent to send a heartbeat to the dispatcher.
- **CAConfig** – CA configuration.
- **NodeCertExpiry** – Automatic expiry for node certificates.
- **ExternalCA** - Configuration for forwarding signing requests to an external
certificate authority.
- **Protocol** - Protocol for communication with the external CA
(currently only "cfssl" is supported).
- **URL** - URL where certificate signing requests should be sent.
- **Options** - An object with key/value pairs that are interpreted
as protocol-specific options for the external CA driver.
## 3.8 Services


@ -17,6 +17,7 @@ parent = "smn_cli"
Options:
--auto-accept value Acceptance policy (default [worker,manager])
--external-ca value Specifications of one or more certificate signing endpoints
--force-new-cluster Force create a new cluster from current state.
--help Print usage
--listen-addr value Listen address (default 0.0.0.0:2377)
@ -34,7 +35,7 @@ ID NAME MEMBERSHIP STATUS AVAILABILITY MANAGER
1ujecd0j9n3ro9i6628smdmth * manager1 Accepted Ready Active Reachable Yes
```
### --auto-accept value
### `--auto-accept value`
This flag controls node acceptance into the cluster. By default, both `worker` and `manager`
nodes are auto-accepted by the cluster. This can be changed by specifying what kinds of nodes
@ -49,6 +50,13 @@ $ docker swarm init --listen-addr 192.168.99.121:2377 --auto-accept worker
Swarm initialized: current node (1m8cdsylxbf3lk8qriqt07hx1) is now a manager.
```
### `--external-ca value`
This flag sets up the swarm to use an external CA to issue node certificates. The value takes
the form `protocol=X,url=Y`. The value for `protocol` specifies what protocol should be used
to send signing requests to the external CA. Currently, the only supported value is `cfssl`.
The URL specifies the endpoint where signing requests should be submitted.
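For example, to initialize a swarm that delegates node certificate signing to a CFSSL server (the URL below is illustrative):

```
$ docker swarm init --listen-addr 192.168.99.121:2377 --external-ca protocol=cfssl,url=https://ca.example.com/api/v1/cfssl/sign
```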
### `--force-new-cluster`
This flag forces an existing node that was part of a lost quorum to restart as a single-node manager without losing its data.


@ -17,6 +17,7 @@ parent = "smn_cli"
Options:
--auto-accept value Auto acceptance policy (worker, manager or none)
--external-ca value Specifications of one or more certificate signing endpoints
--dispatcher-heartbeat duration Dispatcher heartbeat period (default 5s)
--help Print usage
--secret string Set secret value needed to accept nodes into cluster


@ -60,12 +60,12 @@ clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://gith
clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git
clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3
clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d
clone git github.com/docker/engine-api 19b4fb48a86c3318e610e156ec06b684f79ac31d
clone git github.com/docker/engine-api 62043eb79d581a32ea849645277023c550732e52
clone git github.com/RackSec/srslog 259aed10dfa74ea2961eddd1d9847619f6e98837
clone git github.com/imdario/mergo 0.2.1
#get libnetwork packages
clone git github.com/docker/libnetwork ed311d050fda7821f2e7c53a7e08a0205923aef5
clone git github.com/docker/libnetwork 377a7337f2387cce3be1df7a4503446147b68ff1
clone git github.com/docker/go-events 39718a26497694185f8fb58a7d6f31947f3dc42d
clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@ -139,10 +139,10 @@ clone git github.com/docker/docker-credential-helpers v0.3.0
clone git github.com/docker/containerd b93a33be39bc4ef0fb00bfcb79147a28c33d9d43
# cluster
clone git github.com/docker/swarmkit 3f135f206179ea157aeef2d1d401eb795f618da8
clone git github.com/docker/swarmkit 036a4a1e934bd1bbb35c3ec7f85dea2ba6d4e336
clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9
clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028
clone git github.com/cloudflare/cfssl 92f037e39eb103fb30f9151be40d9ed267fc4ae2
clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b
clone git github.com/google/certificate-transparency 025a5cab06f6a819c455d9fdc9e2a1b6d0982284
clone git golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 https://github.com/golang/crypto.git
clone git github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47

vendor/src/github.com/cloudflare/cfssl/api/api.go (new vendored file, 231 lines)

@ -0,0 +1,231 @@
// Package api implements an HTTP-based API and server for CFSSL.
package api
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/cloudflare/cfssl/errors"
"github.com/cloudflare/cfssl/log"
)
// Handler is an interface providing a generic mechanism for handling HTTP requests.
type Handler interface {
Handle(w http.ResponseWriter, r *http.Request) error
}
// HTTPHandler is a wrapper that encapsulates Handler interface as http.Handler.
// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods.
type HTTPHandler struct {
Handler // CFSSL handler
Methods []string // The associated HTTP methods
}
// HandlerFunc is similar to the http.HandlerFunc type; it serves as
// an adapter allowing the use of ordinary functions as Handlers. If
// f is a function with the appropriate signature, HandlerFunc(f) is a
// Handler object that calls f.
type HandlerFunc func(http.ResponseWriter, *http.Request) error
// Handle calls f(w, r)
func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error {
w.Header().Set("Content-Type", "application/json")
return f(w, r)
}
// handleError is the centralised error handling and reporting.
func handleError(w http.ResponseWriter, err error) (code int) {
if err == nil {
return http.StatusOK
}
msg := err.Error()
httpCode := http.StatusInternalServerError
// If it is recognized as HttpError emitted from cfssl,
// we rewrite the status code accordingly. If it is a
// cfssl error, set the http status to StatusBadRequest
switch err := err.(type) {
case *errors.HTTPError:
httpCode = err.StatusCode
code = err.StatusCode
case *errors.Error:
httpCode = http.StatusBadRequest
code = err.ErrorCode
msg = err.Message
}
response := NewErrorResponse(msg, code)
jsonMessage, err := json.Marshal(response)
if err != nil {
log.Errorf("Failed to marshal JSON: %v", err)
} else {
msg = string(jsonMessage)
}
http.Error(w, msg, httpCode)
return code
}
// ServeHTTP encapsulates the call to underlying Handler to handle the request
// and return the response with proper HTTP status code
func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var err error
var match bool
// Throw 405 when requested with an unsupported verb.
for _, m := range h.Methods {
if m == r.Method {
match = true
}
}
if match {
err = h.Handle(w, r)
} else {
err = errors.NewMethodNotAllowed(r.Method)
}
status := handleError(w, err)
log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status)
}
// readRequestBlob takes a JSON-blob-encoded request body in the form
// map[string]string and returns it, the list of keywords presented,
// and any error that occurred.
func readRequestBlob(r *http.Request) (map[string]string, error) {
var blob map[string]string
body, err := ioutil.ReadAll(r.Body)
if err != nil {
return nil, err
}
r.Body.Close()
err = json.Unmarshal(body, &blob)
if err != nil {
return nil, err
}
return blob, nil
}
// ProcessRequestOneOf reads a JSON blob for the request and makes
// sure it contains one of a set of keywords. For example, a request
// might have the ('foo' && 'bar') keys, OR it might have the 'baz'
// key. In either case, we want to accept the request; however, if
// none of these sets shows up, the request is a bad request and an
// error is returned.
func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
blob, err := readRequestBlob(r)
if err != nil {
return nil, nil, err
}
var matched []string
for _, set := range keywordSets {
if matchKeywords(blob, set) {
if matched != nil {
return nil, nil, errors.NewBadRequestString("mismatched parameters")
}
matched = set
}
}
if matched == nil {
return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
}
return blob, matched, nil
}
// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns
// the first match of a set of keywords. For example, a request
// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2)
// By giving a specific ordering of those combinations, we could decide how to accept
// the request.
func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
blob, err := readRequestBlob(r)
if err != nil {
return nil, nil, err
}
for _, set := range keywordSets {
if matchKeywords(blob, set) {
return blob, set, nil
}
}
return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
}
func matchKeywords(blob map[string]string, keywords []string) bool {
for _, keyword := range keywords {
if _, ok := blob[keyword]; !ok {
return false
}
}
return true
}
// ResponseMessage implements the standard for response errors and
// messages. A message has a code and a string message.
type ResponseMessage struct {
Code int `json:"code"`
Message string `json:"message"`
}
// Response implements the CloudFlare standard for API
// responses.
type Response struct {
Success bool `json:"success"`
Result interface{} `json:"result"`
Errors []ResponseMessage `json:"errors"`
Messages []ResponseMessage `json:"messages"`
}
// NewSuccessResponse is a shortcut for creating new successful API
// responses.
func NewSuccessResponse(result interface{}) Response {
return Response{
Success: true,
Result: result,
Errors: []ResponseMessage{},
Messages: []ResponseMessage{},
}
}
// NewSuccessResponseWithMessage is a shortcut for creating new successful API
// responses that includes a message.
func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response {
return Response{
Success: true,
Result: result,
Errors: []ResponseMessage{},
Messages: []ResponseMessage{{code, message}},
}
}
// NewErrorResponse is a shortcut for creating an error response for a
// single error.
func NewErrorResponse(message string, code int) Response {
return Response{
Success: false,
Result: nil,
Errors: []ResponseMessage{{code, message}},
Messages: []ResponseMessage{},
}
}
// SendResponse builds a response from the result, sets the JSON
// header, and writes to the http.ResponseWriter.
func SendResponse(w http.ResponseWriter, result interface{}) error {
response := NewSuccessResponse(result)
w.Header().Set("Content-Type", "application/json")
enc := json.NewEncoder(w)
err := enc.Encode(response)
return err
}
// SendResponseWithMessage builds a response from the result and the
// provided message, sets the JSON header, and writes to the
// http.ResponseWriter.
func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error {
response := NewSuccessResponseWithMessage(result, message, code)
w.Header().Set("Content-Type", "application/json")
enc := json.NewEncoder(w)
err := enc.Encode(response)
return err
}


@ -16,21 +16,26 @@ A database is required for the following:
This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends.
Currently supported:
- SQLite in sqlite
- MySQL in mysql
- PostgreSQL in pg
- SQLite in sqlite
### Get goose
go get https://bitbucket.org/liamstask/goose/
go get bitbucket.org/liamstask/goose/cmd/goose
### Use goose to start and terminate a SQLite DB
To start a SQLite DB using goose:
### Use goose to start and terminate a MySQL DB
To start a MySQL DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up'
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up
To tear down a SQLite DB using goose
To tear down a MySQL DB using goose
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down
Note: the administration of MySQL DB is not included. We assume
the databases being connected to are already created and access control
is properly handled.
### Use goose to start and terminate a PostgreSQL DB
To start a PostgreSQL DB using goose:
@ -43,7 +48,16 @@ To tear down a PostgreSQL DB using goose
Note: the administration of PostgreSQL DB is not included. We assume
the databases being connected to are already created and access control
are properly handled.
is properly handled.
### Use goose to start and terminate a SQLite DB
To start a SQLite DB using goose:
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
To tear down a SQLite DB using goose
goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
## CFSSL Configuration
@ -55,4 +69,3 @@ JSON dictionary:
or
{"driver":"postgres","data_source":"postgres://user:password@host/db"}


@ -9,6 +9,7 @@ import (
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/pem"
"errors"
"net"
@ -129,8 +130,9 @@ func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm {
// CAConfig is a section used in the requests initialising a new CA.
type CAConfig struct {
PathLength int `json:"pathlen"`
Expiry string `json:"expiry"`
PathLength int `json:"pathlen"`
PathLenZero bool `json:"pathlenzero"`
Expiry string `json:"expiry"`
}
// A CertificateRequest encapsulates the API interface to the
@ -175,6 +177,12 @@ func (cr *CertificateRequest) Name() pkix.Name {
return name
}
// BasicConstraints CSR information RFC 5280, 4.2.1.9
type BasicConstraints struct {
IsCA bool `asn1:"optional"`
MaxPathLen int `asn1:"optional,default:-1"`
}
// ParseRequest takes a certificate request and generates a key and
// CSR from it. It does no validation -- caveat emptor. It will,
// however, fail if the key request is not valid (i.e., an unsupported
@ -217,34 +225,11 @@ func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
panic("Generate should have failed to produce a valid key.")
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: req.KeyRequest.SigAlgo(),
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
} else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil {
tpl.EmailAddresses = append(tpl.EmailAddresses, req.Hosts[i])
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
csr, err = Generate(priv.(crypto.Signer), req)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
return
}
block := pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csr,
}
log.Info("encoded CSR")
csr = pem.EncodeToMemory(&block)
return
}
@ -265,6 +250,7 @@ func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
// issue date and expiry date.
req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
req.CA.PathLength = cert.MaxPathLen
req.CA.PathLenZero = cert.MaxPathLenZero
}
return req
@ -377,7 +363,7 @@ func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
// Generate creates a new CSR from a CertificateRequest structure and
// an existing key. The KeyRequest field is ignored.
func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) {
sigAlgo := helpers.SignerAlgo(priv, crypto.SHA256)
sigAlgo := helpers.SignerAlgo(priv)
if sigAlgo == x509.UnknownSignatureAlgorithm {
return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
}
@ -397,6 +383,14 @@ func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err erro
}
}
if req.CA != nil {
err = appendCAInfoToCSR(req.CA, &tpl)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err)
return
}
}
csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
@ -412,3 +406,26 @@ func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err erro
csr = pem.EncodeToMemory(&block)
return
}
// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR
func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error {
pathlen := reqConf.PathLength
if pathlen == 0 && !reqConf.PathLenZero {
pathlen = -1
}
val, err := asn1.Marshal(BasicConstraints{true, pathlen})
if err != nil {
return err
}
csr.ExtraExtensions = []pkix.Extension{
{
Id: asn1.ObjectIdentifier{2, 5, 29, 19},
Value: val,
Critical: true,
},
}
return nil
}
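`appendCAInfoToCSR` relies on `encoding/asn1`'s `optional,default:-1` tag: an unconstrained CA is represented as pathlen -1, which equals the default and is therefore omitted from the encoded BasicConstraints, while an explicit pathlen of 0 is kept. A standalone sketch of that behavior (not part of the commit):

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

// Mirrors csr.BasicConstraints above (RFC 5280, 4.2.1.9).
type BasicConstraints struct {
	IsCA       bool `asn1:"optional"`
	MaxPathLen int  `asn1:"optional,default:-1"`
}

func main() {
	// -1 matches the default, so MaxPathLen is left out of the encoding.
	unconstrained, _ := asn1.Marshal(BasicConstraints{IsCA: true, MaxPathLen: -1})
	// 0 differs from the default and is encoded, expressing pathlen=0.
	zero, _ := asn1.Marshal(BasicConstraints{IsCA: true, MaxPathLen: 0})
	fmt.Printf("pathlen=-1: %x\n", unconstrained)
	fmt.Printf("pathlen=0:  %x\n", zero)
}
```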


@ -6,6 +6,7 @@ import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/asn1"
@ -410,7 +411,7 @@ func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error)
in = bytes.TrimSpace(in)
p, rest := pem.Decode(in)
if p != nil {
if p.Type != "CERTIFICATE REQUEST" {
if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
}
@ -446,28 +447,28 @@ func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
return csrObject, nil
}
// SignerAlgo returns an X.509 signature algorithm corresponding to
// the crypto.Hash provided from a crypto.Signer.
func SignerAlgo(priv crypto.Signer, h crypto.Hash) x509.SignatureAlgorithm {
switch priv.Public().(type) {
// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
switch pub := priv.Public().(type) {
case *rsa.PublicKey:
switch h {
case crypto.SHA512:
bitLength := pub.N.BitLen()
switch {
case bitLength >= 4096:
return x509.SHA512WithRSA
case crypto.SHA384:
case bitLength >= 3072:
return x509.SHA384WithRSA
case crypto.SHA256:
case bitLength >= 2048:
return x509.SHA256WithRSA
default:
return x509.SHA1WithRSA
}
case *ecdsa.PublicKey:
switch h {
case crypto.SHA512:
switch pub.Curve {
case elliptic.P521():
return x509.ECDSAWithSHA512
case crypto.SHA384:
case elliptic.P384():
return x509.ECDSAWithSHA384
case crypto.SHA256:
case elliptic.P256():
return x509.ECDSAWithSHA256
default:
return x509.ECDSAWithSHA1


@ -5,14 +5,10 @@ package initca
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"io/ioutil"
"net"
"time"
"github.com/cloudflare/cfssl/config"
@ -47,14 +43,18 @@ func validator(req *csr.CertificateRequest) error {
// New creates a new root certificate from the certificate request.
func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
policy := CAPolicy()
if req.CA != nil {
if req.CA.Expiry != "" {
CAPolicy.Default.ExpiryString = req.CA.Expiry
CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
policy.Default.ExpiryString = req.CA.Expiry
policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
}
if req.CA.PathLength != 0 {
signer.MaxPathLen = req.CA.PathLength
signer.MaxPathLen = req.CA.PathLength
if req.CA.PathLength != 0 && req.CA.PathLenZero == true {
log.Infof("ignore invalid 'pathlenzero' value")
} else {
signer.MaxPathLenZero = req.CA.PathLenZero
}
}
@ -77,7 +77,7 @@ func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
log.Errorf("failed to create signer: %v", err)
return
}
s.SetPolicy(CAPolicy)
s.SetPolicy(policy)
signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
cert, err = s.Sign(signReq)
@ -133,92 +133,35 @@ func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
// NewFromSigner creates a new root certificate from a crypto.Signer.
func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
policy := CAPolicy()
if req.CA != nil {
if req.CA.Expiry != "" {
CAPolicy.Default.ExpiryString = req.CA.Expiry
CAPolicy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
policy.Default.ExpiryString = req.CA.Expiry
policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
if err != nil {
return nil, nil, err
}
}
if req.CA.PathLength != 0 {
signer.MaxPathLen = req.CA.PathLength
}
}
var sigAlgo x509.SignatureAlgorithm
switch pub := priv.Public().(type) {
case *rsa.PublicKey:
bitLength := pub.N.BitLen()
switch {
case bitLength >= 4096:
sigAlgo = x509.SHA512WithRSA
case bitLength >= 3072:
sigAlgo = x509.SHA384WithRSA
case bitLength >= 2048:
sigAlgo = x509.SHA256WithRSA
default:
sigAlgo = x509.SHA1WithRSA
}
case *ecdsa.PublicKey:
switch pub.Curve {
case elliptic.P521():
sigAlgo = x509.ECDSAWithSHA512
case elliptic.P384():
sigAlgo = x509.ECDSAWithSHA384
case elliptic.P256():
sigAlgo = x509.ECDSAWithSHA256
default:
sigAlgo = x509.ECDSAWithSHA1
}
default:
sigAlgo = x509.UnknownSignatureAlgorithm
}
var tpl = x509.CertificateRequest{
Subject: req.Name(),
SignatureAlgorithm: sigAlgo,
}
for i := range req.Hosts {
if ip := net.ParseIP(req.Hosts[i]); ip != nil {
tpl.IPAddresses = append(tpl.IPAddresses, ip)
signer.MaxPathLen = req.CA.PathLength
if req.CA.PathLength != 0 && req.CA.PathLenZero == true {
log.Infof("ignore invalid 'pathlenzero' value")
} else {
tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i])
signer.MaxPathLenZero = req.CA.PathLenZero
}
}
return signWithCSR(&tpl, priv)
}
// signWithCSR creates a new root certificate from signing a X509.CertificateRequest
// by a crypto.Signer.
func signWithCSR(tpl *x509.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
csrPEM, err = x509.CreateCertificateRequest(rand.Reader, tpl, priv)
csrPEM, err = csr.Generate(priv, req)
if err != nil {
log.Errorf("failed to generate a CSR: %v", err)
// The use of CertificateError was a matter of some
// debate; it is the one edge case in which a new
// error category specifically for CSRs might be
// useful, but it was deemed that one edge case did
// not a new category justify.
err = cferr.Wrap(cferr.CertificateError, cferr.BadRequest, err)
return
return nil, nil, err
}
p := &pem.Block{
Type: "CERTIFICATE REQUEST",
Bytes: csrPEM,
}
csrPEM = pem.EncodeToMemory(p)
s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil)
if err != nil {
log.Errorf("failed to create signer: %v", err)
return
}
s.SetPolicy(CAPolicy)
s.SetPolicy(policy)
signReq := signer.SignRequest{Request: string(csrPEM)}
cert, err = s.Sign(signReq)
@ -268,11 +211,13 @@ func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
}
// CAPolicy contains the CA issuing policy as default policy.
var CAPolicy = &config.Signing{
Default: &config.SigningProfile{
Usage: []string{"cert sign", "crl sign"},
ExpiryString: "43800h",
Expiry: 5 * helpers.OneYear,
CA: true,
},
var CAPolicy = func() *config.Signing {
return &config.Signing{
Default: &config.SigningProfile{
Usage: []string{"cert sign", "crl sign"},
ExpiryString: "43800h",
Expiry: 5 * helpers.OneYear,
CA: true,
},
}
}


@ -45,12 +45,12 @@ var Level = LevelInfo
//
// SyslogWriter is satisfied by *syslog.Writer.
type SyslogWriter interface {
Debug(string) error
Info(string) error
Warning(string) error
Err(string) error
Crit(string) error
Emerg(string) error
Debug(string)
Info(string)
Warning(string)
Err(string)
Crit(string)
Emerg(string)
}
// syslogWriter stores the SetLogger() parameter.
@ -73,23 +73,19 @@ func init() {
func print(l int, msg string) {
if l >= Level {
if syslogWriter != nil {
var err error
switch l {
case LevelDebug:
err = syslogWriter.Debug(msg)
syslogWriter.Debug(msg)
case LevelInfo:
err = syslogWriter.Info(msg)
syslogWriter.Info(msg)
case LevelWarning:
err = syslogWriter.Warning(msg)
syslogWriter.Warning(msg)
case LevelError:
err = syslogWriter.Err(msg)
syslogWriter.Err(msg)
case LevelCritical:
err = syslogWriter.Crit(msg)
syslogWriter.Crit(msg)
case LevelFatal:
err = syslogWriter.Emerg(msg)
}
if err != nil {
log.Printf("Unable to write syslog: %v for msg: %s\n", err, msg)
syslogWriter.Emerg(msg)
}
} else {
log.Printf("[%s] %s", levelPrefix[l], msg)


@ -96,7 +96,11 @@ func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signe
}
func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) {
var distPoints = template.CRLDistributionPoints
err = signer.FillTemplate(template, s.policy.Default, profile)
if distPoints != nil && len(distPoints) > 0 {
template.CRLDistributionPoints = distPoints
}
if err != nil {
return
}
@ -111,9 +115,7 @@ func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile
template.EmailAddresses = nil
s.ca = template
initRoot = true
template.MaxPathLen = signer.MaxPathLen
} else if template.IsCA {
template.MaxPathLen = 1
template.DNSNames = nil
template.EmailAddresses = nil
}
@ -203,7 +205,7 @@ func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
}
if block.Type != "CERTIFICATE REQUEST" {
if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" {
return nil, cferr.Wrap(cferr.CSRError,
cferr.BadRequest, errors.New("not a certificate or csr"))
}
@ -243,6 +245,26 @@ func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) {
}
}
if req.CRLOverride != "" {
safeTemplate.CRLDistributionPoints = []string{req.CRLOverride}
}
if safeTemplate.IsCA {
if !profile.CA {
return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
}
if s.ca != nil && s.ca.MaxPathLen > 0 {
if safeTemplate.MaxPathLen >= s.ca.MaxPathLen {
// do not sign a cert with pathlen > current
return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
}
} else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero {
// signer has pathlen of 0, do not sign more intermediate CAs
return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest)
}
}
OverrideHosts(&safeTemplate, req.Hosts)
safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject)


@ -26,6 +26,9 @@ import (
// MaxPathLen is the default path length for a new CA certificate.
var MaxPathLen = 2
// MaxPathLenZero indicates whether a new CA certificate has pathlen=0
var MaxPathLenZero = false
// Subject contains the information that should be used to override the
// subject information when signing a certificate.
type Subject struct {
@ -50,13 +53,14 @@ type Extension struct {
// Extensions requested in the CSR are ignored, except for those processed by
// ParseCertificateRequest (mainly subjectAltName).
type SignRequest struct {
Hosts []string `json:"hosts"`
Request string `json:"certificate_request"`
Subject *Subject `json:"subject,omitempty"`
Profile string `json:"profile"`
Label string `json:"label"`
Serial *big.Int `json:"serial,omitempty"`
Extensions []Extension `json:"extensions,omitempty"`
Hosts []string `json:"hosts"`
Request string `json:"certificate_request"`
Subject *Subject `json:"subject,omitempty"`
Profile string `json:"profile"`
CRLOverride string `json:"crl_override"`
Label string `json:"label"`
Serial *big.Int `json:"serial,omitempty"`
Extensions []Extension `json:"extensions,omitempty"`
}
// appendIf appends to a if s is not an empty string.
@ -157,26 +161,46 @@ func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
// ParseCertificateRequest takes an incoming certificate request and
// builds a certificate template from it.
func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) {
csr, err := x509.ParseCertificateRequest(csrBytes)
csrv, err := x509.ParseCertificateRequest(csrBytes)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
return
}
err = helpers.CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature)
err = helpers.CheckSignature(csrv, csrv.SignatureAlgorithm, csrv.RawTBSCertificateRequest, csrv.Signature)
if err != nil {
err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err)
return
}
template = &x509.Certificate{
Subject: csr.Subject,
PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
PublicKey: csr.PublicKey,
Subject: csrv.Subject,
PublicKeyAlgorithm: csrv.PublicKeyAlgorithm,
PublicKey: csrv.PublicKey,
SignatureAlgorithm: s.SigAlgo(),
DNSNames: csr.DNSNames,
IPAddresses: csr.IPAddresses,
EmailAddresses: csr.EmailAddresses,
DNSNames: csrv.DNSNames,
IPAddresses: csrv.IPAddresses,
EmailAddresses: csrv.EmailAddresses,
}
for _, val := range csrv.Extensions {
// Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9)
// extension and append to template if necessary
if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) {
var constraints csr.BasicConstraints
var rest []byte
if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil {
return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err)
} else if len(rest) != 0 {
return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints"))
}
template.BasicConstraintsValid = true
template.IsCA = constraints.IsCA
template.MaxPathLen = constraints.MaxPathLen
template.MaxPathLenZero = template.MaxPathLen == 0
}
}
return
@ -222,6 +246,7 @@ func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.Si
notBefore time.Time
notAfter time.Time
crlURL, ocspURL string
issuerURL = profile.IssuerURL
)
// The third value returned from Usages is a list of unknown key usages.
@ -229,7 +254,7 @@ func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.Si
// here.
ku, eku, _ = profile.Usages()
if profile.IssuerURL == nil {
profile.IssuerURL = defaultProfile.IssuerURL
issuerURL = defaultProfile.IssuerURL
}
if ku == 0 && len(eku) == 0 {
@ -279,8 +304,8 @@ func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.Si
template.CRLDistributionPoints = []string{crlURL}
}
if len(profile.IssuerURL) != 0 {
template.IssuingCertificateURL = profile.IssuerURL
if len(issuerURL) != 0 {
template.IssuingCertificateURL = issuerURL
}
if len(profile.Policies) != 0 {
err = addPolicies(template, profile.Policies)


@ -27,7 +27,7 @@ func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (ty
return response, err
}
// ContainerInspectWithRaw returns the container information and it's raw representation.
// ContainerInspectWithRaw returns the container information and its raw representation.
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) {
query := url.Values{}
if getSize {


@ -131,6 +131,11 @@ func (e nodeNotFoundError) Error() string {
return fmt.Sprintf("Error: No such node: %s", e.nodeID)
}
// NotFound indicates that this error is a NotFound error
func (e nodeNotFoundError) NotFound() bool {
return true
}
// IsErrNodeNotFound returns true if the error is caused
// when a node is not found.
func IsErrNodeNotFound(err error) bool {
@ -148,6 +153,11 @@ func (e serviceNotFoundError) Error() string {
return fmt.Sprintf("Error: No such service: %s", e.serviceID)
}
// NotFound indicates that this error is a NotFound error
func (e serviceNotFoundError) NotFound() bool {
return true
}
// IsErrServiceNotFound returns true if the error is caused
// when a service is not found.
func IsErrServiceNotFound(err error) bool {
@ -165,6 +175,11 @@ func (e taskNotFoundError) Error() string {
return fmt.Sprintf("Error: No such task: %s", e.taskID)
}
// NotFound indicates that this error is a NotFound error
func (e taskNotFoundError) NotFound() bool {
return true
}
// IsErrTaskNotFound returns true if the error is caused
// when a task is not found.
func IsErrTaskNotFound(err error) bool {


@ -92,7 +92,7 @@ type NetworkAPIClient interface {
// NodeAPIClient defines API client methods for the nodes
type NodeAPIClient interface {
NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error)
NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error)
NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error)
NodeRemove(ctx context.Context, nodeID string) error
NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error


@ -16,7 +16,7 @@ func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.
return networkResource, err
}
// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and it's raw representation.
// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) {
var networkResource types.NetworkResource
resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil)


@ -1,25 +1,33 @@
package client
import (
"bytes"
"encoding/json"
"io/ioutil"
"net/http"
"github.com/docker/engine-api/types/swarm"
"golang.org/x/net/context"
)
// NodeInspect returns the node information.
func (cli *Client) NodeInspect(ctx context.Context, nodeID string) (swarm.Node, error) {
// NodeInspectWithRaw returns the node information.
func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
if err != nil {
if serverResp.statusCode == http.StatusNotFound {
return swarm.Node{}, nodeNotFoundError{nodeID}
return swarm.Node{}, nil, nodeNotFoundError{nodeID}
}
return swarm.Node{}, err
return swarm.Node{}, nil, err
}
defer ensureReaderClosed(serverResp)
body, err := ioutil.ReadAll(serverResp.body)
if err != nil {
return swarm.Node{}, nil, err
}
var response swarm.Node
err = json.NewDecoder(serverResp.body).Decode(&response)
ensureReaderClosed(serverResp)
return response, err
rdr := bytes.NewReader(body)
err = json.NewDecoder(rdr).Decode(&response)
return response, body, err
}


@ -31,6 +31,7 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types
}
var privileges types.PluginPrivileges
if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil {
ensureReaderClosed(resp)
return err
}
ensureReaderClosed(resp)


@ -16,7 +16,7 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo
return volume, err
}
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and it's raw representation
// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
var volume types.Volume
resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)


@ -54,7 +54,7 @@ const (
MountPropagationSlave MountPropagation = "slave"
)
// BindOptions define options specific to mounts of type "bind".
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation MountPropagation `json:",omitempty"`
}


@ -15,7 +15,7 @@ type ServiceSpec struct {
Annotations
// TaskTemplate defines how the service should construct new tasks when
// ochestrating this service.
// orchestrating this service.
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`


@ -32,7 +32,7 @@ type Policy struct {
Secret *string `json:",omitempty"`
}
// OrchestrationConfig represents ochestration configuration.
// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
TaskHistoryRetentionLimit int64 `json:",omitempty"`
}
@ -54,6 +54,20 @@ type DispatcherConfig struct {
// CAConfig represents CA configuration.
type CAConfig struct {
NodeCertExpiry time.Duration `json:",omitempty"`
ExternalCAs []*ExternalCA `json:",omitempty"`
}
// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string
// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
Protocol ExternalCAProtocol
URL string
Options map[string]string `json:",omitempty"`
}
// InitRequest is the request used to init a swarm.


@ -504,10 +504,6 @@ type Checkpoint struct {
Name string // Name is the name of the checkpoint
}
// DefaultRuntimeName is the reserved name/alias used to represent the
// OCI runtime being shipped with the docker daemon package.
var DefaultRuntimeName = "default"
// Runtime describes an OCI runtime
type Runtime struct {
Path string `json:"path"`


@ -102,7 +102,7 @@ func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error {
deleted = cKey.Key
}
if cKey.Subsystem == subsysGossip /* subsysIPSec */ {
if cKey.Subsystem == subsysIPSec {
drvEnc.Prune = cKey.Key
drvEnc.PruneTag = cKey.LamportTime
}
@ -128,7 +128,7 @@ func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error {
a.networkDB.SetKey(key.Key)
}
if key.Subsystem == subsysGossip /*subsysIPSec*/ {
if key.Subsystem == subsysIPSec {
drvEnc.Key = key.Key
drvEnc.Tag = key.LamportTime
}
@ -138,7 +138,7 @@ func (c *controller) handleKeyChange(keys []*types.EncryptionKey) error {
key, tag := c.getPrimaryKeyTag(subsysGossip)
a.networkDB.SetPrimaryKey(key)
//key, tag = c.getPrimaryKeyTag(subsysIPSec)
key, tag = c.getPrimaryKeyTag(subsysIPSec)
drvEnc.Primary = key
drvEnc.PrimaryTag = tag
@ -317,17 +317,12 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
return nil
}
drvEnc := discoverapi.DriverEncryptionConfig{}
keys, tags := c.getKeys(subsysGossip) // getKeys(subsysIPSec)
drvEnc.Keys = keys
drvEnc.Tags = tags
bindAddr, err := resolveAddr(bindAddrOrInterface)
if err != nil {
return err
}
keys, tags := c.getKeys(subsysGossip)
hostname, _ := os.Hostname()
nDB, err := networkdb.New(&networkdb.Config{
BindAddr: bindAddr,
@ -350,6 +345,11 @@ func (c *controller) agentInit(bindAddrOrInterface string) error {
go c.handleTableEvents(ch, c.handleEpTableEvent)
drvEnc := discoverapi.DriverEncryptionConfig{}
keys, tags = c.getKeys(subsysIPSec)
drvEnc.Keys = keys
drvEnc.Tags = tags
c.drvRegistry.WalkDrivers(func(name string, driver driverapi.Driver, capability driverapi.Capability) bool {
err := driver.DiscoverNew(discoverapi.EncryptionKeysConfig, drvEnc)
if err != nil {
@ -380,7 +380,7 @@ func (c *controller) agentDriverNotify(d driverapi.Driver) {
})
drvEnc := discoverapi.DriverEncryptionConfig{}
keys, tags := c.getKeys(subsysGossip) // getKeys(subsysIPSec)
keys, tags := c.getKeys(subsysIPSec)
drvEnc.Keys = keys
drvEnc.Tags = tags


@ -144,7 +144,7 @@ type controller struct {
unWatchCh chan *endpoint
svcRecords map[string]svcInfo
nmap map[string]*netWatch
serviceBindings map[string]*service
serviceBindings map[serviceKey]*service
defOsSbox osl.Sandbox
ingressSandbox *sandbox
sboxOnce sync.Once
@ -167,7 +167,7 @@ func New(cfgOptions ...config.Option) (NetworkController, error) {
cfg: config.ParseConfigOptions(cfgOptions...),
sandboxes: sandboxTable{},
svcRecords: make(map[string]svcInfo),
serviceBindings: make(map[string]*service),
serviceBindings: make(map[serviceKey]*service),
agentInitDone: make(chan struct{}),
}


@ -54,11 +54,12 @@ var (
)
type datastore struct {
scope string
store store.Store
cache *cache
watchCh chan struct{}
active bool
scope string
store store.Store
cache *cache
watchCh chan struct{}
active bool
sequential bool
sync.Mutex
}
@ -190,6 +191,10 @@ func newClient(scope string, kv string, addr string, config *store.Config, cache
if cached && scope != LocalScope {
return nil, fmt.Errorf("caching supported only for scope %s", LocalScope)
}
sequential := false
if scope == LocalScope {
sequential = true
}
if config == nil {
config = &store.Config{}
@ -216,7 +221,7 @@ func newClient(scope string, kv string, addr string, config *store.Config, cache
return nil, err
}
ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{})}
ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{}), sequential: sequential}
if cached {
ds.cache = newCache(ds)
}
@ -375,8 +380,10 @@ func (ds *datastore) PutObjectAtomic(kvObject KVObject) error {
pair *store.KVPair
err error
)
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
if kvObject == nil {
return types.BadRequestErrorf("invalid KV Object : nil")
@ -420,8 +427,10 @@ add_cache:
// PutObject adds a new Record based on an object into the datastore
func (ds *datastore) PutObject(kvObject KVObject) error {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
if kvObject == nil {
return types.BadRequestErrorf("invalid KV Object : nil")
@ -456,8 +465,10 @@ func (ds *datastore) putObjectWithKey(kvObject KVObject, key ...string) error {
// GetObject returns a record matching the key
func (ds *datastore) GetObject(key string, o KVObject) error {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
if ds.cache != nil {
return ds.cache.get(key, o)
@ -490,8 +501,10 @@ func (ds *datastore) ensureParent(parent string) error {
}
func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
if ds.cache != nil {
return ds.cache.list(kvObject)
@ -536,8 +549,10 @@ func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) {
// DeleteObject unconditionally deletes a record from the store
func (ds *datastore) DeleteObject(kvObject KVObject) error {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
// clean up the cache first
if ds.cache != nil {
@ -555,8 +570,10 @@ func (ds *datastore) DeleteObject(kvObject KVObject) error {
// DeleteObjectAtomic performs atomic delete on a record
func (ds *datastore) DeleteObjectAtomic(kvObject KVObject) error {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
if kvObject == nil {
return types.BadRequestErrorf("invalid KV Object : nil")
@ -588,8 +605,10 @@ del_cache:
// DeleteTree unconditionally deletes a record from the store
func (ds *datastore) DeleteTree(kvObject KVObject) error {
ds.Lock()
defer ds.Unlock()
if ds.sequential {
ds.Lock()
defer ds.Unlock()
}
// clean up the cache first
if ds.cache != nil {


@ -1020,7 +1020,7 @@ func (d *driver) CreateEndpoint(nid, eid string, ifInfo driverapi.InterfaceInfo,
}
if err = d.storeUpdate(endpoint); err != nil {
return fmt.Errorf("failed to save bridge endpoint %s to store: %v", ep.id[0:7], err)
return fmt.Errorf("failed to save bridge endpoint %s to store: %v", endpoint.id[0:7], err)
}
return nil


@ -183,7 +183,7 @@ func (ieie InvalidEndpointIDError) BadRequest() {}
type InvalidSandboxIDError string
func (isie InvalidSandboxIDError) Error() string {
return fmt.Sprintf("invalid sanbox id: %s", string(isie))
return fmt.Sprintf("invalid sandbox id: %s", string(isie))
}
// BadRequest denotes the type of this error


@ -2,7 +2,6 @@ package ovmanager
import (
"fmt"
"log"
"net"
"strconv"
"strings"
@ -20,7 +19,7 @@ import (
const (
networkType = "overlay"
vxlanIDStart = 256
vxlanIDEnd = 1000
vxlanIDEnd = (1 << 24) - 1
)
type networkTable map[string]*network
@ -111,7 +110,8 @@ func (d *driver) NetworkAllocate(id string, option map[string]string, ipV4Data,
}
if err := n.obtainVxlanID(s); err != nil {
log.Printf("Could not obtain vxlan id for pool %s: %v", s.subnetIP, err)
n.releaseVxlanID()
return nil, fmt.Errorf("could not obtain vxlan id for pool %s: %v", s.subnetIP, err)
}
n.subnets = append(n.subnets, s)


@ -294,7 +294,7 @@ func (d *driver) Type() string {
// DiscoverNew is a notification for a new discovery event, such as a new node joining a cluster
func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{}) error {
if dType != discoverapi.NodeDiscovery {
return fmt.Errorf("Unknown discovery type : %v", dType)
return nil
}
notif := &api.DiscoveryNotification{
DiscoveryType: dType,
@ -306,7 +306,7 @@ func (d *driver) DiscoverNew(dType discoverapi.DiscoveryType, data interface{})
// DiscoverDelete is a notification for a discovery delete event, such as a node leaving a cluster
func (d *driver) DiscoverDelete(dType discoverapi.DiscoveryType, data interface{}) error {
if dType != discoverapi.NodeDiscovery {
return fmt.Errorf("Unknown discovery type : %v", dType)
return nil
}
notif := &api.DiscoveryNotification{
DiscoveryType: dType,


@ -722,18 +722,6 @@ func (ep *endpoint) sbLeave(sb *sandbox, force bool, options ...EndpointOption)
return nil
}
func (n *network) validateForceDelete(locator string) error {
if n.Scope() == datastore.LocalScope {
return nil
}
if locator == "" {
return fmt.Errorf("invalid endpoint locator identifier")
}
return nil
}
func (ep *endpoint) Delete(force bool) error {
var err error
n, err := ep.getNetworkFromStore()
@ -750,15 +738,8 @@ func (ep *endpoint) Delete(force bool) error {
epid := ep.id
name := ep.name
sbid := ep.sandboxID
locator := ep.locator
ep.Unlock()
if force {
if err = n.validateForceDelete(locator); err != nil {
return fmt.Errorf("unable to force delete endpoint %s: %v", name, err)
}
}
sb, _ := n.getController().SandboxByID(sbid)
if sb != nil && !force {
return &ActiveContainerError{name: name, id: epid}


@ -83,17 +83,29 @@ func (n *networkNamespace) programGateway(gw net.IP, isAdd bool) error {
return fmt.Errorf("route for the gateway %s could not be found: %v", gw, err)
}
var linkIndex int
for _, gwRoute := range gwRoutes {
if gwRoute.Gw == nil {
linkIndex = gwRoute.LinkIndex
break
}
}
if linkIndex == 0 {
return fmt.Errorf("Direct route for the gateway %s could not be found", gw)
}
if isAdd {
return n.nlHandle.RouteAdd(&netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
LinkIndex: gwRoutes[0].LinkIndex,
LinkIndex: linkIndex,
Gw: gw,
})
}
return n.nlHandle.RouteDel(&netlink.Route{
Scope: netlink.SCOPE_UNIVERSE,
LinkIndex: gwRoutes[0].LinkIndex,
LinkIndex: linkIndex,
Gw: gw,
})
}


@ -1,6 +1,7 @@
package libnetwork
import (
"fmt"
"net"
"sync"
)
@ -12,6 +13,27 @@ var (
fwMarkCtrMu sync.Mutex
)
type portConfigs []*PortConfig
func (p portConfigs) String() string {
if len(p) == 0 {
return ""
}
pc := p[0]
str := fmt.Sprintf("%d:%d/%s", pc.PublishedPort, pc.TargetPort, PortConfig_Protocol_name[int32(pc.Protocol)])
for _, pc := range p[1:] {
str = str + fmt.Sprintf(",%d:%d/%s", pc.PublishedPort, pc.TargetPort, PortConfig_Protocol_name[int32(pc.Protocol)])
}
return str
}
type serviceKey struct {
id string
ports string
}
type service struct {
name string // Service Name
id string // Service ID
@ -21,7 +43,7 @@ type service struct {
loadBalancers map[string]*loadBalancer
// List of ingress ports exposed by the service
ingressPorts []*PortConfig
ingressPorts portConfigs
sync.Mutex
}

View File

@ -48,13 +48,18 @@ func (c *controller) addServiceBinding(name, sid, nid, eid string, vip net.IP, i
return err
}
skey := serviceKey{
id: sid,
ports: portConfigs(ingressPorts).String(),
}
c.Lock()
s, ok := c.serviceBindings[sid]
s, ok := c.serviceBindings[skey]
if !ok {
// Create a new service if we are seeing this service
// for the first time.
s = newService(name, sid, ingressPorts)
c.serviceBindings[sid] = s
c.serviceBindings[skey] = s
}
c.Unlock()
@ -121,8 +126,13 @@ func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, in
return err
}
skey := serviceKey{
id: sid,
ports: portConfigs(ingressPorts).String(),
}
c.Lock()
s, ok := c.serviceBindings[sid]
s, ok := c.serviceBindings[skey]
if !ok {
c.Unlock()
return nil
@ -135,22 +145,19 @@ func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, in
n.(*network).deleteSvcRecords("tasks."+alias, ip, nil, false)
}
// Make sure to remove the right IP since if vip is
// not valid we would have added a DNS RR record.
svcIP := vip
if len(svcIP) == 0 {
svcIP = ip
}
n.(*network).deleteSvcRecords(name, svcIP, nil, false)
for _, alias := range aliases {
n.(*network).deleteSvcRecords(alias, svcIP, nil, false)
// If we are doing DNS RR, remove the endpoint IP from the DNS
// record right away.
if len(vip) == 0 {
n.(*network).deleteSvcRecords(name, ip, nil, false)
for _, alias := range aliases {
n.(*network).deleteSvcRecords(alias, ip, nil, false)
}
}
s.Lock()
defer s.Unlock()
lb, ok := s.loadBalancers[nid]
if !ok {
s.Unlock()
return nil
}
@ -167,7 +174,7 @@ func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, in
if len(s.loadBalancers) == 0 {
// All loadbalancers for the service removed. Time to
// remove the service itself.
delete(c.serviceBindings, sid)
delete(c.serviceBindings, skey)
}
// Remove loadbalancer service(if needed) and backend in all
@ -175,6 +182,15 @@ func (c *controller) rmServiceBinding(name, sid, nid, eid string, vip net.IP, in
if len(vip) != 0 {
n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService)
}
s.Unlock()
// Remove the DNS record for VIP only if we are removing the service
if rmService && len(vip) != 0 {
n.(*network).deleteSvcRecords(name, vip, nil, false)
for _, alias := range aliases {
n.(*network).deleteSvcRecords(alias, vip, nil, false)
}
}
return nil
}
@ -314,7 +330,7 @@ func (sb *sandbox) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*P
if addService {
var iPorts []*PortConfig
if sb.ingress {
iPorts = ingressPorts
iPorts = filterPortConfigs(ingressPorts, false)
if err := programIngress(gwIP, iPorts, false); err != nil {
logrus.Errorf("Failed to add ingress: %v", err)
return
@ -383,7 +399,7 @@ func (sb *sandbox) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*Po
var iPorts []*PortConfig
if sb.ingress {
iPorts = ingressPorts
iPorts = filterPortConfigs(ingressPorts, true)
if err := programIngress(gwIP, iPorts, true); err != nil {
logrus.Errorf("Failed to delete ingress: %v", err)
}
@ -401,8 +417,47 @@ var (
ingressOnce sync.Once
ingressProxyMu sync.Mutex
ingressProxyTbl = make(map[string]io.Closer)
portConfigMu sync.Mutex
portConfigTbl = make(map[PortConfig]int)
)
func filterPortConfigs(ingressPorts []*PortConfig, isDelete bool) []*PortConfig {
portConfigMu.Lock()
iPorts := make([]*PortConfig, 0, len(ingressPorts))
for _, pc := range ingressPorts {
if isDelete {
if cnt, ok := portConfigTbl[*pc]; ok {
// This is the last reference to this
// port config. Delete the port config
// and add it to the filtered list to be
// plumbed.
if cnt == 1 {
delete(portConfigTbl, *pc)
iPorts = append(iPorts, pc)
continue
}
portConfigTbl[*pc] = cnt - 1
}
continue
}
if cnt, ok := portConfigTbl[*pc]; ok {
portConfigTbl[*pc] = cnt + 1
continue
}
// We are adding it for the first time. Add it to the
// filter list to be plumbed.
portConfigTbl[*pc] = 1
iPorts = append(iPorts, pc)
}
portConfigMu.Unlock()
return iPorts
}
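filterPortConfigs above is a global reference counter: the first service instance to publish a port is the one that actually programs the ingress rule, and only the last removal unprograms it. A simplified, standalone sketch of the same idea (string keys instead of PortConfig values, and no mutex):

```
package main

import "fmt"

var refs = map[string]int{} // keyed by a port spec such as "8080/tcp"

// filterAdd reports whether the port still needs to be plumbed.
func filterAdd(port string) bool {
	refs[port]++
	return refs[port] == 1 // first reference: program the rule
}

// filterDel reports whether the port can now be unplumbed.
func filterDel(port string) bool {
	if cnt, ok := refs[port]; ok {
		if cnt == 1 {
			delete(refs, port)
			return true // last reference: remove the rule
		}
		refs[port] = cnt - 1
	}
	return false
}

func main() {
	fmt.Println(filterAdd("8080/tcp")) // true: plumb the iptables rule
	fmt.Println(filterAdd("8080/tcp")) // false: rule already present
	fmt.Println(filterDel("8080/tcp")) // false: still referenced
	fmt.Println(filterDel("8080/tcp")) // true: remove the rule
}
```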
func programIngress(gwIP net.IP, ingressPorts []*PortConfig, isDelete bool) error {
addDelOpt := "-I"
if isDelete {

View File

@ -4,7 +4,6 @@ import (
"fmt"
"math/rand"
"reflect"
"sync"
"time"
"github.com/docker/swarmkit/api"
@ -37,7 +36,6 @@ type Agent struct {
stopped chan struct{} // requests shutdown
closed chan struct{} // only closed in run
err error // read only after closed is closed
mu sync.Mutex
}
// New returns a new agent, ready for task dispatch.

View File

@ -55,7 +55,7 @@ type ContainerStatuser interface {
// correct status depending on the tasks current state according to the result.
//
// Unlike Do, if an error is returned, the status should still be reported. The
// error merely reports the
// error merely reports the failure to get the controller.
func Resolve(ctx context.Context, task *api.Task, executor Executor) (Controller, *api.TaskStatus, error) {
status := task.Status.Copy()

View File

@ -10,7 +10,7 @@ type Executor interface {
// Describe returns the underlying node description.
Describe(ctx context.Context) (*api.NodeDescription, error)
// Configure uses the node object state to propogate node
// Configure uses the node object state to propagate node
// state to the underlying executor.
Configure(ctx context.Context, node *api.Node) error

View File

@ -49,6 +49,10 @@ type NodeConfig struct {
// Secret to be used on the first certificate request.
Secret string
// ExternalCAs is a list of CAs to which a manager node
// will make certificate signing requests for node certificates.
ExternalCAs []*api.ExternalCA
// ForceNewCluster creates a new cluster from current raft state.
ForceNewCluster bool
@ -81,7 +85,6 @@ type Node struct {
config *NodeConfig
remotes *persistentRemotes
role string
roleCond *sync.Cond
conn *grpc.ClientConn
connCond *sync.Cond
nodeID string
@ -95,6 +98,7 @@ type Node struct {
agent *Agent
manager *manager.Manager
roleChangeReq chan api.NodeRole // used to send role updates from the dispatcher api on promotion/demotion
managerRoleCh chan struct{}
}
// NewNode returns new Node instance.
@ -124,8 +128,8 @@ func NewNode(c *NodeConfig) (*Node, error) {
ready: make(chan struct{}),
certificateRequested: make(chan struct{}),
roleChangeReq: make(chan api.NodeRole, 1),
managerRoleCh: make(chan struct{}, 32), // 32 just in case
}
n.roleCond = sync.NewCond(n.RLocker())
n.connCond = sync.NewCond(n.RLocker())
if err := n.loadCertificates(); err != nil {
return nil, err
@ -174,6 +178,8 @@ func (n *Node) run(ctx context.Context) (err error) {
}
}()
// NOTE: When this node is created by NewNode(), our nodeID is set if
// n.loadCertificates() succeeded in loading TLS credentials.
if n.config.JoinAddr == "" && n.nodeID == "" {
if err := n.bootstrapCA(); err != nil {
return err
@ -234,6 +240,10 @@ func (n *Node) run(ctx context.Context) (err error) {
return err
}
if n.role == ca.ManagerRole {
n.managerRoleCh <- struct{}{}
}
forceCertRenewal := make(chan struct{})
go func() {
n.RLock()
@ -270,7 +280,9 @@ func (n *Node) run(ctx context.Context) (err error) {
}
n.Lock()
n.role = certUpdate.Role
n.roleCond.Broadcast()
if n.role == ca.ManagerRole {
n.managerRoleCh <- struct{}{}
}
n.Unlock()
case <-ctx.Done():
return
@ -419,34 +431,6 @@ func (n *Node) CertificateRequested() <-chan struct{} {
return n.certificateRequested
}
func (n *Node) waitRole(ctx context.Context, role string) <-chan struct{} {
c := make(chan struct{})
n.roleCond.L.Lock()
if role == n.role {
close(c)
n.roleCond.L.Unlock()
return c
}
go func() {
select {
case <-ctx.Done():
n.roleCond.Broadcast()
case <-c:
}
}()
go func() {
defer n.roleCond.L.Unlock()
defer close(c)
for role != n.role {
n.roleCond.Wait()
if ctx.Err() != nil {
return
}
}
}()
return c
}
func (n *Node) setControlSocket(conn *grpc.ClientConn) {
n.Lock()
n.conn = conn
@ -549,7 +533,6 @@ func (n *Node) loadCertificates() error {
n.role = clientTLSCreds.Role()
n.nodeID = clientTLSCreds.NodeID()
n.nodeMembership = api.NodeMembershipAccepted
n.roleCond.Broadcast()
n.Unlock()
return nil
@ -599,10 +582,17 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
select {
case <-ctx.Done():
return ctx.Err()
case <-n.waitRole(ctx, ca.ManagerRole):
case <-n.managerRoleCh:
if ctx.Err() != nil {
return ctx.Err()
}
n.Lock()
// in case we missed some notifications
if n.role != ca.ManagerRole {
n.Unlock()
continue
}
n.Unlock()
remoteAddr, _ := n.remotes.Select(n.nodeID)
m, err := manager.New(&manager.Config{
ForceNewCluster: n.config.ForceNewCluster,
@ -611,6 +601,7 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
"unix": n.config.ListenControlAPI,
},
SecurityConfig: securityConfig,
ExternalCAs: n.config.ExternalCAs,
JoinRaft: remoteAddr.Addr,
StateDir: n.config.StateDir,
HeartbeatTick: n.config.HeartbeatTick,
@ -629,17 +620,21 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
n.manager = m
n.Unlock()
go n.initManagerConnection(ctx, ready)
connCtx, connCancel := context.WithCancel(ctx)
go n.initManagerConnection(connCtx, ready)
go func() {
select {
case <-ready:
case <-ctx.Done():
}
if ctx.Err() == nil {
n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, 5)
}
}()
// this happens only on initial start
if ready != nil {
go func(ready chan struct{}) {
select {
case <-ready:
n.remotes.Observe(api.Peer{NodeID: n.nodeID, Addr: n.config.ListenRemoteAPI}, 5)
case <-connCtx.Done():
}
}(ready)
}
ready = nil
select {
case <-ctx.Done():
@ -648,8 +643,8 @@ func (n *Node) runManager(ctx context.Context, securityConfig *ca.SecurityConfig
// in case of demotion manager will stop itself
case <-done:
}
connCancel()
ready = nil // ready event happens once, even on multiple starts
n.Lock()
n.manager = nil
if n.conn != nil {
@ -669,7 +664,6 @@ type persistentRemotes struct {
c *sync.Cond
picker.Remotes
storePath string
ch []chan api.Peer
lastSavedState []api.Peer
}

View File

@ -26,7 +26,6 @@ var (
// agent through errs, messages and tasks.
type session struct {
agent *Agent
nodeID string
sessionID string
session api.Dispatcher_SessionClient
errs chan error

View File

@ -1,8 +1,6 @@
package agent
import (
"bytes"
"github.com/boltdb/bolt"
"github.com/docker/swarmkit/api"
"github.com/gogo/protobuf/proto"
@ -22,12 +20,6 @@ var (
bucketKeyStatus = []byte("status")
)
type bucketKeyPath [][]byte
func (bk bucketKeyPath) String() string {
return string(bytes.Join([][]byte(bk), []byte("/")))
}
// InitDB prepares a database for writing task data.
//
// Proper buckets will be created if they don't already exist.

8 vendor/src/github.com/docker/swarmkit/api/README.md vendored Normal file
View File

@ -0,0 +1,8 @@
### Notice
Do not change .pb.go files directly. You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files.
```
$ make generate
```
Click [here](https://github.com/google/protobuf) for more information about protobuf.

View File

@ -1,3 +1,3 @@
package api
//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto
//go:generate protoc -I.:../protobuf:../vendor:../vendor/github.com/gogo/protobuf --gogoswarm_out=plugins=grpc+deepcopy+raftproxy+authenticatedwrapper,import_path=github.com/docker/swarmkit/api,Mgogoproto/gogo.proto=github.com/gogo/protobuf/gogoproto,Mtimestamp/timestamp.proto=github.com/docker/swarmkit/api/timestamp,Mduration/duration.proto=github.com/docker/swarmkit/api/duration,Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor,Mplugin/plugin.proto=github.com/docker/swarmkit/protobuf/plugin:. types.proto specs.proto objects.proto control.proto dispatcher.proto ca.proto snapshot.proto raft.proto health.proto

714 vendor/src/github.com/docker/swarmkit/api/health.pb.go vendored Normal file
View File

@ -0,0 +1,714 @@
// Code generated by protoc-gen-gogo.
// source: health.proto
// DO NOT EDIT!
package api
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/docker/swarmkit/protobuf/plugin"
import strings "strings"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import sort "sort"
import strconv "strconv"
import reflect "reflect"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
import raftpicker "github.com/docker/swarmkit/manager/raftpicker"
import codes "google.golang.org/grpc/codes"
import metadata "google.golang.org/grpc/metadata"
import transport "google.golang.org/grpc/transport"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
type HealthCheckResponse_ServingStatus int32
const (
HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0
HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1
HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2
)
var HealthCheckResponse_ServingStatus_name = map[int32]string{
0: "UNKNOWN",
1: "SERVING",
2: "NOT_SERVING",
}
var HealthCheckResponse_ServingStatus_value = map[string]int32{
"UNKNOWN": 0,
"SERVING": 1,
"NOT_SERVING": 2,
}
func (x HealthCheckResponse_ServingStatus) String() string {
return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x))
}
func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) {
return fileDescriptorHealth, []int{1, 0}
}
type HealthCheckRequest struct {
Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
}
func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} }
func (*HealthCheckRequest) ProtoMessage() {}
func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{0} }
type HealthCheckResponse struct {
Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"`
}
func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} }
func (*HealthCheckResponse) ProtoMessage() {}
func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{1} }
func init() {
proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest")
proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse")
proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value)
}
type authenticatedWrapperHealthServer struct {
local HealthServer
authorize func(context.Context, []string) error
}
func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer {
return &authenticatedWrapperHealthServer{
local: local,
authorize: authorize,
}
}
func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil {
return nil, err
}
return p.local.Check(ctx, r)
}
func (m *HealthCheckRequest) Copy() *HealthCheckRequest {
if m == nil {
return nil
}
o := &HealthCheckRequest{
Service: m.Service,
}
return o
}
func (m *HealthCheckResponse) Copy() *HealthCheckResponse {
if m == nil {
return nil
}
o := &HealthCheckResponse{
Status: m.Status,
}
return o
}
func (this *HealthCheckRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.HealthCheckRequest{")
s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *HealthCheckResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&api.HealthCheckResponse{")
s = append(s, "Status: "+fmt.Sprintf("%#v", this.Status)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringHealth(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
func extensionToGoStringHealth(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
if e == nil {
return "nil"
}
s := "map[int32]proto.Extension{"
keys := make([]int, 0, len(e))
for k := range e {
keys = append(keys, int(k))
}
sort.Ints(keys)
ss := []string{}
for _, k := range keys {
ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
}
s += strings.Join(ss, ",") + "}"
return s
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion2
// Client API for Health service
type HealthClient interface {
Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error)
}
type healthClient struct {
cc *grpc.ClientConn
}
func NewHealthClient(cc *grpc.ClientConn) HealthClient {
return &healthClient{cc}
}
func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) {
out := new(HealthCheckResponse)
err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Health service
type HealthServer interface {
Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error)
}
func RegisterHealthServer(s *grpc.Server, srv HealthServer) {
s.RegisterService(&_Health_serviceDesc, srv)
}
func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(HealthCheckRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(HealthServer).Check(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/docker.swarmkit.v1.Health/Check",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest))
}
return interceptor(ctx, in, info, handler)
}
var _Health_serviceDesc = grpc.ServiceDesc{
ServiceName: "docker.swarmkit.v1.Health",
HandlerType: (*HealthServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Check",
Handler: _Health_Check_Handler,
},
},
Streams: []grpc.StreamDesc{},
}
func (m *HealthCheckRequest) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *HealthCheckRequest) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Service) > 0 {
data[i] = 0xa
i++
i = encodeVarintHealth(data, i, uint64(len(m.Service)))
i += copy(data[i:], m.Service)
}
return i, nil
}
func (m *HealthCheckResponse) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *HealthCheckResponse) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Status != 0 {
data[i] = 0x8
i++
i = encodeVarintHealth(data, i, uint64(m.Status))
}
return i, nil
}
func encodeFixed64Health(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Health(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintHealth(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
type raftProxyHealthServer struct {
local HealthServer
connSelector *raftpicker.ConnSelector
cluster raftpicker.RaftCluster
ctxMods []func(context.Context) (context.Context, error)
}
func NewRaftProxyHealthServer(local HealthServer, connSelector *raftpicker.ConnSelector, cluster raftpicker.RaftCluster, ctxMod func(context.Context) (context.Context, error)) HealthServer {
redirectChecker := func(ctx context.Context) (context.Context, error) {
s, ok := transport.StreamFromContext(ctx)
if !ok {
return ctx, grpc.Errorf(codes.InvalidArgument, "remote addr is not found in context")
}
addr := s.ServerTransport().RemoteAddr().String()
md, ok := metadata.FromContext(ctx)
if ok && len(md["redirect"]) != 0 {
return ctx, grpc.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
}
if !ok {
md = metadata.New(map[string]string{})
}
md["redirect"] = append(md["redirect"], addr)
return metadata.NewContext(ctx, md), nil
}
mods := []func(context.Context) (context.Context, error){redirectChecker}
mods = append(mods, ctxMod)
return &raftProxyHealthServer{
local: local,
cluster: cluster,
connSelector: connSelector,
ctxMods: mods,
}
}
func (p *raftProxyHealthServer) runCtxMods(ctx context.Context) (context.Context, error) {
var err error
for _, mod := range p.ctxMods {
ctx, err = mod(ctx)
if err != nil {
return ctx, err
}
}
return ctx, nil
}
func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) {
if p.cluster.IsLeader() {
return p.local.Check(ctx, r)
}
ctx, err := p.runCtxMods(ctx)
if err != nil {
return nil, err
}
conn, err := p.connSelector.Conn()
if err != nil {
return nil, err
}
return NewHealthClient(conn).Check(ctx, r)
}
func (m *HealthCheckRequest) Size() (n int) {
var l int
_ = l
l = len(m.Service)
if l > 0 {
n += 1 + l + sovHealth(uint64(l))
}
return n
}
func (m *HealthCheckResponse) Size() (n int) {
var l int
_ = l
if m.Status != 0 {
n += 1 + sovHealth(uint64(m.Status))
}
return n
}
func sovHealth(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozHealth(x uint64) (n int) {
return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *HealthCheckRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&HealthCheckRequest{`,
`Service:` + fmt.Sprintf("%v", this.Service) + `,`,
`}`,
}, "")
return s
}
func (this *HealthCheckResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&HealthCheckResponse{`,
`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
`}`,
}, "")
return s
}
func valueToStringHealth(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *HealthCheckRequest) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthHealth
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Service = string(data[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipHealth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthHealth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *HealthCheckResponse) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
}
m.Status = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowHealth
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipHealth(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthHealth
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipHealth(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthHealth
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowHealth
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipHealth(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow")
)
var fileDescriptorHealth = []byte{
// 284 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0xc9, 0x48, 0x4d, 0xcc,
0x29, 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4a, 0xc9, 0x4f, 0xce, 0x4e, 0x2d,
0xd2, 0x2b, 0x2e, 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf,
0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xc2, 0x05, 0x39, 0xa5, 0xe9, 0x99,
0x79, 0xfa, 0x10, 0x0a, 0x22, 0xa8, 0xa4, 0xc7, 0x25, 0xe4, 0x01, 0x36, 0xce, 0x39, 0x23, 0x35,
0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 0xb5, 0xb8, 0x44, 0x48, 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8,
0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x55, 0x5a, 0xc0, 0xc8,
0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0xc8, 0x97, 0x8b, 0xad, 0xb8, 0x24,
0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x54, 0x0f, 0xd3, 0x5d, 0x7a, 0x58, 0x34, 0xea,
0x05, 0x83, 0x0c, 0xce, 0x4b, 0x0f, 0x06, 0x6b, 0x0e, 0x82, 0x1a, 0xa2, 0x64, 0xc5, 0xc5, 0x8b,
0x22, 0x21, 0xc4, 0xcd, 0xc5, 0x1e, 0xea, 0xe7, 0xed, 0xe7, 0x1f, 0xee, 0x27, 0xc0, 0x00, 0xe2,
0x04, 0xbb, 0x06, 0x85, 0x79, 0xfa, 0xb9, 0x0b, 0x30, 0x0a, 0xf1, 0x73, 0x71, 0xfb, 0xf9, 0x87,
0xc4, 0xc3, 0x04, 0x98, 0x8c, 0x2a, 0xb9, 0xd8, 0x20, 0x16, 0x09, 0xe5, 0x73, 0xb1, 0x82, 0x2d,
0x13, 0x52, 0x23, 0xe8, 0x1a, 0xb0, 0xbf, 0xa5, 0xd4, 0x89, 0x74, 0xb5, 0x92, 0xe8, 0xa9, 0x75,
0xef, 0x66, 0x30, 0xf1, 0x73, 0xf1, 0x82, 0x15, 0xea, 0xe6, 0x26, 0xe6, 0x25, 0xa6, 0xa7, 0x16,
0x39, 0xc9, 0x9c, 0x78, 0x28, 0xc7, 0x70, 0x03, 0x88, 0x3f, 0x3c, 0x94, 0x63, 0x6c, 0x78, 0x24,
0xc7, 0x78, 0x02, 0x88, 0x2f, 0x00, 0xf1, 0x03, 0x20, 0x4e, 0x62, 0x03, 0x07, 0xb9, 0x31, 0x20,
0x00, 0x00, 0xff, 0xff, 0xf7, 0x14, 0x7c, 0x23, 0xc1, 0x01, 0x00, 0x00,
}

34 vendor/src/github.com/docker/swarmkit/api/health.proto vendored Normal file
View File

@ -0,0 +1,34 @@
syntax = "proto3";
// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto
//
// We use the same health check service proto description defined in the gRPC documentation,
// including the authorization check. This requires our own implementation of the health
// package located in `manager/health`.
//
// For more info, refer to:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md
package docker.swarmkit.v1;
import "gogoproto/gogo.proto";
import "plugin/plugin.proto";
service Health {
rpc Check(HealthCheckRequest) returns (HealthCheckResponse) {
option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
};
}
message HealthCheckRequest {
string service = 1;
}
message HealthCheckResponse {
enum ServingStatus {
UNKNOWN = 0;
SERVING = 1;
NOT_SERVING = 2;
}
ServingStatus status = 1;
}
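With the service defined, a caller can probe a manager through the generated HealthClient. A rough standalone sketch; the address is a placeholder, and grpc.WithInsecure() is for illustration only, since a real swarm manager authorizes Check for the "swarm-manager" role over mutual TLS:

```
package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/api"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	conn, err := grpc.Dial("127.0.0.1:4242",
		grpc.WithInsecure(), // placeholder; production managers require mutual TLS
		grpc.WithTimeout(5*time.Second))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	resp, err := api.NewHealthClient(conn).Check(context.Background(),
		&api.HealthCheckRequest{Service: ""})
	if err != nil {
		panic(err)
	}
	fmt.Println("status:", resp.Status) // SERVING, NOT_SERVING or UNKNOWN
}
```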

View File

@ -90,8 +90,13 @@ func (*JoinRequest) ProtoMessage() {}
func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} }
type JoinResponse struct {
RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
// RaftID is the ID assigned to the new member.
RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
// Members is the membership set of the cluster.
Members []*RaftMember `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"`
// RemovedMembers is a list of members that have been removed from
// the cluster, so the new node can avoid communicating with them.
RemovedMembers []uint64 `protobuf:"varint,3,rep,name=removed_members,json=removedMembers" json:"removed_members,omitempty"`
}
func (m *JoinResponse) Reset() { *m = JoinResponse{} }
@ -489,6 +494,13 @@ func (m *JoinResponse) Copy() *JoinResponse {
}
}
if m.RemovedMembers != nil {
o.RemovedMembers = make([]uint64, 0, len(m.RemovedMembers))
for _, v := range m.RemovedMembers {
o.RemovedMembers = append(o.RemovedMembers, v)
}
}
return o
}
@ -639,12 +651,13 @@ func (this *JoinResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 6)
s := make([]string, 0, 7)
s = append(s, "&api.JoinResponse{")
s = append(s, "RaftID: "+fmt.Sprintf("%#v", this.RaftID)+",\n")
if this.Members != nil {
s = append(s, "Members: "+fmt.Sprintf("%#v", this.Members)+",\n")
}
s = append(s, "RemovedMembers: "+fmt.Sprintf("%#v", this.RemovedMembers)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
@ -1111,6 +1124,13 @@ func (m *JoinResponse) MarshalTo(data []byte) (int, error) {
i += n
}
}
if len(m.RemovedMembers) > 0 {
for _, num := range m.RemovedMembers {
data[i] = 0x18
i++
i = encodeVarintRaft(data, i, uint64(num))
}
}
return i, nil
}
@ -1611,6 +1631,11 @@ func (m *JoinResponse) Size() (n int) {
n += 1 + l + sovRaft(uint64(l))
}
}
if len(m.RemovedMembers) > 0 {
for _, e := range m.RemovedMembers {
n += 1 + sovRaft(uint64(e))
}
}
return n
}
@ -1781,6 +1806,7 @@ func (this *JoinResponse) String() string {
s := strings.Join([]string{`&JoinResponse{`,
`RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`,
`Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`,
`RemovedMembers:` + fmt.Sprintf("%v", this.RemovedMembers) + `,`,
`}`,
}, "")
return s
@ -2238,6 +2264,26 @@ func (m *JoinResponse) Unmarshal(data []byte) error {
return err
}
iNdEx = postIndex
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RemovedMembers", wireType)
}
var v uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowRaft
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
v |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.RemovedMembers = append(m.RemovedMembers, v)
default:
iNdEx = preIndex
skippy, err := skipRaft(data[iNdEx:])
@ -3108,58 +3154,59 @@ var (
)
var fileDescriptorRaft = []byte{
// 833 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x95, 0x4d, 0x53, 0xdb, 0x46,
0x18, 0xc7, 0x2d, 0x59, 0xc8, 0xed, 0x9a, 0xb7, 0x59, 0x0a, 0x35, 0x2a, 0x63, 0x40, 0x74, 0xa6,
0x85, 0x29, 0xf2, 0xd4, 0x3d, 0xb4, 0xd3, 0xf6, 0x62, 0x1b, 0xcf, 0xd4, 0x05, 0x6c, 0x46, 0xd8,
0x2d, 0x37, 0x2a, 0x4b, 0x8b, 0x51, 0x6d, 0x6b, 0x5d, 0xed, 0xda, 0x4c, 0x2f, 0x19, 0x8e, 0x19,
0xae, 0x39, 0x24, 0x97, 0x9c, 0x92, 0x33, 0x1f, 0x20, 0x9f, 0x80, 0xc9, 0x29, 0xb7, 0xe4, 0x44,
0x02, 0x1f, 0x20, 0xc9, 0x47, 0xc8, 0xae, 0x5e, 0x0c, 0x31, 0xb2, 0xf1, 0x41, 0xb0, 0xec, 0xfe,
0xfe, 0xcf, 0xff, 0xd9, 0x67, 0xf7, 0x59, 0x00, 0x70, 0x8d, 0x23, 0xaa, 0x75, 0x5c, 0x4c, 0x31,
0x84, 0x16, 0x36, 0x9b, 0xc8, 0xd5, 0xc8, 0x89, 0xe1, 0xb6, 0x9b, 0x36, 0xd5, 0x7a, 0x3f, 0x2a,
0x53, 0xb8, 0xfe, 0x2f, 0x32, 0x29, 0xf1, 0x11, 0x25, 0x49, 0xff, 0xef, 0xa0, 0xf0, 0x8f, 0xcd,
0x86, 0x4d, 0x8f, 0xbb, 0x75, 0xcd, 0xc4, 0xed, 0x8c, 0x89, 0x5d, 0x84, 0x49, 0x06, 0x51, 0xd3,
0xca, 0xf0, 0x90, 0xde, 0x8f, 0x4e, 0x3d, 0x73, 0x13, 0x5e, 0xf9, 0xaa, 0x81, 0x1b, 0xd8, 0x1b,
0x66, 0xf8, 0x28, 0x98, 0x9d, 0xeb, 0xb4, 0xba, 0x0d, 0xdb, 0xc9, 0xf8, 0xbf, 0xfc, 0x49, 0xf5,
0x5c, 0x00, 0x40, 0x67, 0xca, 0x5d, 0xd4, 0xae, 0x23, 0x17, 0xae, 0x81, 0x04, 0x8f, 0x73, 0x68,
0x5b, 0x29, 0x61, 0x45, 0xf8, 0x5e, 0xca, 0x83, 0xeb, 0xcb, 0x65, 0x99, 0x03, 0xa5, 0x2d, 0x5d,
0xe6, 0x4b, 0x25, 0x8b, 0x43, 0x0e, 0xb6, 0x10, 0x87, 0x44, 0x06, 0x7d, 0xe9, 0x43, 0x65, 0x36,
0xc5, 0x21, 0xbe, 0xc4, 0x20, 0x08, 0x24, 0xc3, 0xb2, 0xdc, 0x54, 0x9c, 0x13, 0xba, 0x37, 0x86,
0x79, 0x20, 0x13, 0x6a, 0xd0, 0x2e, 0x49, 0x49, 0x6c, 0x36, 0x99, 0xfd, 0x56, 0xbb, 0x5b, 0x07,
0xed, 0x26, 0x9b, 0x7d, 0x8f, 0xcd, 0x4b, 0x17, 0x97, 0xcb, 0x31, 0x3d, 0x50, 0xaa, 0xab, 0x20,
0xf9, 0x27, 0xb6, 0x1d, 0x1d, 0xfd, 0xd7, 0x45, 0x84, 0xf6, 0x6d, 0x84, 0x1b, 0x1b, 0xb5, 0x0d,
0x26, 0x7d, 0x84, 0x74, 0xb0, 0x43, 0xd0, 0x78, 0x9b, 0xfa, 0x05, 0x24, 0xda, 0x9e, 0x2b, 0x61,
0x9b, 0x8a, 0xb3, 0xe4, 0xd2, 0xa3, 0x93, 0xd3, 0x43, 0x5c, 0xcd, 0x83, 0xc9, 0x1d, 0x64, 0xf4,
0x50, 0x98, 0x52, 0x16, 0x48, 0xbc, 0x06, 0x9e, 0xd7, 0xfd, 0x61, 0x3c, 0x56, 0x9d, 0x01, 0x53,
0x41, 0x0c, 0x3f, 0x67, 0x75, 0x07, 0x2c, 0xee, 0xb9, 0xd8, 0x44, 0x84, 0xf8, 0x2c, 0x21, 0x46,
0xa3, 0xef, 0xb0, 0xce, 0x73, 0xf5, 0x66, 0x02, 0x93, 0x19, 0xcd, 0xbf, 0x04, 0x5a, 0x08, 0x86,
0xeb, 0xbf, 0x4a, 0xa7, 0x8f, 0xd5, 0x98, 0xba, 0x04, 0x94, 0xa8, 0x68, 0x81, 0xd7, 0xef, 0x60,
0x9e, 0x8d, 0x71, 0xab, 0x87, 0x72, 0xac, 0x7c, 0x1c, 0x0a, 0x7c, 0xc6, 0x29, 0x9c, 0xfa, 0x03,
0x58, 0x18, 0x54, 0x07, 0x75, 0x8f, 0x3a, 0x9b, 0x23, 0x30, 0x57, 0x72, 0x28, 0x72, 0x1d, 0xa3,
0xc5, 0xe3, 0x84, 0x4e, 0x0b, 0x40, 0xec, 0x9b, 0xc8, 0xcc, 0x44, 0x64, 0x06, 0x6c, 0x06, 0xfe,
0x0c, 0x64, 0xc3, 0xa4, 0x36, 0x76, 0x82, 0x43, 0x59, 0x8e, 0xaa, 0xe6, 0x3e, 0x65, 0x2d, 0x91,
0xf3, 0x30, 0x3d, 0xc0, 0xd5, 0xb7, 0x22, 0x48, 0xde, 0x9a, 0x87, 0xbf, 0xf5, 0x03, 0x71, 0x93,
0xe9, 0xec, 0xda, 0x3d, 0x81, 0xb6, 0x6d, 0xc7, 0x0a, 0x83, 0x41, 0x2d, 0x38, 0x51, 0xd1, 0x2b,
0x76, 0x2a, 0x4a, 0xca, 0x6f, 0xff, 0x1f, 0x31, 0xff, 0x34, 0x59, 0xd6, 0x09, 0x82, 0xdc, 0x9e,
0x6d, 0x22, 0xef, 0xfa, 0x27, 0xb3, 0xdf, 0x44, 0xba, 0xf9, 0x08, 0x53, 0x85, 0x34, 0x37, 0xa2,
0x06, 0x69, 0x06, 0xed, 0x11, 0x69, 0x54, 0x65, 0xeb, 0xdc, 0x88, 0x73, 0xdc, 0xc8, 0x41, 0xf4,
0x04, 0xbb, 0xcd, 0xd4, 0xc4, 0x70, 0xa3, 0xb2, 0x8f, 0x70, 0xa3, 0x80, 0xe6, 0x42, 0xb3, 0xd5,
0x25, 0xec, 0x20, 0x52, 0xf2, 0x70, 0x61, 0xc1, 0x47, 0xb8, 0x30, 0xa0, 0xf3, 0x5f, 0x00, 0x99,
0x1a, 0x6e, 0x03, 0xd1, 0x8d, 0x0f, 0x02, 0x98, 0x19, 0x28, 0x18, 0xfc, 0x0e, 0x24, 0x6a, 0xe5,
0xed, 0x72, 0xe5, 0xef, 0xf2, 0x6c, 0x4c, 0x51, 0xce, 0x9e, 0xae, 0x2c, 0x0c, 0x10, 0x35, 0xa7,
0xe9, 0xe0, 0x13, 0x87, 0xf5, 0xc8, 0xdc, 0x7e, 0xb5, 0xa2, 0x17, 0x0f, 0x73, 0x85, 0x6a, 0xa9,
0x52, 0x3e, 0x2c, 0xe8, 0xc5, 0x5c, 0xb5, 0x38, 0x2b, 0x28, 0x8b, 0x4c, 0x34, 0x3f, 0x20, 0x2a,
0xb8, 0xc8, 0xa0, 0xe8, 0x8e, 0xa6, 0xb6, 0xb7, 0xc5, 0x35, 0x62, 0xa4, 0xa6, 0xd6, 0xb1, 0xa2,
0x34, 0x7a, 0x71, 0xb7, 0xf2, 0x57, 0x71, 0x36, 0x1e, 0xa9, 0xd1, 0x51, 0x1b, 0xf7, 0x90, 0xf2,
0xf5, 0xc3, 0x67, 0xe9, 0xd8, 0x8b, 0xe7, 0xe9, 0xc1, 0xdd, 0x65, 0x1f, 0x89, 0x40, 0xe2, 0x97,
0x16, 0x9e, 0x09, 0x00, 0xde, 0xed, 0x27, 0xb8, 0x19, 0x55, 0xc3, 0xa1, 0x5d, 0xac, 0x68, 0xe3,
0xe2, 0x41, 0x9b, 0xce, 0xbf, 0x3c, 0x7f, 0xff, 0x44, 0x64, 0x2f, 0x85, 0xc7, 0x6f, 0xb6, 0x0d,
0x87, 0xad, 0xba, 0xf0, 0x01, 0x98, 0xfe, 0xbc, 0xff, 0xe0, 0x7a, 0xe4, 0x93, 0x13, 0xd5, 0xe1,
0xca, 0xc6, 0x38, 0xe8, 0x48, 0xff, 0xec, 0x6b, 0x81, 0x25, 0xd0, 0x7f, 0xcf, 0xc8, 0xb1, 0xdd,
0x81, 0xff, 0x00, 0x89, 0x3f, 0xc0, 0x30, 0xb2, 0x5b, 0x6f, 0xbd, 0xde, 0xca, 0xca, 0x70, 0x60,
0xf4, 0xa6, 0x4d, 0x30, 0xe1, 0xbd, 0x97, 0x30, 0x32, 0xc2, 0xed, 0xe7, 0x58, 0x59, 0x1d, 0x41,
0x8c, 0x34, 0xc9, 0x2f, 0x5d, 0x5c, 0xa5, 0x63, 0x6f, 0xd8, 0xf7, 0xf1, 0x2a, 0x2d, 0x9c, 0x5e,
0xa7, 0x85, 0x0b, 0xf6, 0xbd, 0x62, 0xdf, 0x3b, 0xf6, 0x1d, 0xc4, 0x0f, 0xa4, 0xba, 0xec, 0xfd,
0x13, 0xfd, 0xe9, 0x53, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xfb, 0x14, 0x74, 0xdc, 0x07, 0x00,
0x00,
// 852 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x95, 0x4f, 0x53, 0x23, 0x45,
0x18, 0xc6, 0x33, 0x93, 0x61, 0xa2, 0x1d, 0x20, 0x54, 0x23, 0x18, 0x46, 0x2a, 0xc0, 0x60, 0x95,
0x42, 0xc9, 0xa4, 0x8c, 0x07, 0x2d, 0xf5, 0x92, 0x84, 0x54, 0x19, 0x81, 0x84, 0x1a, 0x12, 0xe5,
0x16, 0x27, 0x33, 0x4d, 0x18, 0x93, 0x4c, 0xc7, 0xe9, 0x4e, 0x28, 0x2f, 0x16, 0x47, 0x8b, 0xab,
0x55, 0xea, 0xc5, 0x93, 0x9e, 0xf9, 0x00, 0x7e, 0x02, 0x6a, 0x4f, 0x7b, 0xdb, 0x3d, 0xb1, 0x0b,
0x1f, 0x60, 0x77, 0x3f, 0xc2, 0x76, 0xcf, 0x9f, 0xc0, 0x86, 0x49, 0xc8, 0xa1, 0xa1, 0x79, 0xfb,
0xf7, 0xbc, 0x4f, 0xf7, 0xdb, 0xf3, 0x36, 0x00, 0xb8, 0xc6, 0x09, 0xd5, 0x7a, 0x2e, 0xa6, 0x18,
0x42, 0x0b, 0x9b, 0x6d, 0xe4, 0x6a, 0xe4, 0xcc, 0x70, 0xbb, 0x6d, 0x9b, 0x6a, 0x83, 0xcf, 0x95,
0x39, 0xdc, 0xfc, 0x19, 0x99, 0x94, 0xf8, 0x88, 0x92, 0xa4, 0xbf, 0xf6, 0x50, 0xf8, 0xc7, 0x4e,
0xcb, 0xa6, 0xa7, 0xfd, 0xa6, 0x66, 0xe2, 0x6e, 0xd6, 0xc4, 0x2e, 0xc2, 0x24, 0x8b, 0xa8, 0x69,
0x65, 0x79, 0x4a, 0xef, 0x47, 0xaf, 0x99, 0xbd, 0x4b, 0xaf, 0x7c, 0xd0, 0xc2, 0x2d, 0xec, 0x4d,
0xb3, 0x7c, 0x16, 0x44, 0x17, 0x7b, 0x9d, 0x7e, 0xcb, 0x76, 0xb2, 0xfe, 0x2f, 0x3f, 0xa8, 0x5e,
0x0a, 0x00, 0xe8, 0x4c, 0x79, 0x80, 0xba, 0x4d, 0xe4, 0xc2, 0x4d, 0x90, 0xe0, 0x79, 0x1a, 0xb6,
0x95, 0x16, 0xd6, 0x85, 0x4f, 0xa5, 0x02, 0xb8, 0xbd, 0x5e, 0x93, 0x39, 0x50, 0xde, 0xd5, 0x65,
0xbe, 0x54, 0xb6, 0x38, 0xe4, 0x60, 0x0b, 0x71, 0x48, 0x64, 0xd0, 0xfb, 0x3e, 0x54, 0x61, 0x21,
0x0e, 0xf1, 0x25, 0x06, 0x41, 0x20, 0x19, 0x96, 0xe5, 0xa6, 0xe3, 0x9c, 0xd0, 0xbd, 0x39, 0x2c,
0x00, 0x99, 0x50, 0x83, 0xf6, 0x49, 0x5a, 0x62, 0xd1, 0x64, 0xee, 0x63, 0xed, 0x61, 0x1d, 0xb4,
0xbb, 0xdd, 0x1c, 0x79, 0x6c, 0x41, 0xba, 0xba, 0x5e, 0x8b, 0xe9, 0x81, 0x52, 0xdd, 0x00, 0xc9,
0xef, 0xb1, 0xed, 0xe8, 0xe8, 0x97, 0x3e, 0x22, 0x74, 0x68, 0x23, 0xdc, 0xd9, 0xa8, 0x7f, 0x0a,
0x60, 0xd6, 0x67, 0x48, 0x0f, 0x3b, 0x04, 0x4d, 0x77, 0xaa, 0xaf, 0x40, 0xa2, 0xeb, 0xd9, 0x12,
0x76, 0xaa, 0x38, 0xdb, 0x5d, 0x66, 0xf2, 0xee, 0xf4, 0x10, 0x87, 0x9f, 0x80, 0x94, 0x8b, 0xba,
0x78, 0x80, 0xac, 0x46, 0x98, 0x21, 0xce, 0x32, 0x48, 0xfa, 0x7c, 0x10, 0xf6, 0x05, 0x44, 0x2d,
0x80, 0xd9, 0x7d, 0x64, 0x0c, 0x50, 0xb8, 0xf9, 0x1c, 0x90, 0x78, 0xb5, 0xbc, 0x4d, 0x3d, 0xee,
0xe7, 0xb1, 0x6a, 0x0a, 0xcc, 0x05, 0x39, 0xfc, 0xc3, 0xa9, 0xfb, 0x60, 0xe5, 0xd0, 0xc5, 0x26,
0x22, 0xc4, 0x67, 0x09, 0x31, 0x5a, 0x43, 0x87, 0x2d, 0x7e, 0x28, 0x2f, 0x12, 0x98, 0xa4, 0x34,
0xff, 0x73, 0xd1, 0x42, 0x30, 0x5c, 0xff, 0x5a, 0x3a, 0xff, 0x4b, 0x8d, 0xa9, 0xab, 0x40, 0x89,
0xca, 0x16, 0x78, 0x7d, 0x0b, 0x96, 0xd8, 0x1c, 0x77, 0x06, 0x28, 0xcf, 0x0a, 0xcd, 0xa1, 0xc0,
0x67, 0x9a, 0x0a, 0xab, 0x9f, 0x81, 0xe5, 0x51, 0x75, 0x70, 0x41, 0x51, 0xb7, 0x78, 0x02, 0x16,
0xcb, 0x0e, 0x45, 0xae, 0x63, 0x74, 0x78, 0x9e, 0xd0, 0x69, 0x19, 0x88, 0x43, 0x13, 0x99, 0x99,
0x88, 0xcc, 0x80, 0x45, 0xe0, 0x97, 0x40, 0x36, 0x4c, 0x6a, 0x63, 0x27, 0xb8, 0xbd, 0xb5, 0xa8,
0x6a, 0x1e, 0x51, 0xd6, 0x3c, 0x79, 0x0f, 0xd3, 0x03, 0x5c, 0x7d, 0x21, 0x82, 0xe4, 0xbd, 0x38,
0xfc, 0x66, 0x98, 0x88, 0x9b, 0xcc, 0xe7, 0x36, 0x1f, 0x49, 0xb4, 0x67, 0x3b, 0x56, 0x98, 0x0c,
0x6a, 0xc1, 0x8d, 0x8a, 0x5e, 0xb1, 0xd3, 0x51, 0x52, 0xde, 0x27, 0xdf, 0xc5, 0xfc, 0xdb, 0x64,
0xbb, 0x4e, 0x10, 0xe4, 0x0e, 0x6c, 0x13, 0x79, 0x8d, 0x92, 0xcc, 0x7d, 0x14, 0xe9, 0xe6, 0x23,
0x4c, 0x15, 0xd2, 0xdc, 0x88, 0x1a, 0xa4, 0x1d, 0x34, 0x52, 0xa4, 0x51, 0x8d, 0xad, 0x73, 0x23,
0xce, 0x71, 0x23, 0x07, 0xd1, 0x33, 0xec, 0xb6, 0xd3, 0x33, 0xe3, 0x8d, 0x2a, 0x3e, 0xc2, 0x8d,
0x02, 0x9a, 0x0b, 0xcd, 0x4e, 0x9f, 0xb0, 0x8b, 0x48, 0xcb, 0xe3, 0x85, 0x45, 0x1f, 0xe1, 0xc2,
0x80, 0x2e, 0xbc, 0x07, 0x64, 0x6a, 0xb8, 0x2d, 0x44, 0xb7, 0x5f, 0x0b, 0x20, 0x35, 0x52, 0x30,
0xd6, 0x33, 0x89, 0x7a, 0x65, 0xaf, 0x52, 0xfd, 0xb1, 0xb2, 0x10, 0x53, 0x94, 0x8b, 0x7f, 0xd6,
0x97, 0x47, 0x88, 0xba, 0xd3, 0x76, 0xf0, 0x99, 0xc3, 0x7a, 0x64, 0xf1, 0xa8, 0x56, 0xd5, 0x4b,
0x8d, 0x7c, 0xb1, 0x56, 0xae, 0x56, 0x1a, 0x45, 0xbd, 0x94, 0xaf, 0x95, 0x16, 0x04, 0x65, 0x85,
0x89, 0x96, 0x46, 0x44, 0x45, 0x17, 0x19, 0x14, 0x3d, 0xd0, 0xd4, 0x0f, 0x77, 0xb9, 0x46, 0x8c,
0xd4, 0xd4, 0x7b, 0x56, 0x94, 0x46, 0x2f, 0x1d, 0x54, 0x7f, 0x28, 0x2d, 0xc4, 0x23, 0x35, 0xba,
0xd7, 0xd7, 0xca, 0x87, 0xbf, 0xff, 0x9b, 0x89, 0xfd, 0xff, 0x5f, 0x66, 0xf4, 0x74, 0xb9, 0x3f,
0x44, 0x20, 0xf1, 0x8f, 0x16, 0x5e, 0x08, 0x00, 0x3e, 0xec, 0x27, 0xb8, 0x13, 0x55, 0xc3, 0xb1,
0x5d, 0xac, 0x68, 0xd3, 0xe2, 0x41, 0x9b, 0x2e, 0x3d, 0xb9, 0x7c, 0xf5, 0xb7, 0xc8, 0x5e, 0x0a,
0x8f, 0xdf, 0xe9, 0x1a, 0x0e, 0x5b, 0x75, 0xe1, 0x6f, 0x60, 0xfe, 0xdd, 0xfe, 0x83, 0x5b, 0x91,
0x4f, 0x4e, 0x54, 0x87, 0x2b, 0xdb, 0xd3, 0xa0, 0x13, 0xfd, 0x73, 0xcf, 0x04, 0xb6, 0x81, 0xe1,
0x7b, 0x46, 0x4e, 0xed, 0x1e, 0xfc, 0x09, 0x48, 0xfc, 0xa5, 0x86, 0x91, 0xdd, 0x7a, 0xef, 0x9d,
0x57, 0xd6, 0xc7, 0x03, 0x93, 0x0f, 0x6d, 0x82, 0x19, 0xef, 0xbd, 0x84, 0x91, 0x19, 0xee, 0x3f,
0xc7, 0xca, 0xc6, 0x04, 0x62, 0xa2, 0x49, 0x61, 0xf5, 0xea, 0x26, 0x13, 0x7b, 0xce, 0xc6, 0x9b,
0x9b, 0x8c, 0x70, 0x7e, 0x9b, 0x11, 0xae, 0xd8, 0x78, 0xca, 0xc6, 0x4b, 0x36, 0x8e, 0xe3, 0xc7,
0x52, 0x53, 0xf6, 0xfe, 0xdd, 0x7e, 0xf1, 0x36, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x61, 0x3c, 0x43,
0x06, 0x08, 0x00, 0x00,
}

View File

@ -58,8 +58,15 @@ message JoinRequest {
}
message JoinResponse {
// RaftID is the ID assigned to the new member.
uint64 raft_id = 1 [(gogoproto.customname) = "RaftID"];
// Members is the membership set of the cluster.
repeated RaftMember members = 2;
// RemovedMembers is a list of members that have been removed from
// the cluster, so the new node can avoid communicating with them.
repeated uint64 removed_members = 3;
}
message LeaveRequest {

View File

@ -268,7 +268,7 @@ func _ServiceSpec_OneofSizer(msg proto.Message) (n int) {
// instructing Swarm on how this service should work on the particular
// network.
type ServiceSpec_NetworkAttachmentConfig struct {
// Target specifies the target network for attachement. This value may be a
// Target specifies the target network for attachment. This value may be a
// network name or identifier. Only identifiers are supported at this time.
Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"`
// Aliases specifies a list of discoverable alternate names for the service on this Target.
@ -281,7 +281,7 @@ func (*ServiceSpec_NetworkAttachmentConfig) Descriptor() ([]byte, []int) {
return fileDescriptorSpecs, []int{1, 0}
}
// ReplicatedService set the reconcilation target to certain number of replicas.
// ReplicatedService sets the reconciliation target to a certain number of replicas.
type ReplicatedService struct {
Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"`
}
@ -290,7 +290,7 @@ func (m *ReplicatedService) Reset() { *m = ReplicatedService{
func (*ReplicatedService) ProtoMessage() {}
func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} }
// GlobalService represent global service.
// GlobalService represents a global service.
type GlobalService struct {
}
@ -415,9 +415,12 @@ type ContainerSpec struct {
// executable and the following elements are treated as arguments.
//
// If command is empty, execution will fall back to the image's entrypoint.
//
// Command should only be used when overriding entrypoint.
Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"`
// Args specifies arguments provided to the image's entrypoint.
// Ignored if command is specified.
//
// If Command and Args are provided, Args will be appended to Command.
Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
// Env specifies the environment variables for the container in NAME=VALUE
// format. These must be compliant with [IEEE Std

View File

@ -78,7 +78,7 @@ message ServiceSpec {
// instructing Swarm on how this service should work on the particular
// network.
message NetworkAttachmentConfig {
// Target specifies the target network for attachement. This value may be a
// Target specifies the target network for attachment. This value may be a
// network name or identifier. Only identifiers are supported at this time.
string target = 1;
// Aliases specifies a list of discoverable alternate names for the service on this Target.
@ -91,12 +91,12 @@ message ServiceSpec {
EndpointSpec endpoint = 8;
}
// ReplicatedService set the reconcilation target to certain number of replicas.
// ReplicatedService sets the reconciliation target to a certain number of replicas.
message ReplicatedService {
uint64 replicas = 1;
}
// GlobalService represent global service.
// GlobalService represents a global service.
message GlobalService {
// Empty message for now.
}
@ -138,10 +138,13 @@ message ContainerSpec {
// executable and the following elements are treated as arguments.
//
// If command is empty, execution will fall back to the image's entrypoint.
//
// Command should only be used when overriding entrypoint.
repeated string command = 3;
// Args specifies arguments provided to the image's entrypoint.
// Ignored if command is specified.
//
// If Command and Args are provided, Args will be appended to Command.
repeated string args = 4;
// Env specifies the environment variables for the container in NAME=VALUE
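The clarified contract mirrors Docker's ENTRYPOINT/CMD semantics: Command replaces the image's entrypoint, and Args are appended to whichever of the two is in effect. A small worked example of the resulting argv under that rule (values are illustrative):

```
package main

import "fmt"

// argv applies the documented rule: Command overrides the image's
// entrypoint; Args are appended to the effective command.
func argv(entrypoint, command, args []string) []string {
	base := entrypoint
	if len(command) > 0 {
		base = command
	}
	return append(append([]string{}, base...), args...)
}

func main() {
	entrypoint := []string{"/docker-entrypoint.sh"} // baked into the image

	// Args only: appended to the image entrypoint.
	fmt.Println(argv(entrypoint, nil, []string{"-g", "daemon off;"}))

	// Command + Args: Command replaces the entrypoint, Args still appended.
	fmt.Println(argv(entrypoint, []string{"nginx"}, []string{"-g", "daemon off;"}))
}
```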

The file diff is not shown because of its large size. Load Diff

View File

@ -447,9 +447,29 @@ message AcceptancePolicy {
}
message ExternalCA {
enum CAProtocol {
CFSSL = 0 [(gogoproto.enumvalue_customname) = "CAProtocolCFSSL"];
}
// Protocol is the protocol used by this external CA.
CAProtocol protocol = 1;
// URL is the URL where the external CA can be reached.
string url = 2 [(gogoproto.customname) = "URL"];
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
map<string, string> options = 3;
}
message CAConfig {
// NodeCertExpiry is the duration certificates should be issued for
Duration node_cert_expiry = 1;
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
repeated ExternalCA external_cas = 2 [(gogoproto.customname) = "ExternalCAs"];
}
// OrchestrationConfig defines cluster-level orchestration settings.
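The ExternalCA and CAConfig messages above are what the CLI's --external-ca flag ultimately populates. A hedged sketch of building the equivalent objects directly; the URL and options are placeholders, and the CAProtocolCFSSL constant name is assumed from the enumvalue_customname annotation above:

```
package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
)

func main() {
	// Roughly what --external-ca protocol=cfssl,url=... is parsed into.
	externalCA := &api.ExternalCA{
		Protocol: api.CAProtocolCFSSL, // assumed generated name, per the annotation above
		URL:      "https://ca.example.com/api/v1/cfssl/sign",
		Options:  map[string]string{"profile": "intermediate"}, // meaning depends on the CA type
	}
	caConfig := api.CAConfig{ExternalCAs: []*api.ExternalCA{externalCA}}
	fmt.Printf("configured %d external CA(s)\n", len(caConfig.ExternalCAs))
}
```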

View File

@ -167,7 +167,16 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
}
// Get the remote manager to issue a CA signed certificate for this node
signedCert, err := GetRemoteSignedCertificate(ctx, csr, role, secret, rca.Pool, picker, transport, nodeInfo)
// Retry up to 5 times in case the manager we first try to contact isn't
// responding properly (for example, it may have just been demoted).
var signedCert []byte
for i := 0; i != 5; i++ {
signedCert, err = GetRemoteSignedCertificate(ctx, csr, role, secret, rca.Pool, picker, transport, nodeInfo)
if err == nil {
break
}
log.Warningf("error fetching signed node certificate: %v", err)
}
if err != nil {
return nil, err
}
@ -192,6 +201,12 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
return nil, err
}
// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
if err != nil {
return nil, err
}
log.Infof("Downloaded new TLS credentials with role: %s.", role)
// Ensure directory exists
@ -210,13 +225,27 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, paths Cert
return nil, err
}
// Create a valid TLSKeyPair out of the PEM encoded private key and certificate
tlsKeyPair, err := tls.X509KeyPair(signedCert, key)
if err != nil {
return nil, err
return &tlsKeyPair, nil
}
// PrepareCSR creates a CFSSL Sign Request based on the given raw CSR and
// overrides the Subject and Hosts with the given extra args.
func PrepareCSR(csrBytes []byte, cn, ou, org string) cfsigner.SignRequest {
// All managers get the CA's subject-alt-name added, so they can be
// used for cert issuance.
hosts := []string{ou}
if ou == ManagerRole {
hosts = append(hosts, CARole)
}
return &tlsKeyPair, nil
return cfsigner.SignRequest{
Request: string(csrBytes),
// OU is used for Authentication of the node type. The CN has the random
// node ID.
Subject: &cfsigner.Subject{CN: cn, Names: []cfcsr.Name{{OU: ou, O: org}}},
// Adding ou as DNS alt name, so clients can connect to ManagerRole and CARole
Hosts: hosts,
}
}
// ParseValidateAndSignCSR returns a signed certificate from a particular rootCA and a CSR.
@ -225,25 +254,21 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string)
return nil, ErrNoValidSigner
}
// All managers get added the subject-alt-name of CA, so they can be used for cert issuance
hosts := []string{ou}
if ou == ManagerRole {
hosts = append(hosts, CARole)
}
signRequest := PrepareCSR(csrBytes, cn, ou, org)
cert, err := rca.Signer.Sign(cfsigner.SignRequest{
Request: string(csrBytes),
// OU is used for Authentication of the node type. The CN has the random
// node ID.
Subject: &cfsigner.Subject{CN: cn, Names: []cfcsr.Name{{OU: ou, O: org}}},
// Adding ou as DNS alt name, so clients can connect to ManagerRole and CARole
Hosts: hosts,
})
cert, err := rca.Signer.Sign(signRequest)
if err != nil {
log.Debugf("failed to sign node certificate: %v", err)
return nil, err
}
return rca.AppendFirstRootPEM(cert)
}
// AppendFirstRootPEM appends the first certificate from this RootCA's cert
// bundle to the given cert bundle (which should already be encoded as a series
// of PEM-encoded certificate blocks).
func (rca *RootCA) AppendFirstRootPEM(cert []byte) ([]byte, error) {
// Append the first root CA Cert to the certificate, to create a valid chain
// Get the first Root CA Cert on the bundle
firstRootCA, _, err := helpers.ParseOneCertificateFromPEM(rca.Cert)
@ -390,7 +415,7 @@ func GetLocalRootCA(baseDir string) (RootCA, error) {
rootCA, err := NewRootCA(cert, key, DefaultNodeCertExpiration)
if err == nil {
log.Debugf("successfully loaded the signer for the Root CA: %s", paths.RootCA.Cert)
log.Debugf("successfully loaded the Root CA: %s", paths.RootCA.Cert)
}
return rootCA, err
@ -602,7 +627,7 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret st
}
defer conn.Close()
// Create a CAClient to retreive a new Certificate
// Create a CAClient to retrieve a new Certificate
caClient := api.NewNodeCAClient(conn)
// Convert our internal string roles into an API role
@ -644,7 +669,15 @@ func GetRemoteSignedCertificate(ctx context.Context, csr []byte, role, secret st
if statusResponse.Certificate == nil {
return nil, fmt.Errorf("no certificate in CertificateStatus response")
}
return statusResponse.Certificate.Certificate, nil
// The certificate in the response must match the CSR
// we submitted. If we are getting a response for a
// certificate that was previously issued, we need to
// retry until the certificate gets updated per our
// current request.
if bytes.Equal(statusResponse.Certificate.CSR, csr) {
return statusResponse.Certificate.Certificate, nil
}
}
// If we're still pending, the issuance failed, or the state is unknown

View File

@ -45,7 +45,8 @@ const (
type SecurityConfig struct {
mu sync.Mutex
rootCA *RootCA
rootCA *RootCA
externalCA *ExternalCA
ServerTLSCreds *MutableTLSCreds
ClientTLSCreds *MutableTLSCreds
@ -60,8 +61,19 @@ type CertificateUpdate struct {
// NewSecurityConfig initializes and returns a new SecurityConfig.
func NewSecurityConfig(rootCA *RootCA, clientTLSCreds, serverTLSCreds *MutableTLSCreds) *SecurityConfig {
// Make a new TLS config for the external CA client without a
// ServerName value set.
clientTLSConfig := clientTLSCreds.Config()
externalCATLSConfig := &tls.Config{
Certificates: clientTLSConfig.Certificates,
RootCAs: clientTLSConfig.RootCAs,
MinVersion: tls.VersionTLS12,
}
return &SecurityConfig{
rootCA: rootCA,
externalCA: NewExternalCA(rootCA, externalCATLSConfig),
ClientTLSCreds: clientTLSCreds,
ServerTLSCreds: serverTLSCreds,
}
@ -164,8 +176,18 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
return nil, err
}
// Get the remote CA certificate, verify integrity with the hash provided
rootCA, err = GetRemoteCA(ctx, d, picker)
// Get the remote CA certificate, verify integrity with the
// hash provided. Retry up to 5 times, in case the manager we
// first try to contact is not responding properly (it may have
// just been demoted, for example).
for i := 0; i != 5; i++ {
rootCA, err = GetRemoteCA(ctx, d, picker)
if err == nil {
break
}
log.Warningf("failed to retrieve remote root CA certificate: %v", err)
}
if err != nil {
return nil, err
}
@ -180,9 +202,9 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
return nil, err
}
// At this point we've successfully loaded the CA details from disk, or successfully
// downloaded them remotely.
// The next step is to try to load our certificates.
// At this point we've successfully loaded the CA details from disk, or
// successfully downloaded them remotely. The next step is to try to
// load our certificates.
clientTLSCreds, serverTLSCreds, err = LoadTLSCreds(rootCA, paths.Node)
if err != nil {
log.Debugf("no valid local TLS credentials found: %v", err)
@ -204,6 +226,9 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
}
}
tlsKeyPair, err = rootCA.IssueAndSaveNewCertificates(paths.Node, cn, proposedRole, org)
if err != nil {
return nil, err
}
} else {
// There was an error loading our Credentials, let's get a new certificate issued
// Last argument is nil because at this point we don't have any valid TLS creds
@ -211,7 +236,6 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
if err != nil {
return nil, err
}
}
// Create the Server TLS Credentials for this node. These will not be used by agents.
serverTLSCreds, err = rootCA.NewServerTLSCredentials(tlsKeyPair)
@ -236,12 +260,7 @@ func LoadOrCreateSecurityConfig(ctx context.Context, baseCertDir, caHash, secret
log.Debugf("loaded local TLS credentials: %s.", paths.Node.Cert)
}
return NewSecurityConfig(&rootCA, clientTLSCreds, serverTLSCreds), nil
}
// RenewTLSConfig will continuously monitor for the necessity of renewing the local certificates, either by
@ -317,6 +336,14 @@ func RenewTLSConfig(ctx context.Context, s *SecurityConfig, baseCertDir string,
updates <- CertificateUpdate{Err: err}
}
// Update the external CA to use the new client TLS
// config using a copy without a serverName specified.
s.externalCA.UpdateTLSConfig(&tls.Config{
Certificates: clientTLSConfig.Certificates,
RootCAs: clientTLSConfig.RootCAs,
MinVersion: tls.VersionTLS12,
})
err = s.ServerTLSCreds.LoadNewTLSConfig(serverTLSConfig)
if err != nil {
log.Debugf("failed to update the server TLS credentials: %v", err)
@ -405,7 +432,7 @@ func LoadTLSCreds(rootCA RootCA, paths CertPaths) (*MutableTLSCreds, *MutableTLS
}
keyPair, newErr = tls.X509KeyPair(cert, key)
if newErr != nil {
return nil, nil, err
}
}
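The five-attempt loop guarding GetRemoteCA above is a plain bounded retry: stop on the first success, keep the last error. A standalone sketch of the same pattern, assuming nothing beyond what the diff shows (retryN and the flaky closure are illustrative names):

package main

import "fmt"

// retryN mirrors the bounded-retry loop used for GetRemoteCA above: call f
// up to n times, return nil on the first success, else the last error seen.
func retryN(n int, f func() error) error {
	var err error
	for i := 0; i != n; i++ {
		if err = f(); err == nil {
			return nil
		}
		fmt.Printf("attempt %d failed: %v\n", i+1, err)
	}
	return err
}

func main() {
	calls := 0
	err := retryN(5, func() error {
		calls++
		if calls < 3 {
			return fmt.Errorf("manager not responding")
		}
		return nil
	})
	fmt.Println(err) // <nil>: succeeded on the third attempt
}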

141
vendor/src/github.com/docker/swarmkit/ca/external.go vendored Normal file
View file

@ -0,0 +1,141 @@
package ca
import (
"bytes"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"sync"
log "github.com/Sirupsen/logrus"
"github.com/cloudflare/cfssl/api"
"github.com/cloudflare/cfssl/signer"
)
// ErrNoExternalCAURLs is an error used to indicate that an ExternalCA is
// configured with no URLs to which it can proxy certificate signing requests.
var ErrNoExternalCAURLs = errors.New("no external CA URLs")
// ExternalCA is able to make certificate signing requests to one of a list
// of remote CFSSL API endpoints.
type ExternalCA struct {
mu sync.Mutex
rootCA *RootCA
urls []string
client *http.Client
}
// NewExternalCA creates a new ExternalCA which uses the given tlsConfig to
// authenticate to any of the given URLs of CFSSL API endpoints.
func NewExternalCA(rootCA *RootCA, tlsConfig *tls.Config, urls ...string) *ExternalCA {
return &ExternalCA{
rootCA: rootCA,
urls: urls,
client: &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
},
}
}
// UpdateTLSConfig updates the HTTP Client for this ExternalCA by creating
// a new client which uses the given tlsConfig.
func (eca *ExternalCA) UpdateTLSConfig(tlsConfig *tls.Config) {
eca.mu.Lock()
defer eca.mu.Unlock()
eca.client = &http.Client{
Transport: &http.Transport{
TLSClientConfig: tlsConfig,
},
}
}
// UpdateURLs updates the list of CSR API endpoints by setting it to the given
// urls.
func (eca *ExternalCA) UpdateURLs(urls ...string) {
eca.mu.Lock()
defer eca.mu.Unlock()
eca.urls = urls
}
// Sign signs a new certificate by proxying the given certificate signing
// request to an external CFSSL API server.
func (eca *ExternalCA) Sign(req signer.SignRequest) (cert []byte, err error) {
// Get the current HTTP client and list of URLs in a small critical
// section. We will use these to make certificate signing requests.
eca.mu.Lock()
urls := eca.urls
client := eca.client
eca.mu.Unlock()
if len(urls) == 0 {
return nil, ErrNoExternalCAURLs
}
csrJSON, err := json.Marshal(req)
if err != nil {
return nil, fmt.Errorf("unable to JSON-encode CFSSL signing request: %s", err)
}
// Try each configured proxy URL. Return after the first success. If
// all fail then the last error will be returned.
for _, url := range urls {
cert, err = makeExternalSignRequest(client, url, csrJSON)
if err == nil {
return eca.rootCA.AppendFirstRootPEM(cert)
}
log.Debugf("unable to proxy certificate signing request to %s: %s", url, err)
}
return nil, err
}
func makeExternalSignRequest(client *http.Client, url string, csrJSON []byte) (cert []byte, err error) {
resp, err := client.Post(url, "application/json", bytes.NewReader(csrJSON))
if err != nil {
return nil, fmt.Errorf("unable to perform certificate signing request: %s", err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("unable to read CSR response body: %s", err)
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))
}
var apiResponse api.Response
if err := json.Unmarshal(body, &apiResponse); err != nil {
log.Debugf("unable to JSON-parse CFSSL API response body: %s", string(body))
return nil, fmt.Errorf("unable to parse JSON response: %s", err)
}
if !apiResponse.Success || apiResponse.Result == nil {
if len(apiResponse.Errors) > 0 {
return nil, fmt.Errorf("response errors: %v", apiResponse.Errors)
}
return nil, fmt.Errorf("certificate signing request failed")
}
result, ok := apiResponse.Result.(map[string]interface{})
if !ok {
return nil, fmt.Errorf("invalid result type: %T", apiResponse.Result)
}
certPEM, ok := result["certificate"].(string)
if !ok {
return nil, fmt.Errorf("invalid result certificate field type: %T", result["certificate"])
}
return []byte(certPEM), nil
}
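A sketch of how a caller might drive ExternalCA end to end; the CFSSL endpoint URL and the csrPEM/tlsConfig inputs are assumptions for illustration, while NewExternalCA, Sign, and ErrNoExternalCAURLs are the APIs defined above:

package main

import (
	"crypto/tls"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
	"github.com/docker/swarmkit/ca"
)

// signViaExternalCA proxies a CSR to a hypothetical CFSSL endpoint. Callers
// can test for ca.ErrNoExternalCAURLs and fall back to a local signer, which
// is exactly what the CA server's signNodeCert does further down.
func signViaExternalCA(rootCA *ca.RootCA, tlsConfig *tls.Config, csrPEM []byte) ([]byte, error) {
	eca := ca.NewExternalCA(rootCA, tlsConfig,
		"https://ca.example.com/api/v1/cfssl/sign") // assumed endpoint
	cert, err := eca.Sign(signer.SignRequest{Request: string(csrPEM)})
	if err == ca.ErrNoExternalCAURLs {
		return nil, fmt.Errorf("no external CA endpoints configured: %v", err)
	}
	return cert, err
}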

View file

@ -355,7 +355,7 @@ func (s *Server) Run(ctx context.Context) error {
s.mu.Lock()
if s.isRunning() {
s.mu.Unlock()
return fmt.Errorf("CA signer is stopped")
return fmt.Errorf("CA signer is already running")
}
s.wg.Add(1)
defer s.wg.Done()
@ -443,12 +443,14 @@ func (s *Server) Run(ctx context.Context) error {
func (s *Server) Stop() error {
s.mu.Lock()
if !s.isRunning() {
s.mu.Unlock()
return fmt.Errorf("CA signer is already stopped")
}
s.cancel()
s.mu.Unlock()
// wait for all handlers to finish their in-flight CA requests,
s.wg.Wait()
s.started = make(chan struct{})
return nil
}
@ -530,6 +532,21 @@ func (s *Server) updateCluster(ctx context.Context, cluster *api.Cluster) {
}).Debugf("Root CA updated successfully")
}
}
// Update our security config with the list of External CA URLs
// from the new cluster state.
// TODO(aaronl): In the future, this will be abstracted with an
// ExternalCA interface that has different implementations for
// different CA types. At the moment, only CFSSL is supported.
var cfsslURLs []string
for _, ca := range cluster.Spec.CAConfig.ExternalCAs {
if ca.Protocol == api.ExternalCA_CAProtocolCFSSL {
cfsslURLs = append(cfsslURLs, ca.URL)
}
}
s.securityConfig.externalCA.UpdateURLs(cfsslURLs...)
}
// evaluateAndSignNodeCert implements the logic of which certificates to sign
@ -555,13 +572,8 @@ func (s *Server) evaluateAndSignNodeCert(ctx context.Context, node *api.Node) {
// signNodeCert does the bulk of the work for signing a certificate
func (s *Server) signNodeCert(ctx context.Context, node *api.Node) {
rootCA := s.securityConfig.RootCA()
externalCA := s.securityConfig.externalCA
node = node.Copy()
nodeID := node.ID
@ -576,7 +588,20 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) {
}
// Attempt to sign the CSR
var (
rawCSR = node.Certificate.CSR
cn = node.Certificate.CN
ou = role
org = s.securityConfig.ClientTLSCreds.Organization()
)
// Try using the external CA first.
cert, err := externalCA.Sign(PrepareCSR(rawCSR, cn, ou, org))
if err == ErrNoExternalCAURLs {
// No external CA servers configured. Try using the local CA.
cert, err = rootCA.ParseValidateAndSignCSR(rawCSR, cn, ou, org)
}
if err != nil {
log.G(ctx).WithFields(logrus.Fields{
"node.id": node.ID,

View file

@ -124,6 +124,14 @@ func (c *MutableTLSCreds) LoadNewTLSConfig(newConfig *tls.Config) error {
return nil
}
// Config returns the current underlying TLS config.
func (c *MutableTLSCreds) Config() *tls.Config {
c.Lock()
defer c.Unlock()
return c.config
}
// Role returns the OU for the certificate encapsulated in this TransportAuthenticator
func (c *MutableTLSCreds) Role() string {
c.Lock()

View file

@ -52,6 +52,16 @@ type networkContext struct {
// A table of unallocated tasks which will be revisited if anything
// changes in system state that might help task allocation.
unallocatedTasks map[string]*api.Task
// A table of unallocated services which will be revisited if
// anything changes in system state that might help service
// allocation.
unallocatedServices map[string]*api.Service
// A table of unallocated networks which will be revisited if
// anything changes in system state that might help network
// allocation.
unallocatedNetworks map[string]*api.Network
}
func (a *Allocator) doNetworkInit(ctx context.Context) error {
@ -61,8 +71,10 @@ func (a *Allocator) doNetworkInit(ctx context.Context) error {
}
nc := &networkContext{
nwkAllocator: na,
unallocatedTasks: make(map[string]*api.Task),
unallocatedServices: make(map[string]*api.Service),
unallocatedNetworks: make(map[string]*api.Network),
}
// Check if we have the ingress network. If not found create
@ -326,6 +338,8 @@ func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
case state.EventCreateTask, state.EventUpdateTask, state.EventDeleteTask:
a.doTaskAlloc(ctx, nc, ev)
case state.EventCommit:
a.procUnallocatedNetworks(ctx, nc)
a.procUnallocatedServices(ctx, nc)
a.procUnallocatedTasksNetwork(ctx, nc)
return
}
@ -554,29 +568,34 @@ func (a *Allocator) allocateNode(ctx context.Context, nc *networkContext, node *
}
func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *api.Service) error {
if s.Spec.Endpoint != nil {
if s.Endpoint == nil {
s.Endpoint = &api.Endpoint{
Spec: s.Spec.Endpoint.Copy(),
}
}
// The service is trying to expose ports to the external
// world. Automatically attach the service to the ingress
// network only if it is not already done.
if len(s.Spec.Endpoint.Ports) != 0 {
var found bool
for _, vip := range s.Endpoint.VirtualIPs {
if vip.NetworkID == ingressNetwork.ID {
found = true
break
}
}
if !found {
s.Endpoint.VirtualIPs = append(s.Endpoint.VirtualIPs,
&api.Endpoint_VirtualIP{NetworkID: ingressNetwork.ID})
}
}
}
if err := nc.nwkAllocator.ServiceAllocate(s); err != nil {
nc.unallocatedServices[s.ID] = s
return err
}
@ -611,6 +630,7 @@ func (a *Allocator) allocateService(ctx context.Context, nc *networkContext, s *
func (a *Allocator) allocateNetwork(ctx context.Context, nc *networkContext, n *api.Network) error {
if err := nc.nwkAllocator.Allocate(n); err != nil {
nc.unallocatedNetworks[n.ID] = n
return fmt.Errorf("failed during network allocation for network %s: %v", n.ID, err)
}
@ -666,6 +686,8 @@ func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx sto
if !nc.nwkAllocator.IsAllocated(n) {
return nil, fmt.Errorf("network %s attached to task %s not allocated yet", n.ID, t.ID)
}
na.Network = n
}
if err := nc.nwkAllocator.AllocateTask(t); err != nil {
@ -696,6 +718,32 @@ func (a *Allocator) allocateTask(ctx context.Context, nc *networkContext, tx sto
return storeT, nil
}
func (a *Allocator) procUnallocatedNetworks(ctx context.Context, nc *networkContext) {
for _, n := range nc.unallocatedNetworks {
if !nc.nwkAllocator.IsAllocated(n) {
if err := a.allocateNetwork(ctx, nc, n); err != nil {
log.G(ctx).Debugf("Failed allocation of unallocated network %s: %v", n.ID, err)
continue
}
}
delete(nc.unallocatedNetworks, n.ID)
}
}
func (a *Allocator) procUnallocatedServices(ctx context.Context, nc *networkContext) {
for _, s := range nc.unallocatedServices {
if serviceAllocationNeeded(s, nc) {
if err := a.allocateService(ctx, nc, s); err != nil {
log.G(ctx).Debugf("Failed allocation of unallocated service %s: %v", s.ID, err)
continue
}
}
delete(nc.unallocatedServices, s.ID)
}
}
func (a *Allocator) procUnallocatedTasksNetwork(ctx context.Context, nc *networkContext) {
tasks := make([]*api.Task, 0, len(nc.unallocatedTasks))
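The three procUnallocated* helpers above share one pattern: anything that failed to allocate is parked in a map and replayed on every store commit until it succeeds. A minimal standalone sketch of that pattern (retryPending and the string payloads are illustrative, not swarmkit types):

package main

import "fmt"

// retryPending mirrors procUnallocatedNetworks/Services/TasksNetwork: failed
// items stay parked in the map and are retried on every EventCommit until
// allocation succeeds, at which point they are removed.
func retryPending(pending map[string]string, alloc func(string) error) {
	for id, item := range pending {
		if err := alloc(item); err != nil {
			continue // still unallocated; revisit on the next commit
		}
		delete(pending, id)
	}
}

func main() {
	pending := map[string]string{"net1": "overlay"}
	attempts := 0
	alloc := func(string) error {
		attempts++
		if attempts < 2 {
			return fmt.Errorf("no address space available yet")
		}
		return nil
	}
	retryPending(pending, alloc) // first commit: allocation fails, net1 stays parked
	retryPending(pending, alloc) // second commit: succeeds and is removed
	fmt.Println(len(pending))    // 0
}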

View file

@ -14,7 +14,10 @@ import (
)
const (
defaultDriver = "overlay"
// DefaultDriver defines the name of the driver to be used by
// default if a network without any driver name specified is
// created.
DefaultDriver = "overlay"
)
var (
@ -69,7 +72,7 @@ func New() (*NetworkAllocator, error) {
}
// Add the manager component of overlay driver to the registry.
if err := reg.AddDriver(DefaultDriver, defaultDriverInitFunc, nil); err != nil {
return nil, err
}
@ -96,6 +99,7 @@ func (na *NetworkAllocator) Allocate(n *api.Network) error {
}
if err := na.allocateDriverState(n); err != nil {
na.freePools(n, pools)
return fmt.Errorf("failed while allocating driver state for network %s: %v", n.ID, err)
}
@ -146,7 +150,9 @@ func (na *NetworkAllocator) ServiceAllocate(s *api.Service) (err error) {
}
if s.Endpoint == nil {
s.Endpoint = &api.Endpoint{
Spec: s.Spec.Endpoint.Copy(),
}
}
// First allocate VIPs for all the pre-populated endpoint attachments
@ -520,7 +526,7 @@ func (na *NetworkAllocator) allocateDriverState(n *api.Network) error {
// Resolve network driver
func (na *NetworkAllocator) resolveDriver(n *api.Network) (driverapi.Driver, string, error) {
dName := DefaultDriver
if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name != "" {
dName = n.Spec.DriverConfig.Name
}

View file

@ -3,8 +3,10 @@ package controlapi
import (
"net"
"github.com/docker/libnetwork/ipamapi"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/identity"
"github.com/docker/swarmkit/manager/allocator/networkallocator"
"github.com/docker/swarmkit/manager/state/store"
"golang.org/x/net/context"
"google.golang.org/grpc"
@ -57,6 +59,10 @@ func validateIPAM(ipam *api.IPAMOptions) error {
return err
}
if ipam.Driver != nil && ipam.Driver.Name != ipamapi.DefaultIPAM {
return grpc.Errorf(codes.InvalidArgument, "invalid IPAM specified")
}
for _, ipamConf := range ipam.Configs {
if err := validateIPAMConfiguration(ipamConf); err != nil {
return err
@ -79,6 +85,10 @@ func validateNetworkSpec(spec *api.NetworkSpec) error {
return err
}
if spec.DriverConfig != nil && spec.DriverConfig.Name != networkallocator.DefaultDriver {
return grpc.Errorf(codes.InvalidArgument, "invalid driver specified")
}
if err := validateIPAM(spec.IPAM); err != nil {
return err
}
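Concretely, the two new checks only admit a spec that either omits the driver and IPAM configuration or names the built-in defaults. A small sketch of the accept/reject boundary ("macvlan" is just an arbitrary non-default driver name used for illustration):

package main

import (
	"fmt"

	"github.com/docker/swarmkit/api"
	"github.com/docker/swarmkit/manager/allocator/networkallocator"
)

func main() {
	// No DriverConfig at all: accepted by the new validation.
	ok := &api.NetworkSpec{}
	// A non-default driver name: rejected with InvalidArgument.
	bad := &api.NetworkSpec{DriverConfig: &api.Driver{Name: "macvlan"}}

	accepts := func(s *api.NetworkSpec) bool {
		return s.DriverConfig == nil || s.DriverConfig.Name == networkallocator.DefaultDriver
	}
	fmt.Println(accepts(ok))  // true
	fmt.Println(accepts(bad)) // false
}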

View file

@ -8,6 +8,7 @@ import (
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/identity"
"github.com/docker/swarmkit/manager/state/store"
"github.com/docker/swarmkit/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -15,6 +16,7 @@ import (
var (
errNetworkUpdateNotSupported = errors.New("changing network in service is not supported")
errModeChangeNotAllowed = errors.New("service mode change is not allowed")
)
func validateResources(r *api.Resources) error {
@ -45,21 +47,70 @@ func validateResourceRequirements(r *api.ResourceRequirements) error {
return nil
}
func validateRestartPolicy(rp *api.RestartPolicy) error {
if rp == nil {
return nil
}
if rp.Delay != nil {
delay, err := ptypes.Duration(rp.Delay)
if err != nil {
return err
}
if delay < 0 {
return grpc.Errorf(codes.InvalidArgument, "TaskSpec: restart-delay cannot be negative")
}
}
if rp.Window != nil {
win, err := ptypes.Duration(rp.Window)
if err != nil {
return err
}
if win < 0 {
return grpc.Errorf(codes.InvalidArgument, "TaskSpec: restart-window cannot be negative")
}
}
return nil
}
func validateUpdate(uc *api.UpdateConfig) error {
if uc == nil {
return nil
}
delay, err := ptypes.Duration(&uc.Delay)
if err != nil {
return err
}
if delay < 0 {
return grpc.Errorf(codes.InvalidArgument, "TaskSpec: update-delay cannot be negative")
}
return nil
}
func validateTask(taskSpec api.TaskSpec) error {
if err := validateResourceRequirements(taskSpec.Resources); err != nil {
return err
}
if err := validateRestartPolicy(taskSpec.Restart); err != nil {
return err
}
if taskSpec.GetRuntime() == nil {
return grpc.Errorf(codes.InvalidArgument, "TaskSpec: missing runtime")
}
_, ok := taskSpec.GetRuntime().(*api.TaskSpec_Container)
if !ok {
return grpc.Errorf(codes.Unimplemented, "RuntimeSpec: unimplemented runtime in service spec")
}
container := taskSpec.GetContainer()
if container == nil {
return grpc.Errorf(codes.InvalidArgument, "ContainerSpec: missing in service spec")
}
@ -99,7 +150,13 @@ func validateServiceSpec(spec *api.ServiceSpec) error {
if err := validateAnnotations(spec.Annotations); err != nil {
return err
}
if err := validateTask(spec.Task); err != nil {
return err
}
if err := validateUpdate(spec.Update); err != nil {
return err
}
if err := validateEndpointSpec(spec.Endpoint); err != nil {
return err
}
return nil
@ -179,6 +236,12 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe
return errNetworkUpdateNotSupported
}
// The orchestrator is designed to be stateless, so it should not deal
// with service mode changes (which would require comparing the current
// spec with the previous one). The proper way to change a service's mode
// is to delete the service and re-create it.
if request.Spec != nil && reflect.TypeOf(service.Spec.Mode) != reflect.TypeOf(request.Spec.Mode) {
return errModeChangeNotAllowed
}
service.Meta.Version = *request.ServiceVersion
service.Spec = *request.Spec.Copy()
return store.UpdateService(tx, service)
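All the new duration validators reduce to the same core check: convert the protobuf duration and reject negative values. A standalone sketch of that check (validateDelay is an illustrative name; the real validators wrap the failure in a grpc InvalidArgument error):

package main

import (
	"errors"
	"fmt"
	"time"
)

// validateDelay mirrors the negative-duration checks added above for
// restart-delay, restart-window, and update-delay.
func validateDelay(name string, d time.Duration) error {
	if d < 0 {
		return errors.New(name + " cannot be negative")
	}
	return nil
}

func main() {
	fmt.Println(validateDelay("restart-delay", -5*time.Second)) // restart-delay cannot be negative
	fmt.Println(validateDelay("update-delay", 2*time.Second))   // <nil>
}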

View file

@ -29,6 +29,7 @@ const (
DefaultHeartBeatPeriod = 5 * time.Second
defaultHeartBeatEpsilon = 500 * time.Millisecond
defaultGracePeriodMultiplier = 3
defaultRateLimitPeriod = 16 * time.Second
// maxBatchItems is the threshold of queued writes that should
// trigger an actual transaction to commit them to the shared store.
@ -59,9 +60,12 @@ var (
// DefaultConfig.
type Config struct {
// Addr configures the address the dispatcher reports to agents.
Addr string
HeartbeatPeriod time.Duration
HeartbeatEpsilon time.Duration
// RateLimitPeriod specifies how often a node with the same ID may try
// to register a new session.
RateLimitPeriod time.Duration
GracePeriodMultiplier int
}
@ -70,6 +74,7 @@ func DefaultConfig() *Config {
return &Config{
HeartbeatPeriod: DefaultHeartBeatPeriod,
HeartbeatEpsilon: defaultHeartBeatEpsilon,
RateLimitPeriod: defaultRateLimitPeriod,
GracePeriodMultiplier: defaultGracePeriodMultiplier,
}
}
@ -116,12 +121,11 @@ func (b weightedPeerByNodeID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func New(cluster Cluster, c *Config) *Dispatcher {
return &Dispatcher{
addr: c.Addr,
nodes: newNodeStore(c.HeartbeatPeriod, c.HeartbeatEpsilon, c.GracePeriodMultiplier, c.RateLimitPeriod),
store: cluster.MemoryStore(),
cluster: cluster,
mgrQueue: watch.NewQueue(16),
keyMgrQueue: watch.NewQueue(16),
taskUpdates: make(map[string]*api.TaskStatus),
processTaskUpdatesTrigger: make(chan struct{}, 1),
config: c,
@ -149,12 +153,12 @@ func (d *Dispatcher) Run(ctx context.Context) error {
d.mu.Lock()
if d.isRunning() {
d.mu.Unlock()
return fmt.Errorf("dispatcher is stopped")
return fmt.Errorf("dispatcher is already running")
}
logger := log.G(ctx).WithField("module", "dispatcher")
ctx = log.WithLogger(ctx, logger)
if err := d.markNodesUnknown(ctx); err != nil {
logger.Errorf("failed to mark all nodes unknown: %v", err)
logger.Errorf(`failed to move all nodes to "unknown" state: %v`, err)
}
configWatcher, cancel, err := store.ViewAndWatch(
d.store,
@ -177,6 +181,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
state.EventUpdateCluster{},
)
if err != nil {
d.mu.Unlock()
return err
}
defer cancel()
@ -238,6 +243,7 @@ func (d *Dispatcher) Run(ctx context.Context) error {
func (d *Dispatcher) Stop() error {
d.mu.Lock()
if !d.isRunning() {
d.mu.Unlock()
return fmt.Errorf("dispatcher is already stopped")
}
d.cancel()
@ -280,20 +286,20 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
}
node.Status = api.NodeStatus{
State: api.NodeStatus_UNKNOWN,
Message: "Node marked as unknown due to leadership change in cluster",
Message: `Node moved to "unknown" state due to leadership change in cluster`,
}
nodeID := node.ID
expireFunc := func() {
log := log.WithField("node", nodeID)
nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: "heartbeat failure for unknown node"}
nodeStatus := api.NodeStatus{State: api.NodeStatus_DOWN, Message: `heartbeat failure for node in "unknown" state`}
log.Debugf("heartbeat expiration for unknown node")
if err := d.nodeRemove(nodeID, nodeStatus); err != nil {
log.WithError(err).Errorf("failed deregistering node after heartbeat expiration for unknown node")
log.WithError(err).Errorf(`failed deregistering node after heartbeat expiration for node in "unknown" state`)
}
}
if err := d.nodes.AddUnknown(node, expireFunc); err != nil {
return fmt.Errorf("add unknown node failed: %v", err)
return fmt.Errorf(`adding node in "unknown" state to node store failed: %v`, err)
}
if err := store.UpdateNode(tx, node); err != nil {
return fmt.Errorf("update failed %v", err)
@ -301,7 +307,7 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error {
return nil
})
if err != nil {
log.WithField("node", n.ID).WithError(err).Errorf("failed to mark node as unknown")
log.WithField("node", n.ID).WithError(err).Errorf(`failed to move node to "unknown" state`)
}
}
return nil
@ -328,6 +334,10 @@ func (d *Dispatcher) register(ctx context.Context, nodeID string, description *a
return "", "", err
}
if err := d.nodes.CheckRateLimit(nodeID); err != nil {
return "", "", err
}
// create or update node in store
// TODO(stevvooe): Validate node specification.
var node *api.Node
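A sketch of how the new rate limit would be tuned when wiring up a dispatcher; newDispatcher is a hypothetical wrapper and cluster is any implementation of dispatcher.Cluster:

package main

import (
	"time"

	"github.com/docker/swarmkit/manager/dispatcher"
)

// newDispatcher shows the RateLimitPeriod knob being threaded through Config.
func newDispatcher(cluster dispatcher.Cluster) *dispatcher.Dispatcher {
	cfg := dispatcher.DefaultConfig() // RateLimitPeriod defaults to 16 * time.Second
	cfg.RateLimitPeriod = 30 * time.Second
	return dispatcher.New(cluster, cfg)
}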

View file

@ -15,6 +15,7 @@ import (
type registeredNode struct {
SessionID string
Heartbeat *heartbeat.Heartbeat
Registered time.Time
Node *api.Node
Disconnect chan struct{} // signal to disconnect
mu sync.Mutex
@ -41,15 +42,17 @@ func (rn *registeredNode) checkSessionID(sessionID string) error {
type nodeStore struct {
periodChooser *periodChooser
gracePeriodMultiplier time.Duration
rateLimitPeriod time.Duration
nodes map[string]*registeredNode
mu sync.RWMutex
}
func newNodeStore(hbPeriod, hbEpsilon time.Duration, graceMultiplier int, rateLimitPeriod time.Duration) *nodeStore {
return &nodeStore{
nodes: make(map[string]*registeredNode),
periodChooser: newPeriodChooser(hbPeriod, hbEpsilon),
gracePeriodMultiplier: time.Duration(graceMultiplier),
rateLimitPeriod: rateLimitPeriod,
}
}
@ -77,6 +80,19 @@ func (s *nodeStore) AddUnknown(n *api.Node, expireFunc func()) error {
return nil
}
// CheckRateLimit returns an error if the node with the specified ID tries to
// re-register before rateLimitPeriod has elapsed since its last registration.
func (s *nodeStore) CheckRateLimit(id string) error {
s.mu.Lock()
defer s.mu.Unlock()
if existRn, ok := s.nodes[id]; ok {
if time.Since(existRn.Registered) < s.rateLimitPeriod {
return grpc.Errorf(codes.Unavailable, "node %s attempted registration too recently", id)
}
}
return nil
}
// Add adds a new node and returns it; it replaces any existing node without notification.
func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
s.mu.Lock()
@ -88,6 +104,7 @@ func (s *nodeStore) Add(n *api.Node, expireFunc func()) *registeredNode {
rn := &registeredNode{
SessionID: identity.NewID(), // session ID is local to the dispatcher.
Node: n,
Registered: time.Now(),
Disconnect: make(chan struct{}),
}
s.nodes[n.ID] = rn
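The rate-limit check reduces to comparing time-since-last-registration against the configured period. A standalone sketch of the same logic with a plain map standing in for the dispatcher's node store (checkRateLimit here is an illustrative re-implementation):

package main

import (
	"errors"
	"fmt"
	"time"
)

// checkRateLimit mirrors nodeStore.CheckRateLimit above: a node may not
// register a new session until period has elapsed since its last registration.
func checkRateLimit(registered map[string]time.Time, id string, period time.Duration) error {
	if t, ok := registered[id]; ok && time.Since(t) < period {
		return errors.New("node " + id + " attempted registration too recently")
	}
	return nil
}

func main() {
	reg := map[string]time.Time{"node1": time.Now()}
	fmt.Println(checkRateLimit(reg, "node1", 16*time.Second)) // rejected: too recent
	fmt.Println(checkRateLimit(reg, "node2", 16*time.Second)) // <nil>: unknown node is fine
}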

58
vendor/src/github.com/docker/swarmkit/manager/health/health.go vendored Normal file
View file

@ -0,0 +1,58 @@
// Package health provides some utility functions to health-check a server. The implementation
// is based on protobuf. Users need to write their own implementations if other IDLs are used.
//
// See original source: https://github.com/grpc/grpc-go/blob/master/health/health.go
//
// We use our own implementation of grpc server health check to include the authorization
// wrapper necessary for the Managers.
package health
import (
"sync"
"github.com/docker/swarmkit/api"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// Server represents a Health Check server to check
// if a service is running or not on some host.
type Server struct {
mu sync.Mutex
// statusMap stores the serving status of the services this HealthServer monitors.
statusMap map[string]api.HealthCheckResponse_ServingStatus
}
// NewHealthServer creates a new health check server for grpc services.
func NewHealthServer() *Server {
return &Server{
statusMap: make(map[string]api.HealthCheckResponse_ServingStatus),
}
}
// Check checks if the grpc server is healthy and running.
func (s *Server) Check(ctx context.Context, in *api.HealthCheckRequest) (*api.HealthCheckResponse, error) {
s.mu.Lock()
defer s.mu.Unlock()
if in.Service == "" {
// check the server overall health status.
return &api.HealthCheckResponse{
Status: api.HealthCheckResponse_SERVING,
}, nil
}
if status, ok := s.statusMap[in.Service]; ok {
return &api.HealthCheckResponse{
Status: status,
}, nil
}
return nil, grpc.Errorf(codes.NotFound, "unknown service")
}
// SetServingStatus is called when one needs to reset the serving status of a service
// or insert a new service entry into the statusMap.
func (s *Server) SetServingStatus(service string, status api.HealthCheckResponse_ServingStatus) {
s.mu.Lock()
s.statusMap[service] = status
s.mu.Unlock()
}
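A sketch of the client side, mirroring the checkHealth helper the raft package gains later in this changeset; addr and the dial options (e.g. transport credentials) are assumptions supplied by the caller:

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/api"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// checkRaftHealth dials a manager and asks its health server whether the
// "Raft" service (registered via SetServingStatus) is currently serving.
func checkRaftHealth(addr string, opts ...grpc.DialOption) error {
	conn, err := grpc.Dial(addr, opts...)
	if err != nil {
		return err
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := api.NewHealthClient(conn).Check(ctx, &api.HealthCheckRequest{Service: "Raft"})
	if err != nil {
		return err
	}
	if resp.Status != api.HealthCheckResponse_SERVING {
		return fmt.Errorf("raft server at %s is not serving", addr)
	}
	return nil
}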

View file

@ -32,6 +32,8 @@ const (
// DefaultSubsystem is gossip
DefaultSubsystem = SubsystemGossip
// number of keys to maintain in the key ring.
keyringSize = 3
)
// map of subsystems and corresponding encryption algorithm. Initially only
@ -59,7 +61,6 @@ type KeyManager struct {
config *Config
store *store.MemoryStore
keyRing *keyRing
ctx context.Context
cancel context.CancelFunc
@ -72,7 +73,7 @@ func DefaultConfig() *Config {
ClusterName: store.DefaultClusterName,
Keylen: DefaultKeyLen,
RotationInterval: DefaultKeyRotationInterval,
Subsystems: []string{SubsystemGossip, SubsystemIPSec},
}
}
@ -148,7 +149,7 @@ func (k *KeyManager) rotateKey(ctx context.Context) error {
// We maintain the latest key and the one before in the key ring to allow
// agents to communicate without disruption on key change.
for subsys, keys := range subsysKeys {
if len(keys) == keyringSize {
min := 0
for i, key := range keys[1:] {
if key.LamportTime < keys[min].LamportTime {
@ -189,7 +190,9 @@ func (k *KeyManager) Run(ctx context.Context) error {
cluster := clusters[0]
if len(cluster.NetworkBootstrapKeys) == 0 {
for _, subsys := range k.config.Subsystems {
for i := 0; i < keyringSize; i++ {
k.keyRing.keys = append(k.keyRing.keys, k.allocateKey(ctx, subsys))
}
}
if err := k.updateKey(cluster); err != nil {
log.Errorf("store update failed %v", err)
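Rotation above keeps keyringSize keys per subsystem and, once the ring is full, evicts the entry with the smallest LamportTime before a fresh key is appended. A standalone sketch of just the eviction step (the key struct is a simplified stand-in for api.EncryptionKey):

package main

import "fmt"

type key struct {
	Subsystem   string
	LamportTime uint64
}

// pruneOldest drops the key with the lowest LamportTime, mirroring the
// selection loop in rotateKey above.
func pruneOldest(keys []key) []key {
	min := 0
	for i, k := range keys[1:] {
		if k.LamportTime < keys[min].LamportTime {
			min = i + 1
		}
	}
	return append(keys[:min], keys[min+1:]...)
}

func main() {
	ring := []key{{"gossip", 7}, {"gossip", 3}, {"gossip", 5}}
	fmt.Println(pruneOldest(ring)) // drops the LamportTime 3 entry
}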

View file

@ -19,6 +19,7 @@ import (
"github.com/docker/swarmkit/manager/allocator"
"github.com/docker/swarmkit/manager/controlapi"
"github.com/docker/swarmkit/manager/dispatcher"
"github.com/docker/swarmkit/manager/health"
"github.com/docker/swarmkit/manager/keymanager"
"github.com/docker/swarmkit/manager/orchestrator"
"github.com/docker/swarmkit/manager/raftpicker"
@ -39,6 +40,10 @@ const (
type Config struct {
SecurityConfig *ca.SecurityConfig
// ExternalCAs is a list of initial CAs to which a manager node
// will make certificate signing requests for node certificates.
ExternalCAs []*api.ExternalCA
ProtoAddr map[string]string
// ProtoListener will be used for grpc serving if it's not nil,
// ProtoAddr fields will be used to create listeners otherwise.
@ -83,8 +88,7 @@ type Manager struct {
localserver *grpc.Server
RaftNode *raft.Node
mu sync.Mutex
stopped chan struct{}
}
@ -202,13 +206,7 @@ func New(config *Config) (*Manager, error) {
ForceNewCluster: config.ForceNewCluster,
TLSCredentials: config.SecurityConfig.ClientTLSCreds,
}
RaftNode := raft.NewNode(context.TODO(), newNodeOpts)
opts := []grpc.ServerOption{
grpc.Creds(config.SecurityConfig.ServerTLSCreds)}
@ -275,6 +273,10 @@ func (m *Manager) Run(parent context.Context) error {
raftCfg.HeartbeatTick = uint32(m.RaftNode.Config.HeartbeatTick)
clusterID := m.config.SecurityConfig.ClientTLSCreds.Organization()
initialCAConfig := ca.DefaultCAConfig()
initialCAConfig.ExternalCAs = m.config.ExternalCAs
s.Update(func(tx store.Tx) error {
// Add a default cluster object to the
// store. Don't check the error because
@ -294,7 +296,7 @@ func (m *Manager) Run(parent context.Context) error {
HeartbeatPeriod: ptypes.DurationProto(dispatcher.DefaultHeartBeatPeriod),
},
Raft: raftCfg,
CAConfig: initialCAConfig,
},
RootCA: api.RootCA{
CAKey: rootCA.Key,
@ -327,7 +329,7 @@ func (m *Manager) Run(parent context.Context) error {
log.G(ctx).WithError(err).Error("root key-encrypting-key rotation failed")
}
m.replicatedOrchestrator = orchestrator.NewReplicatedOrchestrator(s)
m.globalOrchestrator = orchestrator.NewGlobalOrchestrator(s)
m.taskReaper = orchestrator.NewTaskReaper(s)
m.scheduler = scheduler.New(s)
@ -421,14 +423,6 @@ func (m *Manager) Run(parent context.Context) error {
}
}()
proxyOpts := []grpc.DialOption{
grpc.WithBackoffMaxDelay(2 * time.Second),
grpc.WithTransportCredentials(m.config.SecurityConfig.ClientTLSCreds),
@ -443,12 +437,14 @@ func (m *Manager) Run(parent context.Context) error {
}
baseControlAPI := controlapi.NewServer(m.RaftNode.MemoryStore(), m.RaftNode)
healthServer := health.NewHealthServer()
authenticatedControlAPI := api.NewAuthenticatedWrapperControlServer(baseControlAPI, authorize)
authenticatedDispatcherAPI := api.NewAuthenticatedWrapperDispatcherServer(m.Dispatcher, authorize)
authenticatedCAAPI := api.NewAuthenticatedWrapperCAServer(m.caserver, authorize)
authenticatedNodeCAAPI := api.NewAuthenticatedWrapperNodeCAServer(m.caserver, authorize)
authenticatedRaftAPI := api.NewAuthenticatedWrapperRaftServer(m.RaftNode, authorize)
authenticatedHealthAPI := api.NewAuthenticatedWrapperHealthServer(healthServer, authorize)
authenticatedRaftMembershipAPI := api.NewAuthenticatedWrapperRaftMembershipServer(m.RaftNode, authorize)
proxyDispatcherAPI := api.NewRaftProxyDispatcherServer(authenticatedDispatcherAPI, cs, m.RaftNode, ca.WithMetadataForwardTLSInfo)
@ -470,6 +466,7 @@ func (m *Manager) Run(parent context.Context) error {
api.RegisterCAServer(m.server, proxyCAAPI)
api.RegisterNodeCAServer(m.server, proxyNodeCAAPI)
api.RegisterRaftServer(m.server, authenticatedRaftAPI)
api.RegisterHealthServer(m.server, authenticatedHealthAPI)
api.RegisterRaftMembershipServer(m.server, proxyRaftMembershipAPI)
api.RegisterControlServer(m.localserver, localProxyControlAPI)
api.RegisterControlServer(m.server, authenticatedControlAPI)
@ -492,6 +489,24 @@ func (m *Manager) Run(parent context.Context) error {
}(proto, l)
}
// Set the raft server as serving for the health server
healthServer.SetServingStatus("Raft", api.HealthCheckResponse_SERVING)
if err := m.RaftNode.JoinAndStart(); err != nil {
for _, lis := range m.listeners {
lis.Close()
}
return fmt.Errorf("can't initialize raft node: %v", err)
}
go func() {
err := m.RaftNode.Run(ctx)
if err != nil {
log.G(ctx).Error(err)
m.Stop(ctx)
}
}()
if err := raft.WaitForLeader(ctx, m.RaftNode); err != nil {
m.server.Stop()
return err

View file

@ -255,7 +255,7 @@ func (g *GlobalOrchestrator) reconcileOneNode(ctx context.Context, node *api.Nod
return
}
// typically there are only a few global services on a node
// iterate through all of them one by one. If raft store visits become a concern,
// it can be optimized.
for _, service := range g.globalServices {
g.reconcileServiceOneNode(ctx, service.ID, node.ID)

View file

@ -29,8 +29,8 @@ type ReplicatedOrchestrator struct {
restarts *RestartSupervisor
}
// NewReplicatedOrchestrator creates a new ReplicatedOrchestrator.
func NewReplicatedOrchestrator(store *store.MemoryStore) *ReplicatedOrchestrator {
restartSupervisor := NewRestartSupervisor(store)
updater := NewUpdateSupervisor(store, restartSupervisor)
return &ReplicatedOrchestrator{
@ -114,6 +114,9 @@ func newTask(service *api.Service, instance uint64) *api.Task {
Timestamp: ptypes.MustTimestampProto(time.Now()),
Message: "created",
},
Endpoint: &api.Endpoint{
Spec: service.Spec.Endpoint.Copy(),
},
DesiredState: api.TaskStateRunning,
}
}

View file

@ -1,6 +1,8 @@
package orchestrator
import (
"sort"
"github.com/docker/go-events"
"github.com/docker/swarmkit/api"
"github.com/docker/swarmkit/log"
@ -68,6 +70,27 @@ func (r *ReplicatedOrchestrator) resolveService(ctx context.Context, task *api.T
return service
}
type taskWithIndex struct {
task *api.Task
// index is a counter that counts this task as the nth instance of
// the service on its node. This is used for sorting the tasks so that
// when scaling down we leave tasks more evenly balanced.
index int
}
type tasksByIndex []taskWithIndex
func (ts tasksByIndex) Len() int { return len(ts) }
func (ts tasksByIndex) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] }
func (ts tasksByIndex) Less(i, j int) bool {
if ts[i].index < 0 {
return false
}
return ts[i].index < ts[j].index
}
func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Service) {
var (
tasks []*api.Task
@ -97,8 +120,6 @@ func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Ser
deploy := service.Spec.GetMode().(*api.ServiceSpec_Replicated)
specifiedInstances := int(deploy.Replicated.Replicas)
switch {
case specifiedInstances > numTasks:
log.G(ctx).Debugf("Service %s was scaled up from %d to %d instances", service.ID, numTasks, specifiedInstances)
@ -115,9 +136,35 @@ func (r *ReplicatedOrchestrator) reconcile(ctx context.Context, service *api.Ser
case specifiedInstances < numTasks:
// Update up to N tasks then remove the extra
log.G(ctx).Debugf("Service %s was scaled down from %d to %d instances", service.ID, numTasks, specifiedInstances)
// Preferentially remove tasks on the nodes that have the most
// copies of this service, to leave a more balanced result.
// Assign each task an index that counts it as the nth copy of
// the service on its node (1, 2, 3, ...), and sort the
// tasks by this counter value.
instancesByNode := make(map[string]int)
tasksWithIndices := make(tasksByIndex, 0, numTasks)
for _, t := range runningTasks {
if t.NodeID != "" {
instancesByNode[t.NodeID]++
tasksWithIndices = append(tasksWithIndices, taskWithIndex{task: t, index: instancesByNode[t.NodeID]})
} else {
tasksWithIndices = append(tasksWithIndices, taskWithIndex{task: t, index: -1})
}
}
sort.Sort(tasksWithIndices)
sortedTasks := make([]*api.Task, 0, numTasks)
for _, t := range tasksWithIndices {
sortedTasks = append(sortedTasks, t.task)
}
r.updater.Update(ctx, service, sortedTasks[:specifiedInstances])
_, err = r.store.Batch(func(batch *store.Batch) error {
r.removeTasks(ctx, batch, service, sortedTasks[specifiedInstances:])
return nil
})
if err != nil {
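Spelled out, the scale-down path above tags each running task as the nth copy of the service on its node and sorts by that tag, so surplus copies on crowded nodes sort to the tail and are removed first. A simplified standalone sketch (task stands in for api.Task; the unassigned-node case that the real code sorts last is omitted):

package main

import (
	"fmt"
	"sort"
)

type task struct {
	ID     string
	NodeID string
}

// sortForScaleDown orders tasks so that the first copy on every node comes
// before any node's second copy, and so on; truncating the tail then removes
// the extra copies from the most crowded nodes.
func sortForScaleDown(tasks []task) []task {
	type indexed struct {
		t     task
		index int
	}
	perNode := make(map[string]int)
	withIdx := make([]indexed, 0, len(tasks))
	for _, t := range tasks {
		perNode[t.NodeID]++
		withIdx = append(withIdx, indexed{t: t, index: perNode[t.NodeID]})
	}
	sort.SliceStable(withIdx, func(i, j int) bool { return withIdx[i].index < withIdx[j].index })
	out := make([]task, 0, len(tasks))
	for _, x := range withIdx {
		out = append(out, x.t)
	}
	return out
}

func main() {
	// node1 holds three copies, node2 one: scaling down to 2 keeps t1 and t3.
	tasks := []task{{"t1", "node1"}, {"t2", "node1"}, {"t3", "node2"}, {"t4", "node1"}}
	fmt.Println(sortForScaleDown(tasks)) // [{t1 node1} {t3 node2} {t2 node1} {t4 node1}]
}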

View file

@ -104,7 +104,8 @@ func (u *Updater) Run(ctx context.Context, service *api.Service, tasks []*api.Ta
dirtyTasks := []*api.Task{}
for _, t := range tasks {
if !reflect.DeepEqual(service.Spec.Task, t.Spec) ||
(t.Endpoint != nil &&
!reflect.DeepEqual(service.Spec.Endpoint, t.Endpoint.Spec)) {
dirtyTasks = append(dirtyTasks, t)
}
}
@ -191,6 +192,9 @@ func (u *Updater) updateTask(ctx context.Context, service *api.Service, original
if t == nil {
return fmt.Errorf("task %s not found while trying to update it", original.ID)
}
if t.DesiredState > api.TaskStateRunning {
return fmt.Errorf("task %s was already shut down when reached by updater", original.ID)
}
t.DesiredState = api.TaskStateShutdown
if err := store.UpdateTask(tx, t); err != nil {
return err

View file

@ -48,13 +48,6 @@ func (nh *nodeHeap) alloc(n int) {
nh.index = make(map[string]int, n)
}
// nodeInfo returns the NodeInfo struct for a given node identified by its ID.
func (nh *nodeHeap) nodeInfo(nodeID string) NodeInfo {
index, ok := nh.index[nodeID]
@ -95,9 +88,7 @@ func (nh *nodeHeap) updateNode(n NodeInfo) {
func (nh *nodeHeap) remove(nodeID string) {
index, ok := nh.index[nodeID]
if ok {
heap.Remove(nh, index)
}
}

View file

@ -27,8 +27,6 @@ var (
// Cluster represents a set of active
// raft Members
type Cluster struct {
mu sync.RWMutex
members map[uint64]*Member
@ -103,17 +101,15 @@ func (c *Cluster) RemoveMember(id uint64) error {
c.mu.Lock()
defer c.mu.Unlock()
if c.members[id] != nil {
conn := c.members[id].Conn
if conn != nil {
_ = conn.Close()
}
delete(c.members, id)
}
c.removed[id] = true
return nil
}

View file

@ -31,6 +31,11 @@ import (
)
var (
// ErrHealthCheckFailure is returned when there is an issue with the initial handshake which means
// that the address provided must be invalid or there is ongoing connectivity issues at join time.
ErrHealthCheckFailure = errors.New("raft: could not connect to prospective new cluster member using its advertised address")
// ErrNoRaftMember is thrown when the node is not yet part of a raft cluster
ErrNoRaftMember = errors.New("raft: node is not yet part of a raft cluster")
// ErrConfChangeRefused is returned when there is an issue with the configuration change
ErrConfChangeRefused = errors.New("raft: propose configuration change refused")
// ErrApplyNotSpecified is returned during the creation of a raft node when no apply method was provided
@ -83,12 +88,13 @@ type Node struct {
raftStore *raft.MemoryStorage
memoryStore *store.MemoryStore
Config *raft.Config
opts NewNodeOptions
reqIDGen *idutil.Generator
wait *wait
wal *wal.WAL
snapshotter *snap.Snapshotter
wasLeader bool
isMember uint32
joinAddr string
// waitProp waits for all the proposals to be terminated before
@ -103,14 +109,15 @@ type Node struct {
appliedIndex uint64
snapshotIndex uint64
ticker clock.Ticker
sendTimeout time.Duration
stopCh chan struct{}
doneCh chan struct{}
// removeRaftCh notifies about node deletion from raft cluster
removeRaftCh chan struct{}
removeRaftOnce sync.Once
leadershipBroadcast *events.Broadcaster
startNodePeers []raft.Peer
// used to coordinate shutdown
stopMu sync.RWMutex
// used for membership management checks
@ -153,7 +160,7 @@ func init() {
}
// NewNode generates a new Raft node
func NewNode(ctx context.Context, opts NewNodeOptions) *Node {
cfg := opts.Config
if cfg == nil {
cfg = DefaultNodeConfig()
@ -173,6 +180,7 @@ func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
tlsCredentials: opts.TLSCredentials,
raftStore: raftStore,
Address: opts.Addr,
opts: opts,
Config: &raft.Config{
ElectionTick: cfg.ElectionTick,
HeartbeatTick: cfg.HeartbeatTick,
@ -184,6 +192,7 @@ func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
forceNewCluster: opts.ForceNewCluster,
stopCh: make(chan struct{}),
doneCh: make(chan struct{}),
removeRaftCh: make(chan struct{}),
StateDir: opts.StateDir,
joinAddr: opts.JoinAddr,
sendTimeout: 2 * time.Second,
@ -200,13 +209,21 @@ func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
n.sendTimeout = opts.SendTimeout
}
n.reqIDGen = idutil.NewGenerator(uint16(n.Config.ID), time.Now())
n.wait = newWait()
return n
}
// JoinAndStart joins and starts the raft server
func (n *Node) JoinAndStart() error {
loadAndStartErr := n.loadAndStart(n.Ctx, n.opts.ForceNewCluster)
if loadAndStartErr != nil && loadAndStartErr != errNoWAL {
n.ticker.Stop()
return loadAndStartErr
}
snapshot, err := n.raftStore.Snapshot()
// Snapshot never returns an error
if err != nil {
panic("could not get snapshot of raft store")
@ -215,14 +232,12 @@ func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
n.confState = snapshot.Metadata.ConfState
n.appliedIndex = snapshot.Metadata.Index
n.snapshotIndex = snapshot.Metadata.Index
if loadAndStartErr == errNoWAL {
if n.joinAddr != "" {
c, err := n.ConnectToMember(n.joinAddr, 10*time.Second)
if err != nil {
return err
}
client := api.NewRaftMembershipClient(c.Conn)
defer func() {
@ -235,40 +250,42 @@ func NewNode(ctx context.Context, opts NewNodeOptions) (*Node, error) {
Addr: n.Address,
})
if err != nil {
return err
}
n.Config.ID = resp.RaftID
if _, err := n.createWAL(n.opts.ID); err != nil {
return err
}
n.Node = raft.StartNode(n.Config, []raft.Peer{})
if err := n.registerNodes(resp.Members); err != nil {
return err
}
} else {
// First member in the cluster, self-assign ID
n.Config.ID = uint64(rand.Int63()) + 1
peer, err := n.createWAL(n.opts.ID)
if err != nil {
return err
}
n.Node = raft.StartNode(n.Config, []raft.Peer{peer})
if err := n.Campaign(n.Ctx); err != nil {
return err
}
}
atomic.StoreUint32(&n.isMember, 1)
return nil
}
if n.joinAddr != "" {
n.Config.Logger.Warning("ignoring request to join cluster, because raft state already exists")
}
n.Node = raft.RestartNode(n.Config)
atomic.StoreUint32(&n.isMember, 1)
return nil
}
// DefaultNodeConfig returns the default config for a
@ -377,21 +394,6 @@ func (n *Node) Run(ctx context.Context) error {
}
}
// Advance the state machine
n.Advance()
@ -400,6 +402,19 @@ func (n *Node) Run(ctx context.Context) error {
n.snapshotIndex = snapshotIndex
}
n.snapshotInProgress = nil
case <-n.removeRaftCh:
// If the node was removed from other members,
// send back an error to the caller to start
// the shutdown process.
n.stop()
// Move WAL and snapshot out of the way, since
// they are no longer usable.
if err := n.moveWALAndSnap(); err != nil {
n.Config.Logger.Error(err)
}
return ErrMemberRemoved
case <-n.stopCh:
n.stop()
return nil
@ -434,6 +449,7 @@ func (n *Node) stop() {
}
}
n.Stop()
n.ticker.Stop()
if err := n.wal.Close(); err != nil {
n.Config.Logger.Errorf("raft: error closing WAL: %v", err)
}
@ -442,6 +458,10 @@ func (n *Node) stop() {
// IsLeader checks if we are the leader or not
func (n *Node) IsLeader() bool {
if !n.IsMember() {
return false
}
if n.Node.Status().Lead == n.Config.ID {
return true
}
@ -450,6 +470,9 @@ func (n *Node) IsLeader() bool {
// Leader returns the id of the leader
func (n *Node) Leader() uint64 {
if !n.IsMember() {
return 0
}
return n.Node.Status().Lead
}
@ -479,7 +502,11 @@ func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinRespons
n.membershipLock.Lock()
defer n.membershipLock.Unlock()
if !n.IsMember() {
return nil, ErrNoRaftMember
}
if n.IsStopped() {
log.WithError(ErrStopped).Errorf(ErrStopped.Error())
return nil, ErrStopped
}
@ -497,6 +524,12 @@ func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinRespons
}
}
// We do not bother submitting a configuration change for the
// new member if we can't contact it back using its address
if err := n.checkHealth(ctx, req.Addr, 5*time.Second); err != nil {
return nil, err
}
err = n.addMember(ctx, req.Addr, raftID, nodeInfo.NodeID)
if err != nil {
log.WithError(err).Errorf("failed to add member")
@ -516,6 +549,28 @@ func (n *Node) Join(ctx context.Context, req *api.JoinRequest) (*api.JoinRespons
return &api.JoinResponse{Members: nodes, RaftID: raftID}, nil
}
// checkHealth tries to contact an aspiring member through its advertised address
// and checks if its raft server is running.
func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Duration) error {
conn, err := dial(addr, "tcp", n.tlsCredentials, timeout)
if err != nil {
return err
}
client := api.NewHealthClient(conn)
defer conn.Close()
resp, err := client.Check(ctx, &api.HealthCheckRequest{Service: "Raft"})
if err != nil {
return ErrHealthCheckFailure
}
if resp != nil && resp.Status != api.HealthCheckResponse_SERVING {
return ErrHealthCheckFailure
}
return nil
}
// addMember submits a configuration change to add a new member on the raft cluster.
func (n *Node) addMember(ctx context.Context, addr string, raftID uint64, nodeID string) error {
node := api.RaftMember{
@ -563,7 +618,11 @@ func (n *Node) Leave(ctx context.Context, req *api.LeaveRequest) (*api.LeaveResp
n.stopMu.RLock()
defer n.stopMu.RUnlock()
if !n.IsMember() {
return nil, ErrNoRaftMember
}
if n.IsStopped() {
return nil, ErrStopped
}
@ -612,7 +671,12 @@ func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessa
// can't stop the raft node while an async RPC is in progress
n.stopMu.RLock()
defer n.stopMu.RUnlock()
if !n.IsMember() {
return nil, ErrNoRaftMember
}
if n.IsStopped() {
return nil, ErrStopped
}
@ -625,6 +689,10 @@ func (n *Node) ProcessRaftMessage(ctx context.Context, msg *api.ProcessRaftMessa
// ResolveAddress returns the address reaching for a given node ID.
func (n *Node) ResolveAddress(ctx context.Context, msg *api.ResolveAddressRequest) (*api.ResolveAddressResponse, error) {
if !n.IsMember() {
return nil, ErrNoRaftMember
}
nodeInfo, err := ca.RemoteNode(ctx)
if err != nil {
return nil, err
@ -656,7 +724,7 @@ func (n *Node) LeaderAddr() (string, error) {
if err := WaitForLeader(ctx, n); err != nil {
return "", ErrNoClusterLeader
}
if n.IsStopped() {
return "", ErrStopped
}
ms := n.cluster.Members()
@ -671,7 +739,7 @@ func (n *Node) LeaderAddr() (string, error) {
func (n *Node) registerNode(node *api.RaftMember) error {
member := &membership.Member{}
if n.cluster.GetMember(node.RaftID) != nil || n.cluster.IsIDRemoved(node.RaftID) {
// member already exists
return nil
}
@ -760,11 +828,18 @@ func (n *Node) GetMemberlist() map[uint64]*api.RaftMember {
return memberlist
}
// IsMember checks if the raft node has effectively joined
// a cluster of existing members.
func (n *Node) IsMember() bool {
return atomic.LoadUint32(&n.isMember) == 1
}
// IsStopped checks if the raft node is stopped or not
func (n *Node) IsStopped() bool {
if n.Node == nil {
return true
}
return false
}
// canSubmitProposal defines if any more proposals
@ -882,12 +957,14 @@ func (n *Node) sendToMember(members map[uint64]*membership.Member, m raftpb.Mess
_, err := conn.ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
if err != nil {
if grpc.ErrorDesc(err) == ErrMemberRemoved.Error() {
n.removeRaftOnce.Do(func() {
close(n.removeRaftCh)
})
}
if m.Type == raftpb.MsgSnap {
n.ReportSnapshot(m.To, raft.SnapshotFailure)
}
if n.IsStopped() {
panic("node is nil")
}
n.ReportUnreachable(m.To)

View file

@ -162,6 +162,22 @@ func (n *Node) readWAL(ctx context.Context, snapshot *raftpb.Snapshot, forceNewC
}
n.Config.ID = raftNode.RaftID
// All members that are no longer part of the cluster must be added to
// the removed list right away, so that we don't try to connect to them
// before processing the configuration change entries, which could make
// us get stuck.
for _, ent := range ents {
if ent.Index <= st.Commit && ent.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange
if err := cc.Unmarshal(ent.Data); err != nil {
return fmt.Errorf("error unmarshalling config change: %v", err)
}
if cc.Type == raftpb.ConfChangeRemoveNode {
n.cluster.RemoveMember(cc.NodeID)
}
}
}
if forceNewCluster {
// discard the previously uncommitted entries
for i, ent := range ents {
@ -174,6 +190,23 @@ func (n *Node) readWAL(ctx context.Context, snapshot *raftpb.Snapshot, forceNewC
// force append the configuration change entries
toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(n.Config.ID), st.Term, st.Commit)
// All members that are being removed as part of the
// force-new-cluster process must be added to the
// removed list right away, so that we don't try to
// connect to them before processing the configuration
// change entries, which could make us get stuck.
for _, ccEnt := range toAppEnts {
if ccEnt.Type == raftpb.EntryConfChange {
var cc raftpb.ConfChange
if err := cc.Unmarshal(ccEnt.Data); err != nil {
return fmt.Errorf("error unmarshalling force-new-cluster config change: %v", err)
}
if cc.Type == raftpb.ConfChangeRemoveNode {
n.cluster.RemoveMember(cc.NodeID)
}
}
}
ents = append(ents, toAppEnts...)
// force commit newly appended entries
@ -347,9 +380,10 @@ func (n *Node) restoreFromSnapshot(data []byte, forceNewCluster bool) error {
return err
}
}
}
for _, removedMember := range snapshot.Membership.Removed {
n.cluster.RemoveMember(removedMember)
}
return nil

View file

@ -23,7 +23,6 @@ const (
indexID = "id"
indexName = "name"
indexServiceID = "serviceid"
indexServiceMode = "servicemode"
indexNodeID = "nodeid"
indexSlot = "slot"
indexCN = "cn"

View file

@ -272,7 +272,7 @@ func (p *Picker) PickAddr() (string, error) {
p.mu.Lock()
p.peer = peer
p.mu.Unlock()
return peer.Addr, err
}
// State returns the connectivity state of the underlying connections.