Mirror of https://github.com/microsoft/docker.git
Mutex style change.
For structs protected by a single mutex, embed the mutex for more concise usage. Also use a sync.Mutex directly, rather than a pointer, to avoid the need for initialization (because a Mutex's zero-value is valid and ready to be used).
This commit is contained in:
Parent: c9e1c65c64
Commit: 1cf9c80e97
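The style the commit message describes, shown as a standalone sketch rather than an excerpt from this diff (the Registry type and entries field below are invented for illustration):

package main

import "sync"

// Before: a named pointer field that must be initialized before use,
// e.g. lock: &sync.Mutex{} in every constructor.
type registryOld struct {
	lock    *sync.Mutex
	entries map[string]string
}

// After: embed sync.Mutex by value. Its zero value is an unlocked,
// ready-to-use mutex, and Lock/Unlock are promoted onto the struct.
type Registry struct {
	sync.Mutex
	entries map[string]string
}

func (r *Registry) Set(k, v string) {
	r.Lock() // promoted from the embedded sync.Mutex
	defer r.Unlock()
	if r.entries == nil {
		r.entries = make(map[string]string)
	}
	r.entries[k] = v
}

func main() {
	var r Registry // no mutex initialization needed
	r.Set("hello", "world")
}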
@@ -3,7 +3,6 @@ package docker
 import (
 	"fmt"
 	"io/ioutil"
-	"sync"
 	"testing"
 )
@@ -105,7 +104,6 @@ func TestBuild(t *testing.T) {
 	srv := &Server{
 		runtime:     runtime,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
container.go (12 changes)
@@ -466,8 +466,8 @@ func (container *Container) Attach(stdin io.ReadCloser, stdinCloser io.Closer, s
 }
 
 func (container *Container) Start(hostConfig *HostConfig) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 
 	if container.State.Running {
 		return fmt.Errorf("The container %s is already running.", container.ID)
@@ -821,8 +821,8 @@ func (container *Container) kill() error {
 }
 
 func (container *Container) Kill() error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}
@@ -830,8 +830,8 @@ func (container *Container) Kill() error {
 }
 
 func (container *Container) Stop(seconds int) error {
-	container.State.lock()
-	defer container.State.unlock()
+	container.State.Lock()
+	defer container.State.Unlock()
 	if !container.State.Running {
 		return nil
 	}
network.go (10 changes)
@@ -301,9 +301,9 @@ func newPortMapper() (*PortMapper, error) {
 
 // Port allocator: Automatically allocate and release networking ports
 type PortAllocator struct {
+	sync.Mutex
 	inUse    map[int]struct{}
 	fountain chan (int)
-	lock     sync.Mutex
 }
 
 func (alloc *PortAllocator) runFountain() {
@@ -317,9 +317,9 @@ func (alloc *PortAllocator) runFountain() {
 // FIXME: Release can no longer fail, change its prototype to reflect that.
 func (alloc *PortAllocator) Release(port int) error {
 	utils.Debugf("Releasing %d", port)
-	alloc.lock.Lock()
+	alloc.Lock()
 	delete(alloc.inUse, port)
-	alloc.lock.Unlock()
+	alloc.Unlock()
 	return nil
 }
@@ -334,8 +334,8 @@ func (alloc *PortAllocator) Acquire(port int) (int, error) {
 		}
 		return -1, fmt.Errorf("Port generator ended unexpectedly")
 	}
-	alloc.lock.Lock()
-	defer alloc.lock.Unlock()
+	alloc.Lock()
+	defer alloc.Unlock()
 	if _, inUse := alloc.inUse[port]; inUse {
 		return -1, fmt.Errorf("Port already in use: %d", port)
 	}
@@ -108,9 +108,6 @@ func (runtime *Runtime) Register(container *Container) error {
 	// init the wait lock
 	container.waitLock = make(chan struct{})
 
-	// Even if not running, we init the lock (prevents races in start/stop/kill)
-	container.State.initLock()
-
 	container.runtime = runtime
 
 	// Attach to stdout and stderr
@@ -89,7 +89,6 @@ func init() {
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  false,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
@@ -450,8 +450,8 @@ func (srv *Server) pullRepository(r *registry.Registry, out io.Writer, local, re
 }
 
 func (srv *Server) poolAdd(kind, key string) error {
-	srv.lock.Lock()
-	defer srv.lock.Unlock()
+	srv.Lock()
+	defer srv.Unlock()
 
 	if _, exists := srv.pullingPool[key]; exists {
 		return fmt.Errorf("%s %s is already in progress", key, kind)
@@ -1119,7 +1119,6 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 	srv := &Server{
 		runtime:     runtime,
 		enableCors:  enableCors,
-		lock:        &sync.Mutex{},
 		pullingPool: make(map[string]struct{}),
 		pushingPool: make(map[string]struct{}),
 	}
@@ -1128,9 +1127,9 @@ func NewServer(flGraphPath string, autoRestart, enableCors bool, dns ListOpts) (
 }
 
 type Server struct {
+	sync.Mutex
 	runtime     *Runtime
 	enableCors  bool
-	lock        *sync.Mutex
 	pullingPool map[string]struct{}
 	pushingPool map[string]struct{}
 }
state.go (14 changes)
@@ -8,11 +8,11 @@ import (
 )
 
 type State struct {
+	sync.Mutex
 	Running   bool
 	Pid       int
 	ExitCode  int
 	StartedAt time.Time
-	l         *sync.Mutex
 	Ghost     bool
 }
@@ -39,15 +39,3 @@ func (s *State) setStopped(exitCode int) {
 	s.Pid = 0
 	s.ExitCode = exitCode
 }
-
-func (s *State) initLock() {
-	s.l = &sync.Mutex{}
-}
-
-func (s *State) lock() {
-	s.l.Lock()
-}
-
-func (s *State) unlock() {
-	s.l.Unlock()
-}
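The three helpers above can be dropped because an embedded sync.Mutex needs no setup: its zero value is already an unlocked mutex, so a freshly allocated struct can be locked right away. A minimal sketch of that property, using an invented counter type in place of State:

package main

import (
	"fmt"
	"sync"
)

// counter stands in for a struct like State: it embeds sync.Mutex,
// so its zero value needs no initLock-style initialization.
type counter struct {
	sync.Mutex
	n int
}

func main() {
	c := &counter{} // zero-value mutex, already usable
	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.Lock() // promoted method; no separate lock field to set up
			c.n++
			c.Unlock()
		}()
	}
	wg.Wait()
	fmt.Println(c.n) // 10
}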
@@ -188,10 +188,10 @@ func NopWriteCloser(w io.Writer) io.WriteCloser {
 }
 
 type bufReader struct {
+	sync.Mutex
 	buf    *bytes.Buffer
 	reader io.Reader
 	err    error
-	l      sync.Mutex
 	wait   sync.Cond
 }
@@ -200,7 +200,7 @@ func NewBufReader(r io.Reader) *bufReader {
 		buf:    &bytes.Buffer{},
 		reader: r,
 	}
-	reader.wait.L = &reader.l
+	reader.wait.L = &reader.Mutex
 	go reader.drain()
 	return reader
 }
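The reader.wait.L = &reader.Mutex line works because sync.Cond only needs a sync.Locker, and the address of the embedded, promoted Mutex field satisfies that. A minimal sketch of the same wiring, with an invented queue type standing in for bufReader:

package main

import (
	"fmt"
	"sync"
)

// queue mirrors the bufReader pattern: an embedded mutex plus a sync.Cond
// whose Locker points at that embedded Mutex field.
type queue struct {
	sync.Mutex
	wait  sync.Cond
	items []int
}

func newQueue() *queue {
	q := &queue{}
	q.wait.L = &q.Mutex // same wiring as reader.wait.L = &reader.Mutex
	return q
}

func (q *queue) push(v int) {
	q.Lock()
	q.items = append(q.items, v)
	q.wait.Signal()
	q.Unlock()
}

func (q *queue) pop() int {
	q.Lock()
	defer q.Unlock()
	for len(q.items) == 0 {
		q.wait.Wait() // releases the embedded mutex while waiting
	}
	v := q.items[0]
	q.items = q.items[1:]
	return v
}

func main() {
	q := newQueue()
	go q.push(42)
	fmt.Println(q.pop()) // 42
}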
@@ -209,14 +209,14 @@ func (r *bufReader) drain() {
 	buf := make([]byte, 1024)
 	for {
 		n, err := r.reader.Read(buf)
-		r.l.Lock()
+		r.Lock()
 		if err != nil {
 			r.err = err
 		} else {
 			r.buf.Write(buf[0:n])
 		}
 		r.wait.Signal()
-		r.l.Unlock()
+		r.Unlock()
 		if err != nil {
 			break
 		}
@@ -224,8 +224,8 @@ func (r *bufReader) drain() {
 }
 
 func (r *bufReader) Read(p []byte) (n int, err error) {
-	r.l.Lock()
-	defer r.l.Unlock()
+	r.Lock()
+	defer r.Unlock()
 	for {
 		n, err = r.buf.Read(p)
 		if n > 0 {
@@ -247,27 +247,27 @@ func (r *bufReader) Close() error {
 }
 
 type WriteBroadcaster struct {
-	mu      sync.Mutex
+	sync.Mutex
 	writers map[io.WriteCloser]struct{}
 }
 
 func (w *WriteBroadcaster) AddWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	w.writers[writer] = struct{}{}
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 // FIXME: Is that function used?
 // FIXME: This relies on the concrete writer type used having equality operator
 func (w *WriteBroadcaster) RemoveWriter(writer io.WriteCloser) {
-	w.mu.Lock()
+	w.Lock()
 	delete(w.writers, writer)
-	w.mu.Unlock()
+	w.Unlock()
 }
 
 func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		if n, err := writer.Write(p); err != nil || n != len(p) {
 			// On error, evict the writer
@@ -278,8 +278,8 @@ func (w *WriteBroadcaster) Write(p []byte) (n int, err error) {
 }
 
 func (w *WriteBroadcaster) CloseWriters() error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
+	w.Lock()
+	defer w.Unlock()
 	for writer := range w.writers {
 		writer.Close()
 	}