зеркало из https://github.com/microsoft/docker.git
Fix logrus formatting
This fix corrects logrus formatting by removing the `f` suffix from `logrus.[Error|Warn|Debug|Fatal|Panic|Info]f` calls when no format string is present. Fixes #23459. Signed-off-by: Yong Tang <yong.tang.github@outlook.com>
This commit is contained in:
Родитель
ec1790d7f1
Коммит
a72b45dbec
|
@ -46,7 +46,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
|
||||||
_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
|
_, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader)
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("[hijack] End of stdout")
|
logrus.Debug("[hijack] End of stdout")
|
||||||
receiveStdout <- err
|
receiveStdout <- err
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
@ -62,7 +62,7 @@ func (cli *DockerCli) HoldHijackedConnection(ctx context.Context, tty bool, inpu
|
||||||
cli.restoreTerminal(inputStream)
|
cli.restoreTerminal(inputStream)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
logrus.Debugf("[hijack] End of stdin")
|
logrus.Debug("[hijack] End of stdin")
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := resp.CloseWrite(); err != nil {
|
if err := resp.CloseWrite(); err != nil {
|
||||||
|
|
|
@ -163,7 +163,7 @@ func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) {
|
||||||
func (s *Server) createMux() *mux.Router {
|
func (s *Server) createMux() *mux.Router {
|
||||||
m := mux.NewRouter()
|
m := mux.NewRouter()
|
||||||
|
|
||||||
logrus.Debugf("Registering routers")
|
logrus.Debug("Registering routers")
|
||||||
for _, apiRouter := range s.routers {
|
for _, apiRouter := range s.routers {
|
||||||
for _, r := range apiRouter.Routes() {
|
for _, r := range apiRouter.Routes() {
|
||||||
f := s.makeHTTPHandler(r.Handler())
|
f := s.makeHTTPHandler(r.Handler())
|
||||||
|
|
|
@ -284,12 +284,12 @@ func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.S
|
||||||
// Wait for initialization to complete.
|
// Wait for initialization to complete.
|
||||||
failed := <-h.tosvc
|
failed := <-h.tosvc
|
||||||
if failed {
|
if failed {
|
||||||
logrus.Debugf("Aborting service start due to failure during initializtion")
|
logrus.Debug("Aborting service start due to failure during initializtion")
|
||||||
return true, 1
|
return true, 1
|
||||||
}
|
}
|
||||||
|
|
||||||
s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
|
s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)}
|
||||||
logrus.Debugf("Service running")
|
logrus.Debug("Service running")
|
||||||
Loop:
|
Loop:
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
|
|
|
@ -393,7 +393,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
|
||||||
if stdin == nil || !openStdin {
|
if stdin == nil || !openStdin {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
logrus.Debugf("attach: stdin: begin")
|
logrus.Debug("attach: stdin: begin")
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
if tty {
|
if tty {
|
||||||
|
@ -419,7 +419,7 @@ func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, op
|
||||||
cStderr.Close()
|
cStderr.Close()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logrus.Debugf("attach: stdin: end")
|
logrus.Debug("attach: stdin: end")
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|
|
@ -28,7 +28,7 @@ func (s *Health) String() string {
|
||||||
// it returns nil.
|
// it returns nil.
|
||||||
func (s *Health) OpenMonitorChannel() chan struct{} {
|
func (s *Health) OpenMonitorChannel() chan struct{} {
|
||||||
if s.stop == nil {
|
if s.stop == nil {
|
||||||
logrus.Debugf("OpenMonitorChannel")
|
logrus.Debug("OpenMonitorChannel")
|
||||||
s.stop = make(chan struct{})
|
s.stop = make(chan struct{})
|
||||||
return s.stop
|
return s.stop
|
||||||
}
|
}
|
||||||
|
@ -38,12 +38,12 @@ func (s *Health) OpenMonitorChannel() chan struct{} {
|
||||||
// CloseMonitorChannel closes any existing monitor channel.
|
// CloseMonitorChannel closes any existing monitor channel.
|
||||||
func (s *Health) CloseMonitorChannel() {
|
func (s *Health) CloseMonitorChannel() {
|
||||||
if s.stop != nil {
|
if s.stop != nil {
|
||||||
logrus.Debugf("CloseMonitorChannel: waiting for probe to stop")
|
logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
|
||||||
// This channel does not buffer. Once the write succeeds, the monitor
|
// This channel does not buffer. Once the write succeeds, the monitor
|
||||||
// has read the stop request and will not make any further updates
|
// has read the stop request and will not make any further updates
|
||||||
// to c.State.Health.
|
// to c.State.Health.
|
||||||
s.stop <- struct{}{}
|
s.stop <- struct{}{}
|
||||||
s.stop = nil
|
s.stop = nil
|
||||||
logrus.Debugf("CloseMonitorChannel done")
|
logrus.Debug("CloseMonitorChannel done")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -114,7 +114,7 @@ func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadClose
|
||||||
r, w := io.Pipe()
|
r, w := io.Pipe()
|
||||||
go func() {
|
go func() {
|
||||||
defer w.Close()
|
defer w.Close()
|
||||||
defer logrus.Debugf("Closing buffered stdin pipe")
|
defer logrus.Debug("Closing buffered stdin pipe")
|
||||||
io.Copy(w, stdin)
|
io.Copy(w, stdin)
|
||||||
}()
|
}()
|
||||||
stdinPipe = r
|
stdinPipe = r
|
||||||
|
|
|
@ -175,7 +175,7 @@ func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.R
|
||||||
r, w := io.Pipe()
|
r, w := io.Pipe()
|
||||||
go func() {
|
go func() {
|
||||||
defer w.Close()
|
defer w.Close()
|
||||||
defer logrus.Debugf("Closing buffered stdin pipe")
|
defer logrus.Debug("Closing buffered stdin pipe")
|
||||||
pools.Copy(w, stdin)
|
pools.Copy(w, stdin)
|
||||||
}()
|
}()
|
||||||
cStdin = r
|
cStdin = r
|
||||||
|
|
|
@ -699,7 +699,7 @@ func (devices *DeviceSet) startDeviceDeletionWorker() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("devmapper: Worker to cleanup deleted devices started")
|
logrus.Debug("devmapper: Worker to cleanup deleted devices started")
|
||||||
for range devices.deletionWorkerTicker.C {
|
for range devices.deletionWorkerTicker.C {
|
||||||
devices.cleanupDeletedDevices()
|
devices.cleanupDeletedDevices()
|
||||||
}
|
}
|
||||||
|
@ -1002,7 +1002,7 @@ func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) createBaseImage() error {
|
func (devices *DeviceSet) createBaseImage() error {
|
||||||
logrus.Debugf("devmapper: Initializing base device-mapper thin volume")
|
logrus.Debug("devmapper: Initializing base device-mapper thin volume")
|
||||||
|
|
||||||
// Create initial device
|
// Create initial device
|
||||||
info, err := devices.createRegisterDevice("")
|
info, err := devices.createRegisterDevice("")
|
||||||
|
@ -1010,7 +1010,7 @@ func (devices *DeviceSet) createBaseImage() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("devmapper: Creating filesystem on base device-mapper thin volume")
|
logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
|
||||||
|
|
||||||
if err := devices.activateDeviceIfNeeded(info, false); err != nil {
|
if err := devices.activateDeviceIfNeeded(info, false); err != nil {
|
||||||
return err
|
return err
|
||||||
|
@ -1188,7 +1188,7 @@ func (devices *DeviceSet) setupBaseImage() error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("devmapper: Removing uninitialized base image")
|
logrus.Debug("devmapper: Removing uninitialized base image")
|
||||||
// If previous base device is in deferred delete state,
|
// If previous base device is in deferred delete state,
|
||||||
// that needs to be cleaned up first. So don't try
|
// that needs to be cleaned up first. So don't try
|
||||||
// deferred deletion.
|
// deferred deletion.
|
||||||
|
@ -1455,7 +1455,7 @@ func (devices *DeviceSet) refreshTransaction(DeviceID int) error {
|
||||||
|
|
||||||
func (devices *DeviceSet) closeTransaction() error {
|
func (devices *DeviceSet) closeTransaction() error {
|
||||||
if err := devices.updatePoolTransactionID(); err != nil {
|
if err := devices.updatePoolTransactionID(); err != nil {
|
||||||
logrus.Debugf("devmapper: Failed to close Transaction")
|
logrus.Debug("devmapper: Failed to close Transaction")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -1644,7 +1644,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||||
if !devicemapper.LibraryDeferredRemovalSupport {
|
if !devicemapper.LibraryDeferredRemovalSupport {
|
||||||
return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
|
return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
|
||||||
}
|
}
|
||||||
logrus.Debugf("devmapper: Deferred removal support enabled.")
|
logrus.Debug("devmapper: Deferred removal support enabled.")
|
||||||
devices.deferredRemove = true
|
devices.deferredRemove = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1652,7 +1652,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||||
if !devices.deferredRemove {
|
if !devices.deferredRemove {
|
||||||
return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
|
return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
|
||||||
}
|
}
|
||||||
logrus.Debugf("devmapper: Deferred deletion support enabled.")
|
logrus.Debug("devmapper: Deferred deletion support enabled.")
|
||||||
devices.deferredDelete = true
|
devices.deferredDelete = true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1716,7 +1716,7 @@ func (devices *DeviceSet) initDevmapper(doInit bool) error {
|
||||||
|
|
||||||
// If the pool doesn't exist, create it
|
// If the pool doesn't exist, create it
|
||||||
if !poolExists && devices.thinPoolDevice == "" {
|
if !poolExists && devices.thinPoolDevice == "" {
|
||||||
logrus.Debugf("devmapper: Pool doesn't exist. Creating it.")
|
logrus.Debug("devmapper: Pool doesn't exist. Creating it.")
|
||||||
|
|
||||||
var (
|
var (
|
||||||
dataFile *os.File
|
dataFile *os.File
|
||||||
|
@ -2044,8 +2044,8 @@ func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (devices *DeviceSet) deactivatePool() error {
|
func (devices *DeviceSet) deactivatePool() error {
|
||||||
logrus.Debugf("devmapper: deactivatePool()")
|
logrus.Debug("devmapper: deactivatePool()")
|
||||||
defer logrus.Debugf("devmapper: deactivatePool END")
|
defer logrus.Debug("devmapper: deactivatePool END")
|
||||||
devname := devices.getPoolDevName()
|
devname := devices.getPoolDevName()
|
||||||
|
|
||||||
devinfo, err := devicemapper.GetInfo(devname)
|
devinfo, err := devicemapper.GetInfo(devname)
|
||||||
|
@ -2304,7 +2304,7 @@ func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {
|
||||||
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
|
if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
logrus.Debugf("devmapper: Unmount done")
|
logrus.Debug("devmapper: Unmount done")
|
||||||
|
|
||||||
if err := devices.deactivateDevice(info); err != nil {
|
if err := devices.deactivateDevice(info); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -132,7 +132,7 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (s
|
||||||
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
|
||||||
GIDMaps: gdw.gidMaps}
|
GIDMaps: gdw.gidMaps}
|
||||||
start := time.Now().UTC()
|
start := time.Now().UTC()
|
||||||
logrus.Debugf("Start untar layer")
|
logrus.Debug("Start untar layer")
|
||||||
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
|
if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -154,10 +154,10 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-stop:
|
case <-stop:
|
||||||
logrus.Debugf("Stop healthcheck monitoring (received while idle)")
|
logrus.Debug("Stop healthcheck monitoring (received while idle)")
|
||||||
return
|
return
|
||||||
case <-time.After(probeInterval):
|
case <-time.After(probeInterval):
|
||||||
logrus.Debugf("Running health check...")
|
logrus.Debug("Running health check...")
|
||||||
startTime := time.Now()
|
startTime := time.Now()
|
||||||
ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
|
ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout)
|
||||||
results := make(chan *types.HealthcheckResult)
|
results := make(chan *types.HealthcheckResult)
|
||||||
|
@ -180,7 +180,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
|
||||||
}()
|
}()
|
||||||
select {
|
select {
|
||||||
case <-stop:
|
case <-stop:
|
||||||
logrus.Debugf("Stop healthcheck monitoring (received while probing)")
|
logrus.Debug("Stop healthcheck monitoring (received while probing)")
|
||||||
// Stop timeout and kill probe, but don't wait for probe to exit.
|
// Stop timeout and kill probe, but don't wait for probe to exit.
|
||||||
cancelProbe()
|
cancelProbe()
|
||||||
return
|
return
|
||||||
|
@ -189,7 +189,7 @@ func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe)
|
||||||
// Stop timeout
|
// Stop timeout
|
||||||
cancelProbe()
|
cancelProbe()
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
logrus.Debugf("Health check taking too long")
|
logrus.Debug("Health check taking too long")
|
||||||
handleProbeResult(d, c, &types.HealthcheckResult{
|
handleProbeResult(d, c, &types.HealthcheckResult{
|
||||||
ExitCode: -1,
|
ExitCode: -1,
|
||||||
Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout),
|
Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout),
|
||||||
|
|
|
@ -85,7 +85,7 @@ func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, c
|
||||||
return nil
|
return nil
|
||||||
case msg, ok := <-logs.Msg:
|
case msg, ok := <-logs.Msg:
|
||||||
if !ok {
|
if !ok {
|
||||||
logrus.Debugf("logs: end stream")
|
logrus.Debug("logs: end stream")
|
||||||
logs.Close()
|
logs.Close()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -89,7 +89,7 @@ func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) erro
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
logrus.Debugf("Retrieving the tag list")
|
logrus.Debug("Retrieving the tag list")
|
||||||
var tagsList map[string]string
|
var tagsList map[string]string
|
||||||
if !isTagged {
|
if !isTagged {
|
||||||
tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo)
|
tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo)
|
||||||
|
|
|
@ -208,7 +208,7 @@ func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progre
|
||||||
size = 0
|
size = 0
|
||||||
} else {
|
} else {
|
||||||
if size != 0 && offset > size {
|
if size != 0 && offset > size {
|
||||||
logrus.Debugf("Partial download is larger than full blob. Starting over")
|
logrus.Debug("Partial download is larger than full blob. Starting over")
|
||||||
offset = 0
|
offset = 0
|
||||||
if err := ld.truncateDownloadFile(); err != nil {
|
if err := ld.truncateDownloadFile(); err != nil {
|
||||||
return nil, 0, xfer.DoNotRetry{Err: err}
|
return nil, 0, xfer.DoNotRetry{Err: err}
|
||||||
|
|
|
@ -130,7 +130,7 @@ func DetectCompression(source []byte) Compression {
|
||||||
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
|
Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00},
|
||||||
} {
|
} {
|
||||||
if len(source) < len(m) {
|
if len(source) < len(m) {
|
||||||
logrus.Debugf("Len too short")
|
logrus.Debug("Len too short")
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if bytes.Compare(m, source[:len(m)]) == 0 {
|
if bytes.Compare(m, source[:len(m)]) == 0 {
|
||||||
|
@ -408,7 +408,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||||
}
|
}
|
||||||
|
|
||||||
case tar.TypeXGlobalHeader:
|
case tar.TypeXGlobalHeader:
|
||||||
logrus.Debugf("PAX Global Extended Headers found and ignored")
|
logrus.Debug("PAX Global Extended Headers found and ignored")
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
default:
|
default:
|
||||||
|
|
|
@ -155,7 +155,7 @@ func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||||
func (rm *responseModifier) CloseNotify() <-chan bool {
|
func (rm *responseModifier) CloseNotify() <-chan bool {
|
||||||
closeNotifier, ok := rm.rw.(http.CloseNotifier)
|
closeNotifier, ok := rm.rw.(http.CloseNotifier)
|
||||||
if !ok {
|
if !ok {
|
||||||
logrus.Errorf("Internal response writer doesn't support the CloseNotifier interface")
|
logrus.Error("Internal response writer doesn't support the CloseNotifier interface")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return closeNotifier.CloseNotify()
|
return closeNotifier.CloseNotify()
|
||||||
|
@ -165,7 +165,7 @@ func (rm *responseModifier) CloseNotify() <-chan bool {
|
||||||
func (rm *responseModifier) Flush() {
|
func (rm *responseModifier) Flush() {
|
||||||
flusher, ok := rm.rw.(http.Flusher)
|
flusher, ok := rm.rw.(http.Flusher)
|
||||||
if !ok {
|
if !ok {
|
||||||
logrus.Errorf("Internal response writer doesn't support the Flusher interface")
|
logrus.Error("Internal response writer doesn't support the Flusher interface")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -279,7 +279,7 @@ func LogInit(logger DevmapperLogger) {
|
||||||
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
|
// SetDevDir sets the dev folder for the device mapper library (usually /dev).
|
||||||
func SetDevDir(dir string) error {
|
func SetDevDir(dir string) error {
|
||||||
if res := DmSetDevDir(dir); res != 1 {
|
if res := DmSetDevDir(dir); res != 1 {
|
||||||
logrus.Debugf("devicemapper: Error dm_set_dev_dir")
|
logrus.Debug("devicemapper: Error dm_set_dev_dir")
|
||||||
return ErrSetDevDir
|
return ErrSetDevDir
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@ -47,7 +47,7 @@ func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.Fil
|
||||||
fi, err := os.Stat(target)
|
fi, err := os.Stat(target)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if os.IsNotExist(err) {
|
if os.IsNotExist(err) {
|
||||||
logrus.Errorf("There are no more loopback devices available.")
|
logrus.Error("There are no more loopback devices available.")
|
||||||
}
|
}
|
||||||
return nil, ErrAttachLoopbackDevice
|
return nil, ErrAttachLoopbackDevice
|
||||||
}
|
}
|
||||||
|
@ -127,7 +127,7 @@ func AttachLoopDevice(sparseName string) (loop *os.File, err error) {
|
||||||
|
|
||||||
// If the call failed, then free the loopback device
|
// If the call failed, then free the loopback device
|
||||||
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
|
if err := ioctlLoopClrFd(loopFile.Fd()); err != nil {
|
||||||
logrus.Errorf("Error while cleaning up the loopback device")
|
logrus.Error("Error while cleaning up the loopback device")
|
||||||
}
|
}
|
||||||
loopFile.Close()
|
loopFile.Close()
|
||||||
return nil, ErrAttachLoopbackDevice
|
return nil, ErrAttachLoopbackDevice
|
||||||
|
|
|
@ -49,11 +49,11 @@ func Trap(cleanup func()) {
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// 3 SIGTERM/INT signals received; force exit without cleanup
|
// 3 SIGTERM/INT signals received; force exit without cleanup
|
||||||
logrus.Infof("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
|
logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received")
|
||||||
}
|
}
|
||||||
case syscall.SIGQUIT:
|
case syscall.SIGQUIT:
|
||||||
DumpStacks()
|
DumpStacks()
|
||||||
logrus.Infof("Forcing docker daemon shutdown without cleanup on SIGQUIT")
|
logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT")
|
||||||
}
|
}
|
||||||
//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
|
//for the SIGINT/TERM, and SIGQUIT non-clean shutdown case, exit with 128 + signal #
|
||||||
os.Exit(128 + int(sig.(syscall.Signal)))
|
os.Exit(128 + int(sig.(syscall.Signal)))
|
||||||
|
|
|
@ -302,10 +302,10 @@ func (r *Session) GetRemoteImageLayer(imgID, registry string, imgSize int64) (io
|
||||||
}
|
}
|
||||||
|
|
||||||
if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
|
if res.Header.Get("Accept-Ranges") == "bytes" && imgSize > 0 {
|
||||||
logrus.Debugf("server supports resume")
|
logrus.Debug("server supports resume")
|
||||||
return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
|
return httputils.ResumableRequestReaderWithInitialResponse(r.client, req, 5, imgSize, res), nil
|
||||||
}
|
}
|
||||||
logrus.Debugf("server doesn't support resume")
|
logrus.Debug("server doesn't support resume")
|
||||||
return res.Body, nil
|
return res.Body, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
Загрузка…
Ссылка в новой задаче