Patch moby-engine to address CVE-2023-44487 (#9276)

Mitch Zhu 2024-06-03 10:52:43 -07:00 committed by GitHub
Parent a6539502f3
Commit a264db1f75
No key found matching this signature
GPG Key ID: B5690EEEBB952194
2 changed files with 205 additions and 1 deletion

CVE-2023-44487.patch (new file)

@@ -0,0 +1,200 @@
From acdb7b9731b3d1eb14352328d2976d4b7baaafea Mon Sep 17 00:00:00 2001
From: Mitch Zhu <mitchzhu@microsoft.com>
Date: Fri, 31 May 2024 17:00:00 +0000
Subject: [PATCH] Address CVE-2023-44487
---
.../grpc/internal/transport/http2_server.go | 11 +--
vendor/google.golang.org/grpc/server.go | 77 +++++++++++++------
2 files changed, 57 insertions(+), 31 deletions(-)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 3dd1564..9d9a3fd 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -165,15 +165,10 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
ID: http2.SettingMaxFrameSize,
Val: http2MaxFrameLen,
}}
- // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
- // permitted in the HTTP2 spec.
- maxStreams := config.MaxStreams
- if maxStreams == 0 {
- maxStreams = math.MaxUint32
- } else {
+ if config.MaxStreams != math.MaxUint32 {
isettings = append(isettings, http2.Setting{
ID: http2.SettingMaxConcurrentStreams,
- Val: maxStreams,
+ Val: config.MaxStreams,
})
}
dynamicWindow := true
@@ -252,7 +247,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
framer: framer,
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
- maxStreams: maxStreams,
+ maxStreams: config.MaxStreams,
inTapHandle: config.InTapHandle,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index f4dde72..17d39cf 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -115,12 +115,6 @@ type serviceInfo struct {
mdata interface{}
}
-type serverWorkerData struct {
- st transport.ServerTransport
- wg *sync.WaitGroup
- stream *transport.Stream
-}
-
// Server is a gRPC server to serve RPC requests.
type Server struct {
opts serverOptions
@@ -145,7 +139,7 @@ type Server struct {
channelzID *channelz.Identifier
czData *channelzData
- serverWorkerChannels []chan *serverWorkerData
+ serverWorkerChannel chan func()
}
type serverOptions struct {
@@ -177,6 +171,7 @@ type serverOptions struct {
}
var defaultServerOptions = serverOptions{
+ maxConcurrentStreams: math.MaxUint32,
maxReceiveMessageSize: defaultServerMaxReceiveMessageSize,
maxSendMessageSize: defaultServerMaxSendMessageSize,
connectionTimeout: 120 * time.Second,
@@ -387,6 +382,9 @@ func MaxSendMsgSize(m int) ServerOption {
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
// of concurrent streams to each ServerTransport.
func MaxConcurrentStreams(n uint32) ServerOption {
+ if n == 0 {
+ n = math.MaxUint32
+ }
return newFuncServerOption(func(o *serverOptions) {
o.maxConcurrentStreams = n
})
@@ -565,35 +563,31 @@ const serverWorkerResetThreshold = 1 << 16
// re-allocations (see the runtime.morestack problem [1]).
//
// [1] https://github.com/golang/go/issues/18138
-func (s *Server) serverWorker(ch chan *serverWorkerData) {
+func (s *Server) serverWorker() {
// To make sure all server workers don't reset at the same time, choose a
// random number of iterations before resetting.
threshold := serverWorkerResetThreshold + grpcrand.Intn(serverWorkerResetThreshold)
for completed := 0; completed < threshold; completed++ {
- data, ok := <-ch
+ f, ok := <-s.serverWorkerChannel
if !ok {
return
}
- s.handleStream(data.st, data.stream, s.traceInfo(data.st, data.stream))
- data.wg.Done()
+ f()
}
- go s.serverWorker(ch)
+ go s.serverWorker()
}
// initServerWorkers creates worker goroutines and channels to process incoming
// connections to reduce the time spent overall on runtime.morestack.
func (s *Server) initServerWorkers() {
- s.serverWorkerChannels = make([]chan *serverWorkerData, s.opts.numServerWorkers)
+ s.serverWorkerChannel = make(chan func())
for i := uint32(0); i < s.opts.numServerWorkers; i++ {
- s.serverWorkerChannels[i] = make(chan *serverWorkerData)
- go s.serverWorker(s.serverWorkerChannels[i])
+ go s.serverWorker()
}
}
func (s *Server) stopServerWorkers() {
- for i := uint32(0); i < s.opts.numServerWorkers; i++ {
- close(s.serverWorkerChannels[i])
- }
+ close(s.serverWorkerChannel)
}
// NewServer creates a gRPC server which has no service registered and has not
@@ -945,13 +939,20 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
defer st.Close()
var wg sync.WaitGroup
- var roundRobinCounter uint32
+ streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams)
st.HandleStreams(func(stream *transport.Stream) {
wg.Add(1)
+
+ streamQuota.acquire()
+ f := func() {
+ defer streamQuota.release()
+ defer wg.Done()
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ }
+
if s.opts.numServerWorkers > 0 {
- data := &serverWorkerData{st: st, wg: &wg, stream: stream}
select {
- case s.serverWorkerChannels[atomic.AddUint32(&roundRobinCounter, 1)%s.opts.numServerWorkers] <- data:
+ case s.serverWorkerChannel <- f:
default:
// If all stream workers are busy, fallback to the default code path.
go func() {
@@ -961,8 +962,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) {
}
} else {
go func() {
- defer wg.Done()
- s.handleStream(st, stream, s.traceInfo(st, stream))
+ go f()
}()
}
}, func(ctx context.Context, method string) context.Context {
@@ -1978,3 +1978,34 @@ type channelzServer struct {
func (c *channelzServer) ChannelzMetric() *channelz.ServerInternalMetric {
return c.s.channelzMetric()
}
+
+// atomicSemaphore implements a blocking, counting semaphore. acquire should be
+// called synchronously; release may be called asynchronously.
+type atomicSemaphore struct {
+ n atomic.Int64
+ wait chan struct{}
+}
+
+func (q *atomicSemaphore) acquire() {
+ if q.n.Add(-1) < 0 {
+ // We ran out of quota. Block until a release happens.
+ <-q.wait
+ }
+}
+
+func (q *atomicSemaphore) release() {
+ // N.B. the "<= 0" check below should allow for this to work with multiple
+ // concurrent calls to acquire, but also note that with synchronous calls to
+ // acquire, as our system does, n will never be less than -1. There are
+ // fairness issues (queuing) to consider if this was to be generalized.
+ if q.n.Add(1) <= 0 {
+ // An acquire was waiting on us. Unblock it.
+ q.wait <- struct{}{}
+ }
+}
+
+func newHandlerQuota(n uint32) *atomicSemaphore {
+ a := &atomicSemaphore{wait: make(chan struct{}, 1)}
+ a.n.Store(int64(n))
+ return a
+}
--
2.34.1
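
The heart of the fix is the handler quota added above: serveStreams now acquires a slot from an atomicSemaphore before dispatching each stream handler and releases it when the handler returns, so at most MaxConcurrentStreams handlers run per transport even if a client floods the connection with streams it immediately resets (the HTTP/2 "Rapid Reset" attack, CVE-2023-44487). What follows is a minimal, self-contained sketch of that pattern: the semaphore mirrors the patch's atomicSemaphore, while the driver loop in main is a hypothetical stand-in for the server's dispatch path.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// atomicSemaphore, in the spirit of the patch: a blocking, counting
// semaphore whose acquire must be called synchronously, while release
// may be called from any goroutine.
type atomicSemaphore struct {
	n    atomic.Int64
	wait chan struct{}
}

func (q *atomicSemaphore) acquire() {
	if q.n.Add(-1) < 0 {
		// Out of quota: park until a release hands this slot back.
		<-q.wait
	}
}

func (q *atomicSemaphore) release() {
	if q.n.Add(1) <= 0 {
		// An acquire is (or is about to be) waiting; unblock it.
		q.wait <- struct{}{}
	}
}

func newHandlerQuota(n uint32) *atomicSemaphore {
	a := &atomicSemaphore{wait: make(chan struct{}, 1)}
	a.n.Store(int64(n))
	return a
}

func main() {
	quota := newHandlerQuota(2) // cap of 2 concurrent "handlers"
	var wg sync.WaitGroup
	for i := 0; i < 6; i++ {
		quota.acquire() // synchronous, as in serveStreams
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			defer quota.release()
			fmt.Printf("handler %d running\n", id)
			time.Sleep(50 * time.Millisecond)
		}(i)
	}
	wg.Wait()
}

The wait channel is deliberately buffered with capacity 1: a release that arrives before the out-of-quota acquirer has parked on the channel completes without blocking, and because acquire is only ever called synchronously, n never drops below -1, so one slot of buffering is enough.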

moby-engine.spec

@@ -3,7 +3,7 @@
Summary: The open-source application container engine
Name: moby-engine
Version: 24.0.9
-Release: 3%{?dist}
+Release: 4%{?dist}
License: ASL 2.0
Group: Tools/Container
URL: https://mobyproject.org
@@ -21,6 +21,7 @@ Patch1: CVE-2024-23651.patch
# Remove once we upgrade this package at least to version 25.0+.
Patch2: CVE-2024-23652.patch
Patch3: CVE-2023-45288.patch
+Patch4: CVE-2023-44487.patch
%{?systemd_requires}
@@ -126,6 +127,9 @@ fi
%{_unitdir}/*
%changelog
+* Fri May 31 2024 Mitch Zhu <mitchzhu@microsoft.com> - 24.0.9-4
+- Fix for CVE-2023-44487
* Fri May 03 2024 Chris Gunn <chrisgun@microsoft.com> - 24.0.9-3
- Fix for CVE-2023-45288
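
For applications built on the patched engine's vendored gRPC, behavior is unchanged except that the stream cap is now real: MaxConcurrentStreams(0) still means "no limit" (the option maps 0 to math.MaxUint32), but a configured cap is enforced in the server's dispatch path rather than only advertised in the HTTP/2 SETTINGS frame. A hedged sketch of a server opting into an explicit cap (the address and the limit of 256 are illustrative, not values from the patch):

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	// With the patched gRPC, this cap is enforced server-side: at most
	// 256 stream handlers per connection run concurrently, even against
	// rapid open-and-reset (RST_STREAM) floods.
	srv := grpc.NewServer(grpc.MaxConcurrentStreams(256))
	log.Fatal(srv.Serve(lis))
}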