Mirror of https://github.com/Azure/ARO-RP.git

code comments

This commit is contained in:
Parent: 9e5c4f8930
Commit: eee6958f0b
@@ -11,19 +11,54 @@ import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Kubernetes RBAC uses the cross-product of PolicyRules stored in Role or
// ClusterRole resources, thus enabling the PolicyRules to be stored more
// compactly. For example:
//
// rules:
// - apiGroups: [ group1 ]
//   resources: [ resource1 ]
//   verbs: [ verb1 ]
// - apiGroups: [ group1 ]
//   resources: [ resource1 ]
//   verbs: [ verb2 ]
// - apiGroups: [ group1 ]
//   resources: [ resource2 ]
//   verbs: [ verb1 ]
// - apiGroups: [ group1 ]
//   resources: [ resource2 ]
//   verbs: [ verb2 ]
//
// is equivalent to:
//
// rules:
// - apiGroups: [ group1 ]
//   resources: [ resource1, resource2 ]
//   verbs: [ verb1, verb2 ]
//
//
// This file contains functions which can compact slices of individual
// PolicyRules according to some simple rules. We do not attempt to cover all
// possible simplifications.

// compactVerbs compacts simple PolicyRules which differ only in their verbs
// into a single PolicyRule with all the verbs combined.
func compactVerbs(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
	out := make([]rbacv1.PolicyRule, 0, len(in))
	m := map[schema.GroupResource]map[string]struct{}{}

	// 1. Collate matching simple PolicyRules into the map.
	for _, r := range in {
		if len(r.NonResourceURLs) > 0 ||
			len(r.ResourceNames) > 0 ||
			len(r.APIGroups) != 1 ||
			len(r.Resources) != 1 {
			// rule too complex for us to deal with - emit and continue
			out = append(out, r)
			continue
		}

		// add rule to map so we can compact verbs
		k := schema.GroupResource{Group: r.APIGroups[0], Resource: r.Resources[0]}
		for _, v := range r.Verbs {
			if m[k] == nil {

@@ -33,6 +68,8 @@ func compactVerbs(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
		}
	}

	// 2. Walk the map emitting a single PolicyRule for each key with the verbs
	// combined.
	for gr, verbs := range m {
		pr := &rbacv1.PolicyRule{
			APIGroups: []string{gr.Group},

@@ -50,6 +87,8 @@ func compactVerbs(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
	return out
}

// compactResources compacts simple PolicyRules which differ only in their
// resources into a single PolicyRule with all the resources combined.
func compactResources(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
	out := make([]rbacv1.PolicyRule, 0, len(in))
	type groupVerbs struct {

@@ -58,14 +97,17 @@ func compactResources(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
	}
	m := map[groupVerbs]map[string]struct{}{}

	// 1. Collate matching simple PolicyRules into the map.
	for _, r := range in {
		if len(r.NonResourceURLs) > 0 ||
			len(r.ResourceNames) > 0 ||
			len(r.APIGroups) != 1 {
			// rule too complex for us to deal with - emit and continue
			out = append(out, r)
			continue
		}

		// add rule to map so we can compact resources
		k := groupVerbs{Group: r.APIGroups[0], Verbs: strings.Join(r.Verbs, "/")}
		for _, r := range r.Resources {
			if m[k] == nil {

@@ -75,6 +117,8 @@ func compactResources(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
		}
	}

	// 2. Walk the map emitting a single PolicyRule for each key with the
	// resources combined.
	for gv, resources := range m {
		pr := &rbacv1.PolicyRule{
			APIGroups: []string{gv.Group},
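To make the collate-then-emit pattern above concrete, here is a standalone sketch (not part of this commit) that approximates compactVerbs: rules with exactly one API group and one resource are keyed on that pair and their verbs are unioned; everything else passes through unchanged. The name mergeVerbs and the sorting for deterministic output are illustrative choices, not the repo's code.

package main

import (
	"fmt"
	"sort"

	rbacv1 "k8s.io/api/rbac/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// mergeVerbs is a simplified take on the collate-then-emit pattern: simple
// rules are keyed on their (group, resource) pair and their verbs unioned;
// anything more complex is emitted as-is.
func mergeVerbs(in []rbacv1.PolicyRule) []rbacv1.PolicyRule {
	out := make([]rbacv1.PolicyRule, 0, len(in))
	m := map[schema.GroupResource]map[string]struct{}{}

	for _, r := range in {
		if len(r.NonResourceURLs) > 0 || len(r.ResourceNames) > 0 ||
			len(r.APIGroups) != 1 || len(r.Resources) != 1 {
			out = append(out, r) // too complex to merge - emit unchanged
			continue
		}
		k := schema.GroupResource{Group: r.APIGroups[0], Resource: r.Resources[0]}
		if m[k] == nil {
			m[k] = map[string]struct{}{}
		}
		for _, v := range r.Verbs {
			m[k][v] = struct{}{}
		}
	}

	for gr, verbs := range m {
		pr := rbacv1.PolicyRule{
			APIGroups: []string{gr.Group},
			Resources: []string{gr.Resource},
		}
		for v := range verbs {
			pr.Verbs = append(pr.Verbs, v)
		}
		sort.Strings(pr.Verbs) // deterministic output for the example
		out = append(out, pr)
	}

	return out
}

func main() {
	in := []rbacv1.PolicyRule{
		{APIGroups: []string{"group1"}, Resources: []string{"resource1"}, Verbs: []string{"verb1"}},
		{APIGroups: []string{"group1"}, Resources: []string{"resource1"}, Verbs: []string{"verb2"}},
	}
	fmt.Printf("%+v\n", mergeVerbs(in)) // a single rule with Verbs: [verb1 verb2]
}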
@@ -16,6 +16,11 @@ import (
	"k8s.io/client-go/rest"
)

// genDiscoveryCache generates the discovery cache. This is used, primarily, by
// Geneva Actions k8s actions, as a fallback to be able to map kinds to
// resources if the API server is flaky and discovery doesn't work at the time
// of running the Geneva Action. It is also used by the dynamic client, but its
// use there is less critical.
func genDiscoveryCache(restconfig *rest.Config) error {
	cli, err := disk.NewCachedDiscoveryClientForConfig(restconfig, discoveryCacheDir, "", 0)
	if err != nil {
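For illustration only, a hedged sketch of the client-go pieces the function above relies on: an on-disk cached discovery client plus a RESTMapper that answers kind-to-resource questions from the cache. The kubeconfig loading, the "cache" directory and the 10-minute TTL are assumptions made for the example; ARO-RP wires its *rest.Config and discoveryCacheDir differently.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery/cached/disk"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed: a kubeconfig in the default location.
	restconfig, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// On-disk discovery cache, analogous in spirit to the call above;
	// "cache" and the TTL are arbitrary choices for this sketch.
	cli, err := disk.NewCachedDiscoveryClientForConfig(restconfig, "cache", "", 10*time.Minute)
	if err != nil {
		panic(err)
	}

	// A RESTMapper built on the cached client can map kinds to resources
	// even if live discovery later becomes flaky.
	mapper := restmapper.NewDeferredDiscoveryRESTMapper(cli)
	m, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Resource) // apps/v1, Resource=deployments
}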
@@ -18,6 +18,10 @@ import (
	"k8s.io/client-go/rest"
)

// genRBAC auto-generates the system:aro-sre ClusterRole. This is close in
// spirit to cluster-reader but is defined separately (a) so that we guarantee
// it always covers all the necessary kinds and (b) so that the operator syncs
// it to avoid unexpected changes.
func genRBAC(restconfig *rest.Config) error {
	cli, err := discovery.NewDiscoveryClientForConfig(restconfig)
	if err != nil {

@@ -120,6 +124,8 @@ func isReadOnly(group string, apiresource *metav1.APIResource, verb string) bool
	case "get", "list", "watch":
		return true
	case "create":
		// These kinds are not actually persisted to etcd; create is kind-of
		// like get here.
		gr := schema.GroupResource{Group: group, Resource: apiresource.Name}.String()
		switch gr {
		case "tokenreviews.authentication.k8s.io",
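The verb classification above can be illustrated with a small standalone sketch; isReadOnlyVerb and its abbreviated allowlist are hypothetical simplifications of isReadOnly, not the repo's code.

package main

import "fmt"

// isReadOnlyVerb: plain read verbs are read-only; "create" is treated as
// read-only only for a (hypothetical, abbreviated) allowlist of resources
// that are not persisted, such as tokenreviews.authentication.k8s.io.
func isReadOnlyVerb(groupResource, verb string) bool {
	switch verb {
	case "get", "list", "watch":
		return true
	case "create":
		switch groupResource {
		case "tokenreviews.authentication.k8s.io":
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isReadOnlyVerb("pods", "list"))                                 // true
	fmt.Println(isReadOnlyVerb("pods", "create"))                               // false
	fmt.Println(isReadOnlyVerb("tokenreviews.authentication.k8s.io", "create")) // true
}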
@@ -97,6 +97,11 @@ func (p *prometheus) roundTripper(r *http.Request) (*http.Response, error) {
	return cli.Do(r)
}

// modifyResponse: unfortunately Prometheus serves HTML files containing just a
// couple of absolute links. Given that we're serving Prometheus under
// /subscriptions/.../clusterName/prometheus, we need to dig these out and
// rewrite them. This is a hack which hopefully goes away once we forward all
// metrics to Kusto.
func (p *prometheus) modifyResponse(r *http.Response) error {
	mediaType, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
	if mediaType != "text/html" {

@@ -115,6 +120,7 @@ func (p *prometheus) modifyResponse(r *http.Response) error {
		buf.Write(b)

	} else {
		// walk the HTML parse tree calling makeRelative() on each node
		walk(n, makeRelative)

		err = html.Render(buf, n)

@@ -133,18 +139,23 @@ func (p *prometheus) modifyResponse(r *http.Response) error {
func makeRelative(n *html.Node) {
	switch n.DataAtom {
	case atom.A, atom.Link:
		// rewrite <a href="/foo"> -> <a href="./foo">
		// rewrite <link href="/foo"> -> <link href="./foo">
		for i, attr := range n.Attr {
			if attr.Namespace == "" && attr.Key == "href" && strings.HasPrefix(n.Attr[i].Val, "/") {
				n.Attr[i].Val = "." + n.Attr[i].Val
			}
		}
	case atom.Script:
		// rewrite <script src="/foo"> -> <script src="./foo">
		for i, attr := range n.Attr {
			if attr.Namespace == "" && attr.Key == "src" && strings.HasPrefix(n.Attr[i].Val, "/") {
				n.Attr[i].Val = "." + n.Attr[i].Val
			}
		}

		// special hack: find <script>...</script> and rewrite
		// `var PATH_PREFIX = "";` -> `var PATH_PREFIX = ".";` once.
		if len(n.Attr) == 0 {
			n.FirstChild.Data = strings.Replace(n.FirstChild.Data, `var PATH_PREFIX = "";`, `var PATH_PREFIX = ".";`, 1)
		}
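A self-contained sketch of the parse/walk/rewrite/render flow that modifyResponse and makeRelative implement, using golang.org/x/net/html. walkNodes and relativize are illustrative stand-ins (the repo's walk helper is not shown in this diff), and the PATH_PREFIX script hack is omitted.

package main

import (
	"fmt"
	"os"
	"strings"

	"golang.org/x/net/html"
	"golang.org/x/net/html/atom"
)

// walkNodes applies fn to every node in the parse tree.
func walkNodes(n *html.Node, fn func(*html.Node)) {
	fn(n)
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		walkNodes(c, fn)
	}
}

// relativize rewrites absolute href/src attributes to be relative, in the
// same spirit as makeRelative above.
func relativize(n *html.Node) {
	switch n.DataAtom {
	case atom.A, atom.Link, atom.Script:
		for i, attr := range n.Attr {
			if attr.Namespace == "" &&
				(attr.Key == "href" || attr.Key == "src") &&
				strings.HasPrefix(attr.Val, "/") {
				n.Attr[i].Val = "." + attr.Val
			}
		}
	}
}

func main() {
	doc := `<html><head><link href="/static/app.css"></head><body><a href="/graph">graph</a></body></html>`
	n, err := html.Parse(strings.NewReader(doc))
	if err != nil {
		panic(err)
	}
	walkNodes(n, relativize)
	if err := html.Render(os.Stdout, n); err != nil {
		panic(err)
	}
	fmt.Println()
}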
@@ -23,8 +23,34 @@ import (
	"github.com/Azure/ARO-RP/pkg/util/recover"
)

// This file handles smart proxying of SSH connections between SRE->portal and
// portal->cluster. We don't want to give the SRE the cluster SSH key, thus
// this has to be an application-level proxy so we can replace the validated
// one-time password that the SRE uses to authenticate with the cluster SSH key.
//
// Given that we're now an application-level proxy, we pull a second trick as
// well: we inject SSH agent forwarding into the portal->cluster connection leg,
// enabling an SRE to ssh from a master node to a worker node without needing an
// additional credential.
//
// SSH itself is a multiplexed protocol. Within a single TCP connection there
// can exist multiple SSH channels. Administrative requests and responses can
// also be sent, both on any channel and/or globally. Channel creations and
// requests can be initiated by either side of the connection.
//
// The golang.org/x/crypto/ssh library exposes the above at a connection level
// as a Conn, a chan NewChannel and a chan *Request. All of these have to be
// serviced to prevent the connection from blocking. Requests to open new
// channels appear on chan NewChannel; global administrative requests appear on
// chan *Request. Once a new channel is open, a Channel (effectively an
// io.ReadWriteCloser) must be handled plus a further chan *Request for
// channel-scoped administrative requests.
//
// The top half of this file deals with connection instantiation; the bottom
// half deals with proxying Channels and *Requests.

const (
-	sshTimeout = time.Hour
+	sshTimeout = time.Hour // never allow a connection to live longer than an hour.
)

func (s *ssh) Run() error {
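As a standalone illustration of the servicing obligations the comment describes (not the proxy logic itself), the sketch below accepts TCP connections, hands them to cryptossh.NewServerConn, and services the resulting Conn, chan NewChannel and chan *Request. serve and handleChannel are hypothetical names, and the ServerConfig construction (host key, auth callbacks) is omitted.

package main

import (
	"io"
	"net"

	cryptossh "golang.org/x/crypto/ssh"
)

// serve shows the three things each incoming connection yields - a Conn, a
// chan NewChannel and a chan *Request - all of which must be handled.
func serve(l net.Listener, config *cryptossh.ServerConfig) error {
	for {
		c, err := l.Accept()
		if err != nil {
			return err
		}

		go func() {
			conn, newchannels, requests, err := cryptossh.NewServerConn(c, config)
			if err != nil {
				return
			}
			defer conn.Close()

			// Global administrative requests must be drained (or answered),
			// otherwise the connection blocks.
			go cryptossh.DiscardRequests(requests)

			// Every request to open a channel must be accepted or rejected.
			for nc := range newchannels {
				go handleChannel(nc)
			}
		}()
	}
}

// handleChannel is a placeholder for real per-channel logic (the proxying in
// this file); it just drains the channel and its channel-scoped requests.
func handleChannel(nc cryptossh.NewChannel) {
	ch, chRequests, err := nc.Accept()
	if err != nil {
		return
	}
	defer ch.Close()

	go cryptossh.DiscardRequests(chRequests)

	_, _ = io.Copy(io.Discard, ch)
}

func main() {
	l, err := net.Listen("tcp", "localhost:2222")
	if err != nil {
		panic(err)
	}
	// ServerConfig construction omitted; handshakes will fail as written.
	panic(serve(l, &cryptossh.ServerConfig{}))
}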
@@ -60,6 +86,8 @@ func (s *ssh) newConn(ctx context.Context, c1 net.Conn) error {
	var portalDoc *api.PortalDocument
	var connmetadata cryptossh.ConnMetadata

	// PasswordCallback is called via NewServerConn to validate the one-time
	// password provided.
	config.PasswordCallback = func(_connmetadata cryptossh.ConnMetadata, pw []byte) (*cryptossh.Permissions, error) {
		connmetadata = _connmetadata

@@ -81,6 +109,7 @@ func (s *ssh) newConn(ctx context.Context, c1 net.Conn) error {
		return nil, s.dbPortal.Delete(ctx, portalDoc)
	}

	// Serve the incoming (SRE->portal) connection.
	conn1, newchannels1, requests1, err := cryptossh.NewServerConn(c1, config)
	if err != nil {
		var username string

@@ -95,6 +124,7 @@ func (s *ssh) newConn(ctx context.Context, c1 net.Conn) error {
		return err
	}

	// Log the incoming connection attempt.
	accessLog := utillog.EnrichWithPath(s.baseAccessLog, portalDoc.Portal.ID)
	accessLog = accessLog.WithFields(logrus.Fields{
		"hostname": fmt.Sprintf("master-%d", portalDoc.Portal.SSH.Master),

@@ -128,6 +158,7 @@ func (s *ssh) newConn(ctx context.Context, c1 net.Conn) error {
		return err
	}

	// Connect the second connection leg (portal->cluster).
	conn2, newchannels2, requests2, err := cryptossh.NewClientConn(c2, "", &cryptossh.ClientConfig{
		User: "core",
		Auth: []cryptossh.AuthMethod{

@@ -153,9 +184,12 @@ func (s *ssh) newConn(ctx context.Context, c1 net.Conn) error {
		return err
	}

	// Proxy channels and requests between the two connections.
	return s.proxyConn(accessLog, keyring, conn1, conn2, newchannels1, newchannels2, requests1, requests2)
}

// proxyConn handles incoming new channel and administrative requests. It calls
// newChannel to handle new channels, each on a new goroutine.
func (s *ssh) proxyConn(accessLog *logrus.Entry, keyring agent.Agent, conn1, conn2 cryptossh.Conn, newchannels1, newchannels2 <-chan cryptossh.NewChannel, requests1, requests2 <-chan *cryptossh.Request) error {
	timer := time.NewTimer(sshTimeout)
	defer timer.Stop()

@@ -172,7 +206,8 @@ func (s *ssh) proxyConn(accessLog *logrus.Entry, keyring agent.Agent, conn1, con
				return nil
			}

-			// on the first c->s session, advertise agent availability
+			// on the first SRE->cluster session, inject an advertisement of
+			// agent availability.
			var firstSession bool
			if !sessionOpened && nc.ChannelType() == "session" {
				firstSession = true

@@ -188,8 +223,8 @@ func (s *ssh) proxyConn(accessLog *logrus.Entry, keyring agent.Agent, conn1, con
				return nil
			}

-			// hijack and handle incoming s->c agent requests
			if nc.ChannelType() == "auth-agent@openssh.com" {
+				// hijack and handle incoming cluster->SRE agent requests
				go func() {
					_ = s.handleAgent(accessLog, nc, keyring)
				}()

@@ -234,6 +269,9 @@ func (s *ssh) handleAgent(accessLog *logrus.Entry, nc cryptossh.NewChannel, keyr
	return agent.ServeAgent(keyring, ch)
}

// newChannel handles an incoming request to create a new channel. If the
// channel creation is successful, it calls proxyChannel to proxy the channel
// between SRE and cluster.
func (s *ssh) newChannel(accessLog *logrus.Entry, nc cryptossh.NewChannel, conn1, conn2 cryptossh.Conn, firstSession bool) error {
	defer recover.Panic(s.log)
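Finally, a minimal sketch of the agent-forwarding piece that handleAgent builds on: an in-memory keyring served over an io.ReadWriter with agent.ServeAgent, with net.Pipe standing in for the forwarded auth-agent@openssh.com channel. The throwaway RSA key and the pipe are assumptions made for the example; in the proxy the keyring holds the cluster SSH key that the SRE never sees.

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"net"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// In-memory keyring holding one throwaway key.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	keyring := agent.NewKeyring()
	if err := keyring.Add(agent.AddedKey{PrivateKey: key, Comment: "example"}); err != nil {
		panic(err)
	}

	// net.Pipe stands in for the accepted agent channel (an
	// io.ReadWriteCloser) that handleAgent serves the keyring on.
	server, client := net.Pipe()
	go func() {
		_ = agent.ServeAgent(keyring, server)
	}()

	// The far end (sshing onward from a master node) speaks the agent
	// protocol back over the forwarded channel.
	keys, err := agent.NewClient(client).List()
	if err != nil {
		panic(err)
	}
	for _, k := range keys {
		fmt.Println(k.Format, k.Comment)
	}
}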