Mirror of https://github.com/Azure/ARO-RP.git

Merge pull request #3119 from 2uasimojo/ARO-3801/revendor-hive

Commit 6f4d75cfdd

go.mod | 15
@@ -69,15 +69,15 @@ require (
 	golang.org/x/sync v0.1.0
 	golang.org/x/text v0.9.0
 	golang.org/x/tools v0.6.0
-	k8s.io/api v0.25.0
+	k8s.io/api v0.26.2
 	k8s.io/apiextensions-apiserver v0.25.0
-	k8s.io/apimachinery v0.25.0
+	k8s.io/apimachinery v0.26.2
 	k8s.io/cli-runtime v0.24.1
 	k8s.io/client-go v12.0.0+incompatible
 	k8s.io/code-generator v0.24.1
 	k8s.io/kubectl v0.24.1
 	k8s.io/kubernetes v1.23.5
-	k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
+	k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
 	sigs.k8s.io/cluster-api-provider-azure v1.2.1
 	sigs.k8s.io/controller-runtime v0.13.1
 	sigs.k8s.io/controller-tools v0.9.0

@@ -194,6 +194,7 @@ require (
 	github.com/opencontainers/runc v1.1.5 // indirect
 	github.com/opencontainers/runtime-tools v0.9.1-0.20220714195903-17b3287fafb7 // indirect
 	github.com/opencontainers/selinux v1.10.1 // indirect
+	github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect
 	github.com/ostreedev/ostree-go v0.0.0-20210805093236-719684c64e4f // indirect
 	github.com/pascaldekloe/name v0.0.0-20180628100202-0fd16699aae1 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect

@@ -247,10 +248,10 @@ require (
 	k8s.io/apiserver v0.24.7 // indirect
 	k8s.io/component-base v0.25.0 // indirect
 	k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect
-	k8s.io/klog/v2 v2.70.1 // indirect
+	k8s.io/klog/v2 v2.90.1 // indirect
 	k8s.io/kube-aggregator v0.24.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
-	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
+	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
 	sigs.k8s.io/kube-storage-version-migrator v0.0.4 // indirect
 	sigs.k8s.io/kustomize/api v0.11.4 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.13.6 // indirect

@@ -808,6 +809,6 @@ replace (
 )

 replace (
-	github.com/openshift/hive => github.com/openshift/hive v1.1.17-0.20220719141355-c63c9b0281d8
-	github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8
+	github.com/openshift/hive => github.com/openshift/hive v1.1.17-0.20230811220652-70b666ec89b0
+	github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0
 )
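For reference, a revendor like this one is normally produced by repointing the replace directives at the new hive commit and regenerating the vendor tree. A minimal sketch of that workflow, assuming a standard Go toolchain (the exact commands used for this PR are not recorded in the diff):

    go mod edit -replace github.com/openshift/hive=github.com/openshift/hive@70b666ec89b0
    go mod edit -replace github.com/openshift/hive/apis=github.com/openshift/hive/apis@70b666ec89b0
    go mod tidy
    go mod vendor

go mod tidy then resolves the pseudo-versions and hash entries that show up in go.sum below.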
go.sum | 24

@@ -466,6 +466,7 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg
 github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=

@@ -1197,6 +1198,7 @@ github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvw
 github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
 github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
 github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU=
 github.com/onsi/ginkgo/v2 v2.3.1 h1:8SbseP7qM32WcvE6VaN6vfXxv698izmsJ1UQX9ve7T8=

@@ -1214,6 +1216,7 @@ github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je4
 github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
 github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
 github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
 github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
 github.com/onsi/gomega v1.22.0 h1:AIg2/OntwkBiCg5Tt1ayyiF1ArFrWFoCSMtMi/wdApk=
 github.com/onsi/gomega v1.22.0/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc=

@@ -1270,8 +1273,10 @@ github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-
 github.com/openshift/cluster-api-provider-azure v0.1.0-alpha.3.0.20210626224711-5d94c794092f/go.mod h1:GR+ocB8I+Z7JTSBdO+DMu/diBfH66lRlRpnc1KWysUM=
 github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315 h1:zmwv8TgbOgZ5QoaPhLdOivqg706Z+VyuPs703jNMdrE=
 github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315/go.mod h1:jCX07P5qFcuJrzd0xO5caxLjvSscehiaq6We/hGlcW8=
-github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8 h1:7e4sMDIstjEKW6SmPv8VhusDaYinDBrspd1M7ybIHC8=
-github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8/go.mod h1:XWo9dsulk75E9xkfxS/GNpJrL5UHgn3wuSyPeO39NME=
+github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 h1:cHyxR+Y8rAMT6m1jQCaYGRwikqahI0OjjUDhFNf3ySQ=
+github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA=
+github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0 h1:ATjjHF7IbYrPMrxpjVd0b76/zQztTMN1Dn7Qzs4rGJE=
+github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0/go.mod h1:VIxA5HhvBmsqVn7aUVQYs004B9K4U5A+HrFwvRq2nK8=
 github.com/openshift/imagebuilder v1.2.4-0.20220502172744-009dbc6cb805/go.mod h1:TRYHe4CH9U6nkDjxjBNM5klrLbJBrRbpJE5SaRwUBsQ=
 github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0 h1:hiwAdZ5ishMe4qtUejv+CuBWra18cjZMHVFlVPOZnw0=
 github.com/openshift/library-go v0.0.0-20220303081124-fb4e7a2872f0/go.mod h1:6AmNM4N4nHftckybV/U7bQW+5AvK5TW81ndSI6KEidw=

@@ -1481,7 +1486,6 @@ github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJ
 github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=

@@ -2108,6 +2112,7 @@ golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/tools v0.1.7-0.20210921203514-b98090b833e3/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
 golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM=
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=

@@ -2271,8 +2276,9 @@ k8s.io/cri-api v0.23.0/go.mod h1:2edENu3/mkyW3c6fVPPPaVGEFbLRacJizBbSp7ZOLOo=
 k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI=
 k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.70.1 h1:7aaoSdahviPmR+XkS7FyxlkkXs6tHISSG03RxleQAVQ=
-k8s.io/klog/v2 v2.70.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
+k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-aggregator v0.23.0 h1:IjY8CfGHH9WUvJXIaAsAxTzHDsaLVeaEqjkvo6MLMD0=
 k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME=
 k8s.io/kube-controller-manager v0.23.0/go.mod h1:iHapRJJBe+fWu6hG3ye43YMFEeZcnIlRxDUS72bwJoE=

@@ -2284,8 +2290,8 @@ k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI=
 k8s.io/kubernetes v1.23.0 h1:r2DrryCpnmFfBuelpUNSWXHtD6Zy7SdwaCcycV5DsJE=
 k8s.io/kubernetes v1.23.0/go.mod h1:sgD3+Qzb8FHlRKlZnNCN+np3zZuHEAb/0PKLJkYyCUI=
 k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo=
-k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed h1:jAne/RjBTyawwAy0utX5eqigAwz/lQhTmy+Hr/Cpue4=
-k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20230313181309-38a27ef9d749 h1:xMMXJlJbsU8w3V5N2FLDQ8YgU8s1EoULdbQBcAeNJkY=
+k8s.io/utils v0.0.0-20230313181309-38a27ef9d749/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
 modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
 modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
 modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=

@@ -2304,8 +2310,8 @@ sigs.k8s.io/controller-runtime v0.11.2/go.mod h1:P6QCzrEjLaZGqHsfd+os7JQ+WFZhvB8
 sigs.k8s.io/controller-tools v0.5.0 h1:3u2RCwOlp0cjCALAigpOcbAf50pE+kHSdueUosrC/AE=
 sigs.k8s.io/controller-tools v0.5.0/go.mod h1:JTsstrMpxs+9BUj6eGuAaEb6SDSPTeVtUyp0jmnAM/I=
 sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k=
-sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
+sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
 sigs.k8s.io/kind v0.11.1/go.mod h1:fRpgVhtqAWrtLB9ED7zQahUimpUXuG/iHT88xYqEGIA=
 sigs.k8s.io/kube-storage-version-migrator v0.0.4 h1:qsCecgZHgdismlTt8xCmS/3numvpxrj58RWJeIg76wc=
 sigs.k8s.io/kube-storage-version-migrator v0.0.4/go.mod h1:mXfSLkx9xbJHQsgNDDUZK/iQTs2tMbx/hsJlWe6Fthw=
@@ -263,8 +263,8 @@ func TestConditions(t *testing.T) {
 			APIVersion: arov1alpha1.GroupVersion.Identifier(),
 			Kind:       "Cluster",
 			Name:       arov1alpha1.SingletonClusterName,
-			Controller:         pointer.BoolPtr(true),
-			BlockOwnerDeletion: pointer.BoolPtr(true),
+			Controller:         pointer.Bool(true),
+			BlockOwnerDeletion: pointer.Bool(true),
 		}}
 		if diff := cmp.Diff(wantOwnerReference, operator.ObjectMeta.OwnerReferences); diff != "" {
 			t.Error(diff)
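The hunk above is the only first-party code change in the commit: the test swaps pointer.BoolPtr for pointer.Bool, keeping pace with the k8s.io/utils bump in go.mod, where BoolPtr survives only as a deprecated alias. Both helpers return a *bool; a minimal sketch:

    import "k8s.io/utils/pointer"

    // Both return a *bool pointing at true; BoolPtr is the deprecated spelling.
    controller := pointer.Bool(true)
    legacy := pointer.BoolPtr(true)
    _, _ = controller, legacy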
vendor/github.com/openshift/custom-resource-status/LICENSE | 201 (generated, vendored, new file)

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
vendor/github.com/openshift/custom-resource-status/conditions/v1/conditions.go | 114 (generated, vendored, new file)

@@ -0,0 +1,114 @@
package v1

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// SetStatusCondition sets the corresponding condition in conditions to newCondition.
// The return value indicates if this resulted in any changes *other than* LastHeartbeatTime.
func SetStatusCondition(conditions *[]Condition, newCondition Condition) bool {
	if conditions == nil {
		conditions = &[]Condition{}
	}
	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
		newCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
		*conditions = append(*conditions, newCondition)
		return true
	}

	changed := updateCondition(existingCondition, newCondition)
	existingCondition.LastHeartbeatTime = metav1.NewTime(time.Now())
	return changed
}

// SetStatusConditionNoHeartbeat sets the corresponding condition in conditions to newCondition
// without setting lastHeartbeatTime.
// The return value indicates if this resulted in any changes.
func SetStatusConditionNoHeartbeat(conditions *[]Condition, newCondition Condition) bool {
	if conditions == nil {
		conditions = &[]Condition{}
	}
	existingCondition := FindStatusCondition(*conditions, newCondition.Type)
	if existingCondition == nil {
		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
		*conditions = append(*conditions, newCondition)
		return true
	}

	return updateCondition(existingCondition, newCondition)
}

// RemoveStatusCondition removes the corresponding conditionType from conditions.
func RemoveStatusCondition(conditions *[]Condition, conditionType ConditionType) {
	if conditions == nil {
		return
	}
	newConditions := []Condition{}
	for _, condition := range *conditions {
		if condition.Type != conditionType {
			newConditions = append(newConditions, condition)
		}
	}

	*conditions = newConditions
}

func updateCondition(existingCondition *Condition, newCondition Condition) bool {
	changed := false
	if existingCondition.Status != newCondition.Status {
		changed = true
		existingCondition.Status = newCondition.Status
		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
	}

	if existingCondition.Reason != newCondition.Reason {
		changed = true
		existingCondition.Reason = newCondition.Reason
	}
	if existingCondition.Message != newCondition.Message {
		changed = true
		existingCondition.Message = newCondition.Message
	}
	return changed
}

// FindStatusCondition finds the conditionType in conditions.
func FindStatusCondition(conditions []Condition, conditionType ConditionType) *Condition {
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return &conditions[i]
		}
	}

	return nil
}

// IsStatusConditionTrue returns true when the conditionType is present and set to `corev1.ConditionTrue`
func IsStatusConditionTrue(conditions []Condition, conditionType ConditionType) bool {
	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionTrue)
}

// IsStatusConditionFalse returns true when the conditionType is present and set to `corev1.ConditionFalse`
func IsStatusConditionFalse(conditions []Condition, conditionType ConditionType) bool {
	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionFalse)
}

// IsStatusConditionUnknown returns true when the conditionType is present and set to `corev1.ConditionUnknown`
func IsStatusConditionUnknown(conditions []Condition, conditionType ConditionType) bool {
	return IsStatusConditionPresentAndEqual(conditions, conditionType, corev1.ConditionUnknown)
}

// IsStatusConditionPresentAndEqual returns true when conditionType is present and equal to status.
func IsStatusConditionPresentAndEqual(conditions []Condition, conditionType ConditionType, status corev1.ConditionStatus) bool {
	for _, condition := range conditions {
		if condition.Type == conditionType {
			return condition.Status == status
		}
	}
	return false
}
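The newly vendored package above is a small conditions helper library; a hedged usage sketch (illustrative only, not code from this repository — ConditionAvailable is one of the constants defined in types.go below):

    package main

    import (
    	"fmt"

    	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	var conds []conditionsv1.Condition

    	// First call appends the condition and stamps LastTransitionTime
    	// and LastHeartbeatTime; it reports true because something changed.
    	changed := conditionsv1.SetStatusCondition(&conds, conditionsv1.Condition{
    		Type:    conditionsv1.ConditionAvailable,
    		Status:  corev1.ConditionTrue,
    		Reason:  "AsExpected",
    		Message: "all resources ready",
    	})
    	fmt.Println(changed) // true

    	// Lookups go through the same package.
    	fmt.Println(conditionsv1.IsStatusConditionTrue(conds, conditionsv1.ConditionAvailable)) // true
    }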
vendor/github.com/openshift/custom-resource-status/conditions/v1/doc.go | 9 (generated, vendored, new file)

@@ -0,0 +1,9 @@
// +k8s:deepcopy-gen=package,register
// +k8s:defaulter-gen=TypeMeta
// +k8s:openapi-gen=true

// Package v1 provides version v1 of the types and functions necessary to
// manage and inspect a slice of conditions. It is opinionated in the
// condition types provided but leaves it to the user to define additional
// types as necessary.
package v1
vendor/github.com/openshift/custom-resource-status/conditions/v1/types.go | 51 (generated, vendored, new file)

@@ -0,0 +1,51 @@
package v1

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Condition represents the state of the operator's
// reconciliation functionality.
// +k8s:deepcopy-gen=true
type Condition struct {
	Type ConditionType `json:"type" description:"type of condition ie. Available|Progressing|Degraded."`

	Status corev1.ConditionStatus `json:"status" description:"status of the condition, one of True, False, Unknown"`

	// +optional
	Reason string `json:"reason,omitempty" description:"one-word CamelCase reason for the condition's last transition"`

	// +optional
	Message string `json:"message,omitempty" description:"human-readable message indicating details about last transition"`

	// +optional
	LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime" description:"last time we got an update on a given condition"`

	// +optional
	LastTransitionTime metav1.Time `json:"lastTransitionTime" description:"last time the condition transit from one status to another"`
}

// ConditionType is the state of the operator's reconciliation functionality.
type ConditionType string

const (
	// ConditionAvailable indicates that the resources maintained by the operator,
	// is functional and available in the cluster.
	ConditionAvailable ConditionType = "Available"

	// ConditionProgressing indicates that the operator is actively making changes to the resources maintained by the
	// operator
	ConditionProgressing ConditionType = "Progressing"

	// ConditionDegraded indicates that the resources maintained by the operator are not functioning completely.
	// An example of a degraded state would be if not all pods in a deployment were running.
	// It may still be available, but it is degraded
	ConditionDegraded ConditionType = "Degraded"

	// ConditionUpgradeable indicates whether the resources maintained by the operator are in a state that is safe to upgrade.
	// When `False`, the resources maintained by the operator should not be upgraded and the
	// message field should contain a human readable description of what the administrator should do to
	// allow the operator to successfully update the resources maintained by the operator.
	ConditionUpgradeable ConditionType = "Upgradeable"
)
vendor/github.com/openshift/custom-resource-status/conditions/v1/zz_generated.deepcopy.go | 23 (generated, vendored, new file)

@@ -0,0 +1,23 @@
// +build !ignore_autogenerated

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Condition) DeepCopyInto(out *Condition) {
	*out = *in
	in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime)
	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition.
func (in *Condition) DeepCopy() *Condition {
	if in == nil {
		return nil
	}
	out := new(Condition)
	in.DeepCopyInto(out)
	return out
}
@@ -6,9 +6,14 @@ type MachinePoolPlatform struct {
 	// Zones is list of availability zones that can be used.
 	Zones []string `json:"zones,omitempty"`

-	// Subnets is the list of subnets to which to attach the machines.
-	// There must be exactly one private subnet for each availability zone used.
-	// If public subnets are specified, there must be exactly one private and one public subnet specified for each availability zone.
+	// Subnets is the list of IDs of subnets to which to attach the machines.
+	// There must be exactly one subnet for each availability zone used.
+	// These subnets may be public or private.
+	// As a special case, for consistency with install-config, you may specify exactly one
+	// private and one public subnet for each availability zone. In this case, the public
+	// subnets will be filtered out and only the private subnets will be used.
+	// If empty/omitted, we will look for subnets in each availability zone tagged with
+	// Name=<clusterID>-private-<az>.
 	Subnets []string `json:"subnets,omitempty"`

 	// InstanceType defines the ec2 instance type.

@@ -21,6 +26,10 @@ type MachinePoolPlatform struct {
 	// SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
 	// +optional
 	SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
+
+	// EC2MetadataOptions defines metadata service interaction options for EC2 instances in the machine pool.
+	// +optional
+	EC2Metadata *EC2Metadata `json:"metadataService,omitempty"`
 }

 // SpotMarketOptions defines the options available to a user when configuring

@@ -48,3 +57,16 @@ type EC2RootVolume struct {
 	// +optional
 	KMSKeyARN string `json:"kmsKeyARN,omitempty"`
 }
+
+// EC2Metadata defines the metadata service interaction options for an ec2 instance.
+// https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html
+type EC2Metadata struct {
+	// Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service.
+	// When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service.
+	// When omitted, this means the user has no opinion and the value is left to the platform to choose a good
+	// default, which is subject to change over time. The current default is optional.
+	// At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API
+	// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html
+	// +optional
+	Authentication string `json:"authentication,omitempty"`
+}
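Taken together, the hunks above extend hive's AWS machine pools with subnet pinning and IMDS control. A hedged sketch of how the new fields compose (the import alias, subnet ID, and instance type are illustrative; "Required" assumes the usual Required/Optional value set for EC2's HttpTokens parameter):

    import hivev1aws "github.com/openshift/hive/apis/hive/v1/aws"

    pool := hivev1aws.MachinePoolPlatform{
    	Zones:        []string{"us-east-1a"},
    	Subnets:      []string{"subnet-0123456789abcdef0"}, // exactly one per AZ used
    	InstanceType: "m5.xlarge",
    	// Enforce IMDSv2 for the pool's instances.
    	EC2Metadata: &hivev1aws.EC2Metadata{Authentication: "Required"},
    }
    _ = pool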
@@ -39,6 +39,13 @@ type PlatformStatus struct {
 // PrivateLinkAccess configures access to the cluster API using AWS PrivateLink
 type PrivateLinkAccess struct {
 	Enabled bool `json:"enabled"`
+
+	// AdditionalAllowedPrincipals is a list of additional allowed principal ARNs to be configured
+	// for the Private Link cluster's VPC Endpoint Service.
+	// ARNs provided as AdditionalAllowedPrincipals will be configured for the cluster's VPC Endpoint
+	// Service in addition to the IAM entity used by Hive.
+	// +optional
+	AdditionalAllowedPrincipals *[]string `json:"additionalAllowedPrincipals,omitempty"`
 }

 // PrivateLinkAccessStatus contains the observed state for PrivateLinkAccess resources.

@@ -54,6 +61,15 @@ type PrivateLinkAccessStatus struct {
 type VPCEndpointService struct {
 	Name string `json:"name,omitempty"`
 	ID   string `json:"id,omitempty"`
+	// DefaultAllowedPrincipal is the ARN of the IAM entity used by Hive as configured for the Private
+	// Link cluster's VPC Endpoint Service.
+	// +optional
+	DefaultAllowedPrincipal *string `json:"defaultAllowedPrincipal,omitempty"`
+	// AdditionalAllowedPrincipals is a list of additional allowed principal ARNs that have been configured
+	// for the Private Link cluster's VPC Endpoint Service. This list in Status is used to determine if a sync
+	// of Allowed Principals is needed outside of the regular reconcile period of 2hrs.
+	// +optional
+	AdditionalAllowedPrincipals *[]string `json:"additionalAllowedPrincipals,omitempty"`
 }

 // AssumeRole stores information for the IAM role that needs to be assumed
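Note the pointer-to-slice shape of AdditionalAllowedPrincipals (*[]string rather than []string), which lets callers distinguish "unset" from "explicitly empty"; it is also why the generated deepcopy in the next file allocates with new([]string). A hedged sketch using the same illustrative hivev1aws alias as above, with a placeholder ARN:

    privateLink := hivev1aws.PrivateLinkAccess{
    	Enabled: true,
    	// Principals granted access to the VPC Endpoint Service in
    	// addition to the IAM entity used by Hive itself.
    	AdditionalAllowedPrincipals: &[]string{"arn:aws:iam::123456789012:role/example"},
    }
    _ = privateLink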
vendor/github.com/openshift/hive/apis/hive/v1/aws/zz_generated.deepcopy.go | 50 (generated, vendored)

@@ -21,6 +21,22 @@ func (in *AssumeRole) DeepCopy() *AssumeRole {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EC2Metadata) DeepCopyInto(out *EC2Metadata) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2Metadata.
+func (in *EC2Metadata) DeepCopy() *EC2Metadata {
+	if in == nil {
+		return nil
+	}
+	out := new(EC2Metadata)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EC2RootVolume) DeepCopyInto(out *EC2RootVolume) {
 	*out = *in

@@ -56,6 +72,11 @@ func (in *MachinePoolPlatform) DeepCopyInto(out *MachinePoolPlatform) {
 		*out = new(SpotMarketOptions)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.EC2Metadata != nil {
+		in, out := &in.EC2Metadata, &out.EC2Metadata
+		*out = new(EC2Metadata)
+		**out = **in
+	}
 	return
 }

@@ -88,7 +109,7 @@ func (in *Platform) DeepCopyInto(out *Platform) {
 	if in.PrivateLink != nil {
 		in, out := &in.PrivateLink, &out.PrivateLink
 		*out = new(PrivateLinkAccess)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }

@@ -109,7 +130,7 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
 	if in.PrivateLink != nil {
 		in, out := &in.PrivateLink, &out.PrivateLink
 		*out = new(PrivateLinkAccessStatus)
-		**out = **in
+		(*in).DeepCopyInto(*out)
 	}
 	return
 }

@@ -127,6 +148,15 @@ func (in *PlatformStatus) DeepCopy() *PlatformStatus {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PrivateLinkAccess) DeepCopyInto(out *PrivateLinkAccess) {
 	*out = *in
+	if in.AdditionalAllowedPrincipals != nil {
+		in, out := &in.AdditionalAllowedPrincipals, &out.AdditionalAllowedPrincipals
+		*out = new([]string)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]string, len(*in))
+			copy(*out, *in)
+		}
+	}
 	return
 }

@@ -143,7 +173,7 @@ func (in *PrivateLinkAccess) DeepCopy() *PrivateLinkAccess {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *PrivateLinkAccessStatus) DeepCopyInto(out *PrivateLinkAccessStatus) {
 	*out = *in
-	out.VPCEndpointService = in.VPCEndpointService
+	in.VPCEndpointService.DeepCopyInto(&out.VPCEndpointService)
 	return
 }

@@ -181,6 +211,20 @@ func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *VPCEndpointService) DeepCopyInto(out *VPCEndpointService) {
 	*out = *in
+	if in.DefaultAllowedPrincipal != nil {
+		in, out := &in.DefaultAllowedPrincipal, &out.DefaultAllowedPrincipal
+		*out = new(string)
+		**out = **in
+	}
+	if in.AdditionalAllowedPrincipals != nil {
+		in, out := &in.AdditionalAllowedPrincipals, &out.AdditionalAllowedPrincipals
+		*out = new([]string)
+		if **in != nil {
+			in, out := *in, *out
+			*out = make([]string, len(*in))
+			copy(*out, *in)
+		}
+	}
 	return
 }
@@ -2,5 +2,6 @@ package azure

 // Metadata contains Azure metadata (e.g. for uninstalling the cluster).
 type Metadata struct {
 	Region string `json:"region"`
+	// ResourceGroupName is the name of the resource group in which the cluster resources were created.
+	ResourceGroupName *string `json:"resourceGroupName"`
 }
@@ -50,7 +50,7 @@ func (e CloudEnvironment) Name() string {
 	return string(e)
 }

-//SetBaseDomain parses the baseDomainID and sets the related fields on azure.Platform
+// SetBaseDomain parses the baseDomainID and sets the related fields on azure.Platform
 func (p *Platform) SetBaseDomain(baseDomainID string) error {
 	parts := strings.Split(baseDomainID, "/")
 	p.BaseDomainResourceGroupName = parts[4]
@@ -69,6 +69,16 @@ type ClusterClaimCondition struct {
 // ClusterClaimConditionType is a valid value for ClusterClaimCondition.Type.
 type ClusterClaimConditionType string

+// ConditionType satisfies the conditions.Condition interface
+func (c ClusterClaimCondition) ConditionType() ConditionType {
+	return c.Type
+}
+
+// String satisfies the conditions.ConditionType interface
+func (t ClusterClaimConditionType) String() string {
+	return string(t)
+}
+
 const (
 	// ClusterClaimPendingCondition is set when a cluster has not yet been assigned and made ready to the claim.
 	ClusterClaimPendingCondition ClusterClaimConditionType = "Pending"
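The ConditionType()/String() pair added here reappears on ClusterDeploymentCondition and ClusterDeprovisionCondition later in this commit, which suggests hive routes all of these types through a generic conditions helper. A hypothetical reconstruction of the interfaces the doc comments name — the real definitions live elsewhere in hive and are not part of this diff:

    // Hypothetical sketch, inferred from the comments above.
    type ConditionType interface {
    	String() string
    }

    type Condition interface {
    	ConditionType() ConditionType
    }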
vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go | 59 (generated, vendored)

@@ -1,6 +1,7 @@
 package v1

 import (
+	configv1 "github.com/openshift/api/config/v1"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -30,10 +31,6 @@ const (
 	// alert on cluster types differently.
 	HiveClusterTypeLabel = "hive.openshift.io/cluster-type"

-	// DefaultClusterType will be used when the above HiveClusterTypeLabel is unset. This
-	// value will not be added as a label, only used for metrics vectors.
-	DefaultClusterType = "unspecified"
-
 	// HiveInstallLogLabel is used on ConfigMaps uploaded by the install manager which contain an install log.
 	HiveInstallLogLabel = "hive.openshift.io/install-log"

@@ -235,10 +232,16 @@ type Provisioning struct {
 	// that will take precedence over the one from the ClusterImageSet.
 	ImageSetRef *ClusterImageSetReference `json:"imageSetRef,omitempty"`

-	// ManifestsConfigMapRef is a reference to user-provided manifests to
-	// add to or replace manifests that are generated by the installer.
+	// ManifestsConfigMapRef is a reference to user-provided manifests to add to or replace manifests
+	// that are generated by the installer. It serves the same purpose as, and is mutually exclusive
+	// with, ManifestsSecretRef.
 	ManifestsConfigMapRef *corev1.LocalObjectReference `json:"manifestsConfigMapRef,omitempty"`
+
+	// ManifestsSecretRef is a reference to user-provided manifests to add to or replace manifests
+	// that are generated by the installer. It serves the same purpose as, and is mutually exclusive
+	// with, ManifestsConfigMapRef.
+	ManifestsSecretRef *corev1.LocalObjectReference `json:"manifestsSecretRef,omitempty"`

 	// SSHPrivateKeySecretRef is the reference to the secret that contains the private SSH key to use
 	// for access to compute instances. This private key should correspond to the public key included
 	// in the InstallConfig. The private key is used by Hive to gather logs on the target cluster if

@@ -276,6 +279,10 @@ type ClusterPoolReference struct {
 	// ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for
 	// ClusterDeployments belonging to ClusterPools.
 	ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"`
+	// CustomizationRef is the ClusterPool Inventory claimed customization for this ClusterDeployment.
+	// The Customization exists in the ClusterPool namespace.
+	// +optional
+	CustomizationRef *corev1.LocalObjectReference `json:"clusterDeploymentCustomization,omitempty"`
 }

 // ClusterMetadata contains metadata information about the installed cluster.

@@ -293,6 +300,16 @@ type ClusterMetadata struct {
 	// AdminPasswordSecretRef references the secret containing the admin username/password which can be used to login to this cluster.
 	// +optional
 	AdminPasswordSecretRef *corev1.LocalObjectReference `json:"adminPasswordSecretRef,omitempty"`
+
+	// Platform holds platform-specific cluster metadata
+	// +optional
+	Platform *ClusterPlatformMetadata `json:"platform,omitempty"`
+}
+
+type ClusterPlatformMetadata struct {
+	// Azure holds azure-specific cluster metadata
+	// +optional
+	Azure *azure.Metadata `json:"azure,omitempty"`
 }

 // ClusterDeploymentStatus defines the observed state of ClusterDeployment

@@ -374,6 +391,16 @@ type ClusterDeploymentCondition struct {
 // ClusterDeploymentConditionType is a valid value for ClusterDeploymentCondition.Type
 type ClusterDeploymentConditionType string

+// ConditionType satisfies the conditions.Condition interface
+func (c ClusterDeploymentCondition) ConditionType() ConditionType {
+	return c.Type
+}
+
+// String satisfies the conditions.ConditionType interface
+func (t ClusterDeploymentConditionType) String() string {
+	return string(t)
+}
+
 const (
 	// InstallerImageResolutionFailedCondition is a condition that indicates whether the job
 	// to determine the installer image based on a release image was successful.

@@ -455,6 +482,10 @@ const (
 	ClusterInstallCompletedClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallCompleted"
 	ClusterInstallStoppedClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallStopped"
 	ClusterInstallRequirementsMetClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallRequirementsMet"
+
+	// ClusterImageSetNotFoundCondition is a legacy condition type that is not intended to be used
+	// in production. This type is never used by hive.
+	ClusterImageSetNotFoundCondition ClusterDeploymentConditionType = "ClusterImageSetNotFound"
 )

 // PositivePolarityClusterDeploymentConditions is a slice containing all condition types with positive polarity

@@ -485,8 +516,7 @@ const (
 	// Hibernating state.
 	HibernatingReasonHibernating = string(ClusterPowerStateHibernating)
 	// HibernatingReasonUnsupported is used as the reason when the cluster spec
-	// specifies that the cluster be moved to a Hibernating state, but either the cluster
-	// version is not compatible with hibernation (< 4.4.8) or the cloud provider of
+	// specifies that the cluster be moved to a Hibernating state, but the cloud provider of
 	// the cluster is not supported.
 	HibernatingReasonUnsupported = "Unsupported"
 	// HibernatingReasonFailedToStop is used when there was an error stopping machines

@@ -502,6 +532,9 @@ const (
 	// HibernatingReasonPowerStatePaused indicates that we can't/won't discover the state of the
 	// cluster's cloud machines because the powerstate-paused annotation is set.
 	HibernatingReasonPowerStatePaused = "PowerStatePaused"
+	// HibernatingReasonClusterDeploymentDeleted indicates that a Cluster Deployment has been deleted
+	// and that the cluster is deprovisioning unless preserveOnDelete is set to true.
+	HibernatingReasonClusterDeploymentDeleted = "ClusterDeploymentDeleted"

 	// ReadyReasonStoppingOrHibernating is used as the reason for the Ready condition when the cluster
 	// is stopping or hibernating. Precise details are available in the Hibernating condition.

@@ -525,6 +558,9 @@ const (
 	// ReadyReasonPowerStatePaused indicates that we can't/won't discover the state of the
 	// cluster's cloud machines because the powerstate-paused annotation is set.
 	ReadyReasonPowerStatePaused = "PowerStatePaused"
+	// ReadyReasonClusterDeploymentDeleted indicates that a Cluster Deployment has been deleted
+	// and that the cluster is deprovisioning unless preserveOnDelete is set to true.
+	ReadyReasonClusterDeploymentDeleted = "ClusterDeploymentDeleted"
 )

 // Provisioned status condition reasons

@@ -556,7 +592,7 @@ const InitializedConditionReason = "Initialized"
 // +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.clusterMetadata.infraID"
 // +kubebuilder:printcolumn:name="Platform",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-platform"
 // +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-region"
-// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/version-major-minor-patch"
+// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/version"
 // +kubebuilder:printcolumn:name="ClusterType",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-type"
 // +kubebuilder:printcolumn:name="ProvisionStatus",type="string",JSONPath=".status.conditions[?(@.type=='Provisioned')].reason"
 // +kubebuilder:printcolumn:name="PowerState",type="string",JSONPath=".status.powerState"

@@ -652,6 +688,10 @@ type ClusterIngress struct {
 	// should be used for this Ingress
 	// +optional
 	ServingCertificate string `json:"servingCertificate,omitempty"`
+
+	// HttpErrorCodePages allows configuring custom HTTP error pages using the IngressController object
+	// +optional
+	HttpErrorCodePages *configv1.ConfigMapNameReference `json:"httpErrorCodePages,omitempty"`
 }

 // ControlPlaneConfigSpec contains additional configuration settings for a target

@@ -672,6 +712,7 @@ type ControlPlaneConfigSpec struct {
 	// APIServerIPOverride is the optional override of the API server IP address.
 	// Hive will use this IP address for creating TCP connections.
 	// Port from the original API server URL will be used.
+	// This field can be used when repointing the APIServer's DNS is not viable option.
 	// +optional
 	APIServerIPOverride string `json:"apiServerIPOverride,omitempty"`
 }
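Among the changes above, ClusterMetadata gains a platform-specific section whose only member so far is Azure, matching the azure.Metadata change earlier in the diff. A hedged sketch of populating it (names and values are illustrative):

    import (
    	hivev1 "github.com/openshift/hive/apis/hive/v1"
    	"github.com/openshift/hive/apis/hive/v1/azure"
    	"k8s.io/utils/pointer"
    )

    meta := hivev1.ClusterMetadata{
    	Platform: &hivev1.ClusterPlatformMetadata{
    		Azure: &azure.Metadata{
    			Region:            "eastus",
    			ResourceGroupName: pointer.String("example-cluster-rg"),
    		},
    	},
    }
    _ = meta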
99
vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go
сгенерированный
поставляемый
Normal file
99
vendor/github.com/openshift/hive/apis/hive/v1/clusterdeploymentcustomization_types.go (generated, vendored, new file)
@@ -0,0 +1,99 @@
package v1

import (
	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	// CustomizationApplyReasonSucceeded indicates that the customization
	// worked properly on the last applied cluster deployment.
	CustomizationApplyReasonSucceeded = "Succeeded"
	// CustomizationApplyReasonBrokenSyntax indicates that Hive failed to apply
	// customization patches on install-config. More details can be found in the
	// ApplySucceeded condition message.
	CustomizationApplyReasonBrokenSyntax = "BrokenBySyntax"
	// CustomizationApplyReasonBrokenCloud indicates that cluster deployment provisioning has failed
	// when using this customization. More details can be found in the ApplySucceeded condition message.
	CustomizationApplyReasonBrokenCloud = "BrokenByCloud"
	// CustomizationApplyReasonInstallationPending indicates that the customization patches have
	// been successfully applied but provisioning is not completed yet.
	CustomizationApplyReasonInstallationPending = "InstallationPending"
)

// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterDeploymentCustomization is the Schema for clusterdeploymentcustomizations API.
// +kubebuilder:subresource:status
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type ClusterDeploymentCustomization struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   ClusterDeploymentCustomizationSpec   `json:"spec"`
	Status ClusterDeploymentCustomizationStatus `json:"status,omitempty"`
}

// ClusterDeploymentCustomizationSpec defines the desired state of ClusterDeploymentCustomization.
type ClusterDeploymentCustomizationSpec struct {
	// InstallConfigPatches is a list of patches to be applied to the install-config.
	InstallConfigPatches []PatchEntity `json:"installConfigPatches,omitempty"`
}

// PatchEntity represents a JSON patch (RFC 6902) to be applied to the install-config.
type PatchEntity struct {
	// Op is the operation to perform: add, remove, replace, move, copy, test
	// +required
	Op string `json:"op"`
	// Path is the JSON path to the value to be modified
	// +required
	Path string `json:"path"`
	// From is the JSON path to copy or move the value from
	// +optional
	From string `json:"from,omitempty"`
	// Value is the value to be used in the operation
	// +required
	Value string `json:"value"`
}

// ClusterDeploymentCustomizationStatus defines the observed state of ClusterDeploymentCustomization.
type ClusterDeploymentCustomizationStatus struct {
	// ClusterDeploymentRef is a reference to the cluster deployment that this customization is applied on.
	// +optional
	ClusterDeploymentRef *corev1.LocalObjectReference `json:"clusterDeploymentRef,omitempty"`

	// ClusterPoolRef is the name of the cluster pool that the CDC is currently used in.
	// +optional
	ClusterPoolRef *corev1.LocalObjectReference `json:"clusterPoolRef,omitempty"`

	// LastAppliedConfiguration contains the last applied patches to the install-config.
	// The information is retained for reference in case the customization is updated.
	// +optional
	LastAppliedConfiguration string `json:"lastAppliedConfiguration,omitempty"`

	// Conditions describes the state of the operator's reconciliation functionality.
	// +patchMergeKey=type
	// +patchStrategy=merge
	// +optional
	Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
}

const (
	ApplySucceededCondition conditionsv1.ConditionType = "ApplySucceeded"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterDeploymentCustomizationList contains a list of ClusterDeploymentCustomizations.
type ClusterDeploymentCustomizationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []ClusterDeploymentCustomization `json:"items"`
}

func init() {
	SchemeBuilder.Register(&ClusterDeploymentCustomization{}, &ClusterDeploymentCustomizationList{})
}
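For orientation, here is a minimal sketch (not part of the diff) of a ClusterDeploymentCustomization carrying one RFC 6902 patch for the install-config. The object names, patch path, and value are invented for illustration; the types and import path come from the file above.

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A CDC that overrides the machine CIDR in the generated install-config.
	// Path and value are illustrative assumptions, not taken from this diff.
	cdc := hivev1.ClusterDeploymentCustomization{
		ObjectMeta: metav1.ObjectMeta{Name: "cidr-override", Namespace: "pool-ns"},
		Spec: hivev1.ClusterDeploymentCustomizationSpec{
			InstallConfigPatches: []hivev1.PatchEntity{
				{Op: "replace", Path: "/networking/machineNetwork/0/cidr", Value: "10.1.0.0/16"},
			},
		},
	}
	fmt.Println(cdc.Spec.InstallConfigPatches[0].Path)
}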
14  vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go (generated, vendored)
@@ -89,6 +89,10 @@ type AzureClusterDeprovision struct {
	// If empty, the value is equal to "AzurePublicCloud".
	// +optional
	CloudName *azure.CloudEnvironment `json:"cloudName,omitempty"`
	// ResourceGroupName is the name of the resource group where the cluster was installed.
	// Required for new deprovisions (schema notwithstanding).
	// +optional
	ResourceGroupName *string `json:"resourceGroupName,omitempty"`
}

// GCPClusterDeprovision contains GCP-specific configuration for a ClusterDeprovision

@@ -187,6 +191,16 @@ type ClusterDeprovisionCondition struct {
// ClusterDeprovisionConditionType is a valid value for ClusterDeprovisionCondition.Type
type ClusterDeprovisionConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c ClusterDeprovisionCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t ClusterDeprovisionConditionType) String() string {
	return string(t)
}

const (
	// AuthenticationFailureClusterDeprovisionCondition is true when credentials cannot be used because of authentication failure
	AuthenticationFailureClusterDeprovisionCondition ClusterDeprovisionConditionType = "AuthenticationFailure"
22  vendor/github.com/openshift/hive/apis/hive/v1/clusterinstall_conditions.go (generated, vendored)
@@ -10,7 +10,7 @@ import (
// ClusterInstallCondition contains details for the current condition of a cluster install.
type ClusterInstallCondition struct {
	// Type is the type of the condition.
	Type string `json:"type"`
	Type ClusterInstallConditionType `json:"type"`
	// Status is the status of the condition.
	Status corev1.ConditionStatus `json:"status"`
	// LastProbeTime is the last time we probed the condition.

@@ -27,20 +27,32 @@ type ClusterInstallCondition struct {
	Message string `json:"message,omitempty"`
}

type ClusterInstallConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c ClusterInstallCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t ClusterInstallConditionType) String() string {
	return string(t)
}

const (
	// ClusterInstallRequirementsMet is True when all pre-install requirements have been met.
	ClusterInstallRequirementsMet = "RequirementsMet"
	ClusterInstallRequirementsMet ClusterInstallConditionType = "RequirementsMet"

	// ClusterInstallCompleted is True when the requested install has been completed successfully.
	ClusterInstallCompleted = "Completed"
	ClusterInstallCompleted ClusterInstallConditionType = "Completed"

	// ClusterInstallFailed is True when an attempt to install the cluster has failed.
	// The ClusterInstall controllers may still be retrying if supported, and this condition will
	// go back to False if a later attempt succeeds.
	ClusterInstallFailed = "Failed"
	ClusterInstallFailed ClusterInstallConditionType = "Failed"

	// ClusterInstallStopped is True when the controllers are no longer working on this
	// ClusterInstall. Combine with Completed or Failed to know if the overall request was
	// successful or not.
	ClusterInstallStopped = "Stopped"
	ClusterInstallStopped ClusterInstallConditionType = "Stopped"
)
vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go (generated, vendored)
@@ -92,6 +92,11 @@ type ClusterPoolSpec struct {
	// HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool.
	// +optional
	HibernationConfig *HibernationConfig `json:"hibernationConfig"`

	// Inventory maintains a list of entries consumed by the ClusterPool
	// to customize the default ClusterDeployment.
	// +optional
	Inventory []InventoryEntry `json:"inventory,omitempty"`
}

type HibernationConfig struct {

@@ -110,6 +115,22 @@ type HibernationConfig struct {
	ResumeTimeout metav1.Duration `json:"resumeTimeout"`
}

// InventoryEntryKind is the Kind of the inventory entry.
// +kubebuilder:validation:Enum="";ClusterDeploymentCustomization
type InventoryEntryKind string

const ClusterDeploymentCustomizationInventoryEntry InventoryEntryKind = "ClusterDeploymentCustomization"

// InventoryEntry maintains a reference to a custom resource consumed by a ClusterPool to customize the cluster deployment.
type InventoryEntry struct {
	// Kind denotes the kind of the referenced resource. The default is ClusterDeploymentCustomization, which is also currently the only supported value.
	// +kubebuilder:default=ClusterDeploymentCustomization
	Kind InventoryEntryKind `json:"kind,omitempty"`
	// Name is the name of the referenced resource.
	// +required
	Name string `json:"name,omitempty"`
}

// ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool.
type ClusterPoolClaimLifetime struct {
	// Default is the default lifetime of the claim when no lifetime is set on the claim itself.

@@ -177,6 +198,16 @@ type ClusterPoolCondition struct {
// ClusterPoolConditionType is a valid value for ClusterPoolCondition.Type
type ClusterPoolConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c ClusterPoolCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t ClusterPoolConditionType) String() string {
	return string(t)
}

const (
	// ClusterPoolMissingDependenciesCondition is set when a cluster pool is missing dependencies required to create a
	// cluster. Dependencies include resources such as the ClusterImageSet and the credentials Secret.

@@ -187,6 +218,20 @@ const (
	// ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready)
	// ClusterDeployments in the pool match the current configuration of the ClusterPool.
	ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent"
	// ClusterPoolInventoryValidCondition is set to provide information on whether the cluster pool inventory is valid.
	ClusterPoolInventoryValidCondition ClusterPoolConditionType = "InventoryValid"
	// ClusterPoolDeletionPossibleCondition gives information about a deleted ClusterPool which is pending cleanup.
	// Note that it is normal for this condition to remain Initialized/Unknown until the ClusterPool is deleted.
	ClusterPoolDeletionPossibleCondition ClusterPoolConditionType = "DeletionPossible"
)

const (
	// InventoryReasonValid is used when all ClusterDeploymentCustomizations are
	// available and, when used, the ClusterDeployments are successfully installed.
	InventoryReasonValid = "Valid"
	// InventoryReasonInvalid is used when there is something wrong with a ClusterDeploymentCustomization, for example
	// a patching issue, a provisioning failure, a missing resource, etc.
	InventoryReasonInvalid = "Invalid"
)

// +genclient
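The Inventory field above is what ties ClusterDeploymentCustomizations to a pool. A hedged sketch, reusing the hypothetical "cidr-override" customization from the earlier example; all other pool fields are elided:

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

func main() {
	// Kind may be omitted; it defaults to ClusterDeploymentCustomization,
	// currently the only supported value.
	spec := hivev1.ClusterPoolSpec{
		Inventory: []hivev1.InventoryEntry{
			{Kind: hivev1.ClusterDeploymentCustomizationInventoryEntry, Name: "cidr-override"},
		},
	}
	fmt.Println(spec.Inventory[0].Name)
}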
18  vendor/github.com/openshift/hive/apis/hive/v1/clusterprovision_types.go (generated, vendored)
@@ -31,8 +31,16 @@ type ClusterProvisionSpec struct {
	InstallLog *string `json:"installLog,omitempty"`

	// Metadata is the metadata.json generated by the installer, providing metadata information about the cluster created.
	// NOTE: This is not used because it didn't work (it was always empty). We think this is because the thing it's storing
	// (ClusterMetadata from the installer) is not a runtime.Object, so it can't be put in a RawExtension.
	Metadata *runtime.RawExtension `json:"metadata,omitempty"`

	// MetadataJSON is a JSON representation of the ClusterMetadata produced by the installer. We don't use a
	// runtime.RawExtension because ClusterMetadata isn't a runtime.Object. We don't use ClusterMetadata itself
	// because we don't want our API consumers to need to pull in the installer code and its dependencies.
	// +optional
	MetadataJSON []byte `json:"metadataJSON,omitempty"`

	// AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster.
	AdminKubeconfigSecretRef *corev1.LocalObjectReference `json:"adminKubeconfigSecretRef,omitempty"`

@@ -96,6 +104,16 @@ type ClusterProvisionCondition struct {
// ClusterProvisionConditionType is a valid value for ClusterProvisionCondition.Type
type ClusterProvisionConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c ClusterProvisionCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t ClusterProvisionConditionType) String() string {
	return string(t)
}

const (
	// ClusterProvisionInitializedCondition is set when a cluster provision has finished initialization.
	ClusterProvisionInitializedCondition ClusterProvisionConditionType = "ClusterProvisionInitialized"
11  vendor/github.com/openshift/hive/apis/hive/v1/conditions.go (generated, vendored, new file)
@@ -0,0 +1,11 @@
package v1

import "fmt"

type Condition interface {
	ConditionType() ConditionType
}

type ConditionType interface {
	fmt.Stringer
}
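Because every condition type touched in this commit now satisfies these two small interfaces, condition handling can be written once. A sketch with a hypothetical findCondition helper (not part of Hive), using Go generics:

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

// findCondition returns the first condition whose type's string form matches
// want. It works for any of the condition structs in this diff because they
// all implement hivev1.Condition now.
func findCondition[C hivev1.Condition](conds []C, want string) (C, bool) {
	var zero C
	for _, c := range conds {
		if c.ConditionType().String() == want {
			return c, true
		}
	}
	return zero, false
}

func main() {
	conds := []hivev1.ClusterDeprovisionCondition{
		{Type: hivev1.AuthenticationFailureClusterDeprovisionCondition},
	}
	if c, ok := findCondition(conds, "AuthenticationFailure"); ok {
		fmt.Println("found:", c.ConditionType())
	}
}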
vendor/github.com/openshift/hive/apis/hive/v1/dnszone_types.go (generated, vendored)
@@ -179,6 +179,16 @@ type DNSZoneCondition struct {
// DNSZoneConditionType is a valid value for DNSZoneCondition.Type
type DNSZoneConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c DNSZoneCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t DNSZoneConditionType) String() string {
	return string(t)
}

const (
	// ZoneAvailableDNSZoneCondition is true if the DNSZone is responding to DNS queries
	ZoneAvailableDNSZoneCondition DNSZoneConditionType = "ZoneAvailable"
vendor/github.com/openshift/hive/apis/hive/v1/hiveconfig_types.go (generated, vendored)
@@ -90,6 +90,10 @@ type HiveConfigSpec struct {
	// +optional
	ControllersConfig *ControllersConfig `json:"controllersConfig,omitempty"`

	// DeploymentConfig is used to configure (pods/containers of) the Deployments generated by hive-operator.
	// +optional
	DeploymentConfig *[]DeploymentConfig `json:"deploymentConfig,omitempty"`

	// AWSPrivateLink defines the configuration for the aws-private-link controller.
	// It provides 3 major pieces of information required by the controller,
	// 1. The Credentials that should be used to create AWS PrivateLink resources other than

@@ -139,11 +143,14 @@ type HiveConfigSpec struct {

	FeatureGates *FeatureGateSelection `json:"featureGates,omitempty"`

	// ExportMetrics specifies whether the operator should enable metrics for hive controllers
	// to be extracted for prometheus.
	// When set to true, the operator deploys ServiceMonitors so that the prometheus instances that
	// extract metrics. The operator also sets up RBAC in the TargetNamespace so that openshift
	// prometheus in the cluster can list/access objects required to pull metrics.
	// ExportMetrics has been disabled and has no effect. If upgrading from a version where it was
	// active, please be aware of the following in your HiveConfig.Spec.TargetNamespace (default
	// `hive` if unset):
	// 1) ServiceMonitors named hive-controllers and hive-clustersync;
	// 2) Role and RoleBinding named prometheus-k8s;
	// 3) The `openshift.io/cluster-monitoring` metadata.label on the Namespace itself.
	// You may wish to delete these resources. Or you may wish to continue using them to enable
	// monitoring in your environment; but be aware that hive will no longer reconcile them.
	ExportMetrics bool `json:"exportMetrics,omitempty"`

	// MetricsConfig encapsulates metrics specific configurations, like opting in for certain metrics.

@@ -599,6 +606,27 @@ type ControllersConfig struct {
	Controllers []SpecificControllerConfig `json:"controllers,omitempty"`
}

type DeploymentName string

const (
	DeploymentNameControllers DeploymentName = "hive-controllers"
	DeploymentNameClustersync DeploymentName = "hive-clustersync"
	DeploymentNameAdmission   DeploymentName = "hiveadmission"
)

type DeploymentConfig struct {
	// DeploymentName is the name of one of the Deployments/StatefulSets managed by hive-operator.
	// NOTE: At this time each deployment has only one container. In the future, we may provide a
	// way to specify which container this DeploymentConfig will be applied to.
	// +kubebuilder:validation:Enum=hive-controllers;hive-clustersync;hiveadmission
	DeploymentName DeploymentName `json:"deploymentName"`

	// Resources allows customization of the resource (memory, CPU, etc.) limits and requests used
	// by containers in the Deployment/StatefulSet named by DeploymentName.
	// +optional
	Resources *corev1.ResourceRequirements `json:"resources"`
}

// +genclient:nonNamespaced
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
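A hedged sketch of the new DeploymentConfig knob: capping memory for the hive-clustersync deployment. The field names and constants come from the hunk above; the quantity is an illustrative assumption.

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	dcs := []hivev1.DeploymentConfig{{
		DeploymentName: hivev1.DeploymentNameClustersync,
		Resources: &corev1.ResourceRequirements{
			Limits: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("512Mi"),
			},
		},
	}}
	spec := hivev1.HiveConfigSpec{DeploymentConfig: &dcs}
	fmt.Println((*spec.DeploymentConfig)[0].DeploymentName)
}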
vendor/github.com/openshift/hive/apis/hive/v1/machinepool_types.go (generated, vendored)
@@ -16,7 +16,7 @@ import (
const (
	// MachinePoolImageIDOverrideAnnotation can be applied to MachinePools to control the precise image ID to be used
	// for the MachineSets we reconcile for this pool. This feature is presently only implemented for AWS, and
	// for the MachineSets we reconcile for this pool. This feature is presently only implemented for AWS and GCP, and
	// is intended for very limited use cases we do not recommend pursuing regularly. As such it is not currently
	// part of our official API.
	MachinePoolImageIDOverrideAnnotation = "hive.openshift.io/image-id-override"

@@ -156,6 +156,16 @@ type MachinePoolCondition struct {
// MachinePoolConditionType is a valid value for MachinePoolCondition.Type
type MachinePoolConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c MachinePoolCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t MachinePoolConditionType) String() string {
	return string(t)
}

const (
	// NotEnoughReplicasMachinePoolCondition is true when the minReplicas field
	// is set too low for the number of machinesets for the machine pool.
7  vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/metricsConfig.go (generated, vendored, deleted)
@@ -1,7 +0,0 @@
package metricsconfig

type MetricsConfig struct {
	// Optional metrics and their configurations
	// +optional
	MetricsWithDuration []MetricsWithDuration `json:"metricsWithDuration"`
}
20  vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/metrics_config.go (generated, vendored, new file)
@@ -0,0 +1,20 @@
package metricsconfig

type MetricsConfig struct {
	// Optional metrics and their configurations
	// +optional
	MetricsWithDuration []MetricsWithDuration `json:"metricsWithDuration,omitempty"`
	// AdditionalClusterDeploymentLabels allows configuration of additional labels to be applied to certain metrics.
	// The keys can be any string value suitable for a metric label (see https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels).
	// The values can be any ClusterDeployment label key (from metadata.labels). When observing an affected metric,
	// hive will label it with the specified metric key, and copy the value from the specified ClusterDeployment label.
	// For example, including {"ocp_major_version": "hive.openshift.io/version-major"} will cause affected metrics to
	// include a label key ocp_major_version with the value from the hive.openshift.io/version-major ClusterDeployment
	// label -- e.g. "4".
	// NOTE: Avoid ClusterDeployment labels whose values are unbounded, such as those representing cluster names or IDs,
	// as these will cause your prometheus database to grow indefinitely.
	// Affected metrics are those whose type implements the metricsWithDynamicLabels interface found in
	// pkg/controller/metrics/metrics_with_dynamic_labels.go
	// +optional
	AdditionalClusterDeploymentLabels *map[string]string `json:"additionalClusterDeploymentLabels,omitempty"`
}
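A short sketch of the new AdditionalClusterDeploymentLabels field, reproducing the ocp_major_version example from the doc comment above; the surrounding program is scaffolding for illustration only.

package main

import (
	"fmt"

	"github.com/openshift/hive/apis/hive/v1/metricsconfig"
)

func main() {
	// Copy the ClusterDeployment's hive.openshift.io/version-major label
	// onto affected metrics under the metric label key ocp_major_version.
	labels := map[string]string{"ocp_major_version": "hive.openshift.io/version-major"}
	cfg := metricsconfig.MetricsConfig{AdditionalClusterDeploymentLabels: &labels}
	fmt.Println(*cfg.AdditionalClusterDeploymentLabels)
}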
11  vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/zz_generated.deepcopy.go (generated, vendored)
@@ -19,6 +19,17 @@ func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AdditionalClusterDeploymentLabels != nil {
		in, out := &in.AdditionalClusterDeploymentLabels, &out.AdditionalClusterDeploymentLabels
		*out = new(map[string]string)
		if **in != nil {
			in, out := *in, *out
			*out = make(map[string]string, len(*in))
			for key, val := range *in {
				(*out)[key] = val
			}
		}
	}
	return
}
vendor/github.com/openshift/hive/apis/hive/v1/syncset_types.go (generated, vendored)
@@ -106,6 +106,16 @@ type SecretMapping struct {
// SyncConditionType is a valid value for SyncCondition.Type
type SyncConditionType string

// ConditionType satisfies the conditions.Condition interface
func (c SyncCondition) ConditionType() ConditionType {
	return c.Type
}

// String satisfies the conditions.ConditionType interface
func (t SyncConditionType) String() string {
	return string(t)
}

const (
	// ApplySuccessSyncCondition indicates whether the resource or patch has been applied.
	ApplySuccessSyncCondition SyncConditionType = "ApplySuccess"
200  vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go (generated, vendored)
@@ -7,6 +7,7 @@ package v1

import (
	configv1 "github.com/openshift/api/config/v1"
	conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1"
	agent "github.com/openshift/hive/apis/hive/v1/agent"
	alibabacloud "github.com/openshift/hive/apis/hive/v1/alibabacloud"
	aws "github.com/openshift/hive/apis/hive/v1/aws"

@@ -676,6 +677,121 @@ func (in *ClusterDeploymentCondition) DeepCopy() *ClusterDeploymentCondition {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDeploymentCustomization) DeepCopyInto(out *ClusterDeploymentCustomization) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomization.
func (in *ClusterDeploymentCustomization) DeepCopy() *ClusterDeploymentCustomization {
	if in == nil {
		return nil
	}
	out := new(ClusterDeploymentCustomization)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterDeploymentCustomization) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDeploymentCustomizationList) DeepCopyInto(out *ClusterDeploymentCustomizationList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ClusterDeploymentCustomization, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationList.
func (in *ClusterDeploymentCustomizationList) DeepCopy() *ClusterDeploymentCustomizationList {
	if in == nil {
		return nil
	}
	out := new(ClusterDeploymentCustomizationList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterDeploymentCustomizationList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDeploymentCustomizationSpec) DeepCopyInto(out *ClusterDeploymentCustomizationSpec) {
	*out = *in
	if in.InstallConfigPatches != nil {
		in, out := &in.InstallConfigPatches, &out.InstallConfigPatches
		*out = make([]PatchEntity, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationSpec.
func (in *ClusterDeploymentCustomizationSpec) DeepCopy() *ClusterDeploymentCustomizationSpec {
	if in == nil {
		return nil
	}
	out := new(ClusterDeploymentCustomizationSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDeploymentCustomizationStatus) DeepCopyInto(out *ClusterDeploymentCustomizationStatus) {
	*out = *in
	if in.ClusterDeploymentRef != nil {
		in, out := &in.ClusterDeploymentRef, &out.ClusterDeploymentRef
		*out = new(corev1.LocalObjectReference)
		**out = **in
	}
	if in.ClusterPoolRef != nil {
		in, out := &in.ClusterPoolRef, &out.ClusterPoolRef
		*out = new(corev1.LocalObjectReference)
		**out = **in
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]conditionsv1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterDeploymentCustomizationStatus.
func (in *ClusterDeploymentCustomizationStatus) DeepCopy() *ClusterDeploymentCustomizationStatus {
	if in == nil {
		return nil
	}
	out := new(ClusterDeploymentCustomizationStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterDeploymentList) DeepCopyInto(out *ClusterDeploymentList) {
	*out = *in

@@ -1137,6 +1253,11 @@ func (in *ClusterIngress) DeepCopyInto(out *ClusterIngress) {
		*out = new(metav1.LabelSelector)
		(*in).DeepCopyInto(*out)
	}
	if in.HttpErrorCodePages != nil {
		in, out := &in.HttpErrorCodePages, &out.HttpErrorCodePages
		*out = new(configv1.ConfigMapNameReference)
		**out = **in
	}
	return
}

@@ -1341,6 +1462,11 @@ func (in *ClusterPoolReference) DeepCopyInto(out *ClusterPoolReference) {
		in, out := &in.ClaimedTimestamp, &out.ClaimedTimestamp
		*out = (*in).DeepCopy()
	}
	if in.CustomizationRef != nil {
		in, out := &in.CustomizationRef, &out.CustomizationRef
		*out = new(corev1.LocalObjectReference)
		**out = **in
	}
	return
}

@@ -1413,6 +1539,11 @@ func (in *ClusterPoolSpec) DeepCopyInto(out *ClusterPoolSpec) {
		*out = new(HibernationConfig)
		**out = **in
	}
	if in.Inventory != nil {
		in, out := &in.Inventory, &out.Inventory
		*out = make([]InventoryEntry, len(*in))
		copy(*out, *in)
	}
	return
}

@@ -2123,6 +2254,27 @@ func (in *DNSZoneStatus) DeepCopy() *DNSZoneStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(corev1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig.
func (in *DeploymentConfig) DeepCopy() *DeploymentConfig {
	if in == nil {
		return nil
	}
	out := new(DeploymentConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedProvisionAWSConfig) DeepCopyInto(out *FailedProvisionAWSConfig) {
	*out = *in

@@ -2410,6 +2562,17 @@ func (in *HiveConfigSpec) DeepCopyInto(out *HiveConfigSpec) {
		*out = new(ControllersConfig)
		(*in).DeepCopyInto(*out)
	}
	if in.DeploymentConfig != nil {
		in, out := &in.DeploymentConfig, &out.DeploymentConfig
		*out = new([]DeploymentConfig)
		if **in != nil {
			in, out := *in, *out
			*out = make([]DeploymentConfig, len(*in))
			for i := range *in {
				(*in)[i].DeepCopyInto(&(*out)[i])
			}
		}
	}
	if in.AWSPrivateLink != nil {
		in, out := &in.AWSPrivateLink, &out.AWSPrivateLink
		*out = new(AWSPrivateLinkConfig)

@@ -2500,6 +2663,22 @@ func (in *IdentityProviderStatus) DeepCopy() *IdentityProviderStatus {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InventoryEntry) DeepCopyInto(out *InventoryEntry) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InventoryEntry.
func (in *InventoryEntry) DeepCopy() *InventoryEntry {
	if in == nil {
		return nil
	}
	out := new(InventoryEntry)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubeconfigSecretReference) DeepCopyInto(out *KubeconfigSecretReference) {
	*out = *in

@@ -2989,6 +3168,22 @@ func (in *OvirtClusterDeprovision) DeepCopy() *OvirtClusterDeprovision {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PatchEntity) DeepCopyInto(out *PatchEntity) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchEntity.
func (in *PatchEntity) DeepCopy() *PatchEntity {
	if in == nil {
		return nil
	}
	out := new(PatchEntity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
	*out = *in

@@ -3099,6 +3294,11 @@ func (in *Provisioning) DeepCopyInto(out *Provisioning) {
		*out = new(corev1.LocalObjectReference)
		**out = **in
	}
	if in.ManifestsSecretRef != nil {
		in, out := &in.ManifestsSecretRef, &out.ManifestsSecretRef
		*out = new(corev1.LocalObjectReference)
		**out = **in
	}
	if in.SSHPrivateKeySecretRef != nil {
		in, out := &in.SSHPrivateKeySecretRef, &out.SSHPrivateKeySecretRef
		*out = new(corev1.LocalObjectReference)
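The value of all this generated boilerplate is mutation safety. A minimal sketch (assuming the hive apis module is on the import path) of the usual controller pattern, mutating a deep copy rather than a possibly cache-shared object:

package main

import (
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

func main() {
	original := &hivev1.ClusterDeploymentCustomization{}
	original.Status.LastAppliedConfiguration = "v1"

	// Mutate a deep copy, as a controller would before issuing a status
	// update, so the original stays untouched.
	updated := original.DeepCopy()
	updated.Status.LastAppliedConfiguration = "v2"

	fmt.Println(original.Status.LastAppliedConfiguration, updated.Status.LastAppliedConfiguration)
}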
vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go (vendored)
@@ -24,37 +24,36 @@ limitations under the License.
// Each API group should define a utility function
// called AddToScheme for adding its types to a Scheme:
//
//  // in package myapigroupv1...
//  var (
//  	SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
//  	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
//  	AddToScheme = SchemeBuilder.AddToScheme
//  )
//	// in package myapigroupv1...
//	var (
//		SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
//		SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
//		AddToScheme = SchemeBuilder.AddToScheme
//	)
//
//  func init() {
//  	SchemeBuilder.Register(&MyType{}, &MyTypeList)
//  }
//  var (
//  	scheme *runtime.Scheme = runtime.NewScheme()
//  )
//	func init() {
//		SchemeBuilder.Register(&MyType{}, &MyTypeList)
//	}
//	var (
//		scheme *runtime.Scheme = runtime.NewScheme()
//	)
//
// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
// your manager, assemble the scheme containing exactly the types you need,
// panicking if scheme registration failed. For instance, if our controller needs
// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
//
//  func init() {
//  	utilruntime.Must(myapigroupv1.AddToScheme(scheme))
//  	utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
//  }
//
//  func main() {
//  	mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
//  		Scheme: scheme,
//  	})
//  	// ...
//  }
//	func init() {
//		utilruntime.Must(myapigroupv1.AddToScheme(scheme))
//		utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
//	}
//
//	func main() {
//		mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
//			Scheme: scheme,
//		})
//		// ...
//	}
package scheme

import (
vendor/k8s.io/klog/v2/OWNERS (vendored)
@@ -1,5 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners
reviewers:
  - harshanarayana
  - pohly
approvers:
  - dims
vendor/k8s.io/klog/v2/contextual.go (vendored)
@@ -47,8 +47,9 @@ var (
// If set, all log lines will be suppressed from the regular output, and
// redirected to the logr implementation.
// Use as:
//   ...
//   klog.SetLogger(zapr.NewLogger(zapLog))
//
//	...
//	klog.SetLogger(zapr.NewLogger(zapLog))
//
// To remove a backing logr implementation, use ClearLogger. Setting an
// empty logger with SetLogger(logr.Logger{}) does not work.

@@ -69,11 +70,14 @@ func SetLogger(logger logr.Logger) {
// routing log entries through klogr into klog and then into the actual Logger
// backend.
func SetLoggerWithOptions(logger logr.Logger, opts ...LoggerOption) {
	logging.logger = &logger
	logging.loggerOptions = loggerOptions{}
	for _, opt := range opts {
		opt(&logging.loggerOptions)
	}
	logging.logger = &logWriter{
		Logger:          logger,
		writeKlogBuffer: logging.loggerOptions.writeKlogBuffer,
	}
}

// ContextualLogger determines whether the logger passed to

@@ -92,6 +96,22 @@ func FlushLogger(flush func()) LoggerOption {
	}
}

// WriteKlogBuffer sets a callback that will be invoked by klog to write output
// produced by non-structured log calls like Infof.
//
// The buffer will contain exactly the same data that klog normally would write
// into its own output stream(s). In particular this includes the header, if
// klog is configured to write one. The callback then can divert that data into
// its own output streams. The buffer may or may not end in a line break.
//
// Without such a callback, klog will call the logger's Info or Error method
// with just the message string (i.e. no header).
func WriteKlogBuffer(write func([]byte)) LoggerOption {
	return func(o *loggerOptions) {
		o.writeKlogBuffer = write
	}
}

// LoggerOption implements the functional parameter paradigm for
// SetLoggerWithOptions.
type LoggerOption func(o *loggerOptions)

@@ -99,6 +119,13 @@ type LoggerOption func(o *loggerOptions)
type loggerOptions struct {
	contextualLogger bool
	flush            func()
	writeKlogBuffer  func([]byte)
}

// logWriter combines a logger (always set) with a write callback (optional).
type logWriter struct {
	Logger
	writeKlogBuffer func([]byte)
}

// ClearLogger removes a backing Logger implementation if one was set earlier

@@ -151,7 +178,7 @@ func Background() Logger {
	if logging.loggerOptions.contextualLogger {
		// Is non-nil because logging.loggerOptions.contextualLogger is
		// only true if a logger was set.
		return *logging.logger
		return logging.logger.Logger
	}

	return klogLogger
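A sketch of the new WriteKlogBuffer option in use, routing klog's fully formatted text output to stdout. It assumes go-logr's funcr package for a throwaway logr.Logger; the option and SetLoggerWithOptions come from the hunks above.

package main

import (
	"os"

	"github.com/go-logr/logr/funcr"
	"k8s.io/klog/v2"
)

func main() {
	logger := funcr.New(func(prefix, args string) {
		os.Stdout.WriteString(prefix + args + "\n")
	}, funcr.Options{})
	// Divert klog's pre-formatted text output (header included) to stdout
	// instead of having klog call logger.Info with just the message string.
	klog.SetLoggerWithOptions(logger, klog.WriteKlogBuffer(func(data []byte) {
		os.Stdout.Write(data)
	}))
	klog.Info("hello from klog")
	klog.Flush()
}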
vendor/k8s.io/klog/v2/internal/buffer/buffer.go (vendored)
@@ -40,44 +40,33 @@ type Buffer struct {
	next *Buffer
}

// Buffers manages the reuse of individual buffer instances. It is thread-safe.
type Buffers struct {
	// mu protects the free list. It is separate from the main mutex
	// so buffers can be grabbed and printed to without holding the main lock,
	// for better parallelization.
	mu sync.Mutex

	// freeList is a list of byte buffers, maintained under mu.
	freeList *Buffer
var buffers = sync.Pool{
	New: func() interface{} {
		return new(Buffer)
	},
}

// GetBuffer returns a new, ready-to-use buffer.
func (bl *Buffers) GetBuffer() *Buffer {
	bl.mu.Lock()
	b := bl.freeList
	if b != nil {
		bl.freeList = b.next
	}
	bl.mu.Unlock()
	if b == nil {
		b = new(Buffer)
	} else {
		b.next = nil
		b.Reset()
	}
func GetBuffer() *Buffer {
	b := buffers.Get().(*Buffer)
	b.Reset()
	return b
}

// PutBuffer returns a buffer to the free list.
func (bl *Buffers) PutBuffer(b *Buffer) {
func PutBuffer(b *Buffer) {
	if b.Len() >= 256 {
		// Let big buffers die a natural death.
		// Let big buffers die a natural death, without relying on
		// sync.Pool behavior. The documentation implies that items may
		// get deallocated while stored there ("If the Pool holds the
		// only reference when this [= be removed automatically]
		// happens, the item might be deallocated."), but
		// https://github.com/golang/go/issues/23199 leans more towards
		// having such a size limit.
		return
	}
	bl.mu.Lock()
	b.next = bl.freeList
	bl.freeList = b
	bl.mu.Unlock()

	buffers.Put(b)
}

// Some custom tiny helper functions to print the log header efficiently.

@@ -121,7 +110,8 @@ func (buf *Buffer) someDigits(i, d int) int {
	return copy(buf.Tmp[i:], buf.Tmp[j:])
}

// FormatHeader formats a log header using the provided file name and line number.
// FormatHeader formats a log header using the provided file name and line number
// and writes it into the buffer.
func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now time.Time) {
	if line < 0 {
		line = 0 // not a real line number, but acceptable to someDigits

@@ -157,3 +147,30 @@ func (buf *Buffer) FormatHeader(s severity.Severity, file string, line int, now
	buf.Tmp[n+2] = ' '
	buf.Write(buf.Tmp[:n+3])
}

// SprintHeader formats a log header and returns a string. This is a simpler
// version of FormatHeader for use in ktesting.
func (buf *Buffer) SprintHeader(s severity.Severity, now time.Time) string {
	if s > severity.FatalLog {
		s = severity.InfoLog // for safety.
	}

	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
	// It's worth about 3X. Fprintf is hard.
	_, month, day := now.Date()
	hour, minute, second := now.Clock()
	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
	buf.Tmp[0] = severity.Char[s]
	buf.twoDigits(1, int(month))
	buf.twoDigits(3, day)
	buf.Tmp[5] = ' '
	buf.twoDigits(6, hour)
	buf.Tmp[8] = ':'
	buf.twoDigits(9, minute)
	buf.Tmp[11] = ':'
	buf.twoDigits(12, second)
	buf.Tmp[14] = '.'
	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
	buf.Tmp[21] = ']'
	return string(buf.Tmp[:22])
}
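The change above swaps klog's hand-rolled, mutex-guarded free list for a sync.Pool while keeping the same 256-byte reuse cap. The pattern in miniature, using bytes.Buffer in place of klog's internal Buffer type (which is not importable):

package main

import (
	"bytes"
	"fmt"
	"sync"
)

var pool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

func getBuffer() *bytes.Buffer {
	b := pool.Get().(*bytes.Buffer)
	b.Reset()
	return b
}

func putBuffer(b *bytes.Buffer) {
	if b.Len() >= 256 {
		// Let big buffers die a natural death rather than pinning
		// their memory in the pool.
		return
	}
	pool.Put(b)
}

func main() {
	b := getBuffer()
	fmt.Fprintf(b, "header %d", 42)
	fmt.Println(b.String())
	putBuffer(b)
}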
vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go (vendored)
@@ -24,6 +24,10 @@ import (
	"github.com/go-logr/logr"
)

type textWriter interface {
	WriteText(*bytes.Buffer)
}

// WithValues implements LogSink.WithValues. The old key/value pairs are
// assumed to be well-formed, the new ones are checked and padded if
// necessary. It returns a new slice.

@@ -91,11 +95,66 @@ func MergeKVs(first, second []interface{}) []interface{} {
	return merged
}

type Formatter struct {
	AnyToStringHook AnyToStringFunc
}

type AnyToStringFunc func(v interface{}) string

// MergeKVsInto is a variant of MergeKVs which directly formats the key/value
// pairs into a buffer.
func (f Formatter) MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
	if len(first) == 0 && len(second) == 0 {
		// Nothing to do at all.
		return
	}

	if len(first) == 0 && len(second)%2 == 0 {
		// Nothing to be overridden, second slice is well-formed
		// and can be used directly.
		for i := 0; i < len(second); i += 2 {
			f.KVFormat(b, second[i], second[i+1])
		}
		return
	}

	// Determine which keys are in the second slice so that we can skip
	// them when iterating over the first one. The code intentionally
	// favors performance over completeness: we assume that keys are string
	// constants and thus compare equal when the string values are equal. A
	// string constant being overridden by, for example, a fmt.Stringer is
	// not handled.
	overrides := map[interface{}]bool{}
	for i := 0; i < len(second); i += 2 {
		overrides[second[i]] = true
	}
	for i := 0; i < len(first); i += 2 {
		key := first[i]
		if overrides[key] {
			continue
		}
		f.KVFormat(b, key, first[i+1])
	}
	// Round down.
	l := len(second)
	l = l / 2 * 2
	for i := 1; i < l; i += 2 {
		f.KVFormat(b, second[i-1], second[i])
	}
	if len(second)%2 == 1 {
		f.KVFormat(b, second[len(second)-1], missingValue)
	}
}

func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
	Formatter{}.MergeAndFormatKVs(b, first, second)
}

const missingValue = "(MISSING)"

// KVListFormat serializes all key/value pairs into the provided buffer.
// A space gets inserted before the first pair and between each pair.
func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
func (f Formatter) KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
	for i := 0; i < len(keysAndValues); i += 2 {
		var v interface{}
		k := keysAndValues[i]

@@ -104,69 +163,93 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
		} else {
			v = missingValue
		}
		b.WriteByte(' ')
		// Keys are assumed to be well-formed according to
		// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
		// for the sake of performance. Keys with spaces,
		// special characters, etc. will break parsing.
		if sK, ok := k.(string); ok {
			// Avoid one allocation when the key is a string, which
			// normally it should be.
			b.WriteString(sK)
		} else {
			b.WriteString(fmt.Sprintf("%s", k))
		}

		// The type checks are sorted so that more frequently used ones
		// come first because that is then faster in the common
		// cases. In Kubernetes, ObjectRef (a Stringer) is more common
		// than plain strings
		// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
		switch v := v.(type) {
		case fmt.Stringer:
			writeStringValue(b, true, StringerToString(v))
		case string:
			writeStringValue(b, true, v)
		case error:
			writeStringValue(b, true, ErrorToString(v))
		case logr.Marshaler:
			value := MarshalerToValue(v)
			// A marshaler that returns a string is useful for
			// delayed formatting of complex values. We treat this
			// case like a normal string. This is useful for
			// multi-line support.
			//
			// We could do this by recursively formatting a value,
			// but that comes with the risk of infinite recursion
			// if a marshaler returns itself. Instead we call it
			// only once and rely on it returning the intended
			// value directly.
			switch value := value.(type) {
			case string:
				writeStringValue(b, true, value)
			default:
				writeStringValue(b, false, fmt.Sprintf("%+v", v))
			}
		case []byte:
			// In https://github.com/kubernetes/klog/pull/237 it was decided
			// to format byte slices with "%+q". The advantages of that are:
			// - readable output if the bytes happen to be printable
			// - non-printable bytes get represented as unicode escape
			//   sequences (\uxxxx)
			//
			// The downsides are that we cannot use the faster
			// strconv.Quote here and that multi-line output is not
			// supported. If developers know that a byte array is
			// printable and they want multi-line output, they can
			// convert the value to string before logging it.
			b.WriteByte('=')
			b.WriteString(fmt.Sprintf("%+q", v))
		default:
			writeStringValue(b, false, fmt.Sprintf("%+v", v))
		}
		f.KVFormat(b, k, v)
	}
}

func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) {
	Formatter{}.KVListFormat(b, keysAndValues...)
}

// KVFormat serializes one key/value pair into the provided buffer.
// A space gets inserted before the pair.
func (f Formatter) KVFormat(b *bytes.Buffer, k, v interface{}) {
	b.WriteByte(' ')
	// Keys are assumed to be well-formed according to
	// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments
	// for the sake of performance. Keys with spaces,
	// special characters, etc. will break parsing.
	if sK, ok := k.(string); ok {
		// Avoid one allocation when the key is a string, which
		// normally it should be.
		b.WriteString(sK)
	} else {
		b.WriteString(fmt.Sprintf("%s", k))
	}

	// The type checks are sorted so that more frequently used ones
	// come first because that is then faster in the common
	// cases. In Kubernetes, ObjectRef (a Stringer) is more common
	// than plain strings
	// (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235).
	switch v := v.(type) {
	case textWriter:
		writeTextWriterValue(b, v)
	case fmt.Stringer:
		writeStringValue(b, true, StringerToString(v))
	case string:
		writeStringValue(b, true, v)
	case error:
		writeStringValue(b, true, ErrorToString(v))
	case logr.Marshaler:
		value := MarshalerToValue(v)
		// A marshaler that returns a string is useful for
		// delayed formatting of complex values. We treat this
		// case like a normal string. This is useful for
		// multi-line support.
		//
		// We could do this by recursively formatting a value,
		// but that comes with the risk of infinite recursion
		// if a marshaler returns itself. Instead we call it
		// only once and rely on it returning the intended
		// value directly.
		switch value := value.(type) {
		case string:
			writeStringValue(b, true, value)
		default:
			writeStringValue(b, false, f.AnyToString(value))
		}
	case []byte:
		// In https://github.com/kubernetes/klog/pull/237 it was decided
		// to format byte slices with "%+q". The advantages of that are:
		// - readable output if the bytes happen to be printable
		// - non-printable bytes get represented as unicode escape
		//   sequences (\uxxxx)
		//
		// The downsides are that we cannot use the faster
		// strconv.Quote here and that multi-line output is not
		// supported. If developers know that a byte array is
		// printable and they want multi-line output, they can
		// convert the value to string before logging it.
		b.WriteByte('=')
		b.WriteString(fmt.Sprintf("%+q", v))
	default:
		writeStringValue(b, false, f.AnyToString(v))
	}
}

func KVFormat(b *bytes.Buffer, k, v interface{}) {
	Formatter{}.KVFormat(b, k, v)
}

// AnyToString is the historic fallback formatter.
func (f Formatter) AnyToString(v interface{}) string {
	if f.AnyToStringHook != nil {
		return f.AnyToStringHook(v)
	}
	return fmt.Sprintf("%+v", v)
}

// StringerToString converts a Stringer to a string,
// handling panics if they occur.
func StringerToString(s fmt.Stringer) (ret string) {

@@ -203,6 +286,16 @@ func ErrorToString(err error) (ret string) {
	return
}

func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
	b.WriteRune('=')
	defer func() {
		if err := recover(); err != nil {
			fmt.Fprintf(b, `"<panic: %s>"`, err)
		}
	}()
	v.WriteText(b)
}

func writeStringValue(b *bytes.Buffer, quote bool, v string) {
	data := []byte(v)
	index := bytes.IndexByte(data, '\n')
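The refactoring above introduces a textWriter fast path so values can serialize themselves straight into the output buffer without a fmt round trip. The idea in miniature, with a stand-in objectRef type (klog's internal/serialize package is not importable):

package main

import (
	"bytes"
	"fmt"
)

// textWriter mirrors the unexported interface added in the hunk above.
type textWriter interface {
	WriteText(*bytes.Buffer)
}

type objectRef struct{ Namespace, Name string }

func (r objectRef) WriteText(b *bytes.Buffer) {
	b.WriteByte('"')
	if r.Namespace != "" {
		b.WriteString(r.Namespace)
		b.WriteByte('/')
	}
	b.WriteString(r.Name)
	b.WriteByte('"')
}

func kvFormat(b *bytes.Buffer, k string, v interface{}) {
	b.WriteByte(' ')
	b.WriteString(k)
	b.WriteByte('=')
	if tw, ok := v.(textWriter); ok {
		// Fast path: the value writes itself, no fmt machinery involved.
		tw.WriteText(b)
		return
	}
	fmt.Fprintf(b, "%+v", v)
}

func main() {
	var b bytes.Buffer
	kvFormat(&b, "pod", objectRef{"kube-system", "coredns"})
	fmt.Println(b.String()) // ` pod="kube-system/coredns"`
}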
vendor/k8s.io/klog/v2/k8s_references.go (vendored)
@@ -17,8 +17,10 @@ limitations under the License.
package klog

import (
	"bytes"
	"fmt"
	"reflect"
	"strings"

	"github.com/go-logr/logr"
)

@@ -31,11 +33,30 @@ type ObjectRef struct {

func (ref ObjectRef) String() string {
	if ref.Namespace != "" {
		return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
		var builder strings.Builder
		builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
		builder.WriteString(ref.Namespace)
		builder.WriteRune('/')
		builder.WriteString(ref.Name)
		return builder.String()
	}
	return ref.Name
}

func (ref ObjectRef) WriteText(out *bytes.Buffer) {
	out.WriteRune('"')
	ref.writeUnquoted(out)
	out.WriteRune('"')
}

func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
	if ref.Namespace != "" {
		out.WriteString(ref.Namespace)
		out.WriteRune('/')
	}
	out.WriteString(ref.Name)
}

// MarshalLog ensures that loggers with support for structured output will log
// as a struct by removing the String method via a custom type.
func (ref ObjectRef) MarshalLog() interface{} {

@@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
var _ logr.Marshaler = kobjSlice{}

func (ks kobjSlice) String() string {
	objectRefs, err := ks.process()
	if err != nil {
		return err.Error()
	objectRefs, errStr := ks.process()
	if errStr != "" {
		return errStr
	}
	return fmt.Sprintf("%v", objectRefs)
}

func (ks kobjSlice) MarshalLog() interface{} {
	objectRefs, err := ks.process()
	if err != nil {
		return err.Error()
	objectRefs, errStr := ks.process()
	if errStr != "" {
		return errStr
	}
	return objectRefs
}

func (ks kobjSlice) process() ([]interface{}, error) {
func (ks kobjSlice) process() (objs []interface{}, err string) {
	s := reflect.ValueOf(ks.arg)
	switch s.Kind() {
	case reflect.Invalid:
		// nil parameter, print as nil.
		return nil, nil
		return nil, ""
	case reflect.Slice:
		// Okay, handle below.
	default:
		return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
	}
	objectRefs := make([]interface{}, 0, s.Len())
	for i := 0; i < s.Len(); i++ {

@@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
	} else if v, ok := item.(KMetadata); ok {
		objectRefs = append(objectRefs, KObj(v))
	} else {
		return nil, fmt.Errorf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
		return nil, fmt.Sprintf("<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
	}
	}
	return objectRefs, ""
}

var nilToken = []byte("<nil>")

func (ks kobjSlice) WriteText(out *bytes.Buffer) {
	s := reflect.ValueOf(ks.arg)
	switch s.Kind() {
	case reflect.Invalid:
		// nil parameter, print as empty slice.
		out.WriteString("[]")
		return
	case reflect.Slice:
		// Okay, handle below.
	default:
		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
		return
	}
	out.Write([]byte{'['})
	defer out.Write([]byte{']'})
	for i := 0; i < s.Len(); i++ {
		if i > 0 {
			out.Write([]byte{' '})
		}
		item := s.Index(i).Interface()
		if item == nil {
			out.Write(nilToken)
		} else if v, ok := item.(KMetadata); ok {
			KObj(v).writeUnquoted(out)
		} else {
			fmt.Fprintf(out, "<KObjSlice needs a slice of values implementing KMetadata, got type %T>", item)
			return
		}
	}
	return objectRefs, nil
}
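From the caller's side none of this changes the API: KObj and KObjSlice are used exactly as before, and the text output path now formats them through WriteText. A usage sketch:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &metav1.ObjectMeta{Name: "coredns", Namespace: "kube-system"}
	// ObjectMeta satisfies klog's KMetadata (GetName/GetNamespace), so both
	// helpers accept it; the text backend renders "kube-system/coredns".
	klog.InfoS("syncing", "pod", klog.KObj(pod), "peers", klog.KObjSlice([]*metav1.ObjectMeta{pod}))
	klog.Flush()
}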
@@ -39,39 +39,38 @@
 // This package provides several flags that modify this behavior.
 // As a result, flag.Parse must be called before any logging is done.
 //
 //	-logtostderr=true
 //		Logs are written to standard error instead of to files.
 //		This shortcuts most of the usual output routing:
 //		-alsologtostderr, -stderrthreshold and -log_dir have no
 //		effect and output redirection at runtime with SetOutput is
 //		ignored.
 //	-alsologtostderr=false
 //		Logs are written to standard error as well as to files.
 //	-stderrthreshold=ERROR
 //		Log events at or above this severity are logged to standard
 //		error as well as to files.
 //	-log_dir=""
 //		Log files will be written to this directory instead of the
 //		default temporary directory.
 //
 // Other flags provide aids to debugging.
 //
 //	-log_backtrace_at=""
 //		When set to a file and line number holding a logging statement,
 //		such as
 //			-log_backtrace_at=gopherflakes.go:234
 //		a stack trace will be written to the Info log whenever execution
 //		hits that statement. (Unlike with -vmodule, the ".go" must be
 //		present.)
 //	-v=0
 //		Enable V-leveled logging at the specified level.
 //	-vmodule=""
 //		The syntax of the argument is a comma-separated list of pattern=N,
 //		where pattern is a literal file name (minus the ".go" suffix) or
 //		"glob" pattern and N is a V level. For instance,
 //			-vmodule=gopher*=3
 //		sets the V level to 3 in all Go files whose names begin "gopher".
 package klog
 
 import (
@@ -92,8 +91,6 @@ import (
 	"sync/atomic"
 	"time"
 
 	"github.com/go-logr/logr"
 
 	"k8s.io/klog/v2/internal/buffer"
 	"k8s.io/klog/v2/internal/clock"
 	"k8s.io/klog/v2/internal/dbg"
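For context on the flag surface documented above, a minimal sketch of how a consumer wires these flags up, assuming only the public klog API visible in this diff (InitFlags, Flush, V, InfoS); the program itself is illustrative:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	// Register klog's flags (-v, -logtostderr, -log_dir, ...) on the default
	// FlagSet; per the package docs, flag.Parse must run before any logging.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	klog.V(2).Info("only printed when -v=2 or higher")
	klog.InfoS("structured message", "key", "value")
}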
@@ -397,45 +394,48 @@ type flushSyncWriter interface {
 	io.Writer
 }
 
-// init sets up the defaults.
+var logging loggingT
+var commandLine flag.FlagSet
+
+// init sets up the defaults and creates command line flags.
 func init() {
+	commandLine.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
+	commandLine.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file (no effect when -logtostderr=true)")
+	commandLine.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", 1800,
+		"Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
+			"If the value is 0, the maximum file size is unlimited.")
+	commandLine.BoolVar(&logging.toStderr, "logtostderr", true, "log to standard error instead of files")
+	commandLine.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files (no effect when -logtostderr=true)")
+	logging.setVState(0, nil, false)
+	commandLine.Var(&logging.verbosity, "v", "number for the log level verbosity")
+	commandLine.BoolVar(&logging.addDirHeader, "add_dir_header", false, "If true, adds the file directory to the header of the log messages")
+	commandLine.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
+	commandLine.BoolVar(&logging.oneOutput, "one_output", false, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
+	commandLine.BoolVar(&logging.skipLogHeaders, "skip_log_headers", false, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
 	logging.stderrThreshold = severityValue{
 		Severity: severity.ErrorLog, // Default stderrThreshold is ERROR.
 	}
-	logging.setVState(0, nil, false)
-	logging.logDir = ""
-	logging.logFile = ""
-	logging.logFileMaxSizeMB = 1800
-	logging.toStderr = true
-	logging.alsoToStderr = false
-	logging.skipHeaders = false
-	logging.addDirHeader = false
-	logging.skipLogHeaders = false
-	logging.oneOutput = false
+	commandLine.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
+	commandLine.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	commandLine.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
 
+	logging.settings.contextualLoggingEnabled = true
 	logging.flushD = newFlushDaemon(logging.lockAndFlushAll, nil)
 }
 
 // InitFlags is for explicitly initializing the flags.
+// It may get called repeatedly for different flagsets, but not
+// twice for the same one. May get called concurrently
+// to other goroutines using klog. However, only some flags
+// may get set concurrently (see implementation).
 func InitFlags(flagset *flag.FlagSet) {
 	if flagset == nil {
 		flagset = flag.CommandLine
 	}
 
-	flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory (no effect when -logtostderr=true)")
-	flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file (no effect when -logtostderr=true)")
-	flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB,
-		"Defines the maximum size a log file can grow to (no effect when -logtostderr=true). Unit is megabytes. "+
-			"If the value is 0, the maximum file size is unlimited.")
-	flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
-	flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files (no effect when -logtostderr=true)")
-	flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
-	flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header of the log messages")
-	flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
-	flagset.BoolVar(&logging.oneOutput, "one_output", logging.oneOutput, "If true, only write logs to their native severity level (vs also writing to each lower severity level; no effect when -logtostderr=true)")
-	flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files (no effect when -logtostderr=true)")
-	flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr when writing to files and stderr (no effect when -logtostderr=true or -alsologtostderr=false)")
-	flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
-	flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+	commandLine.VisitAll(func(f *flag.Flag) {
+		flagset.Var(f.Value, f.Name, f.Usage)
+	})
 }
 
 // Flush flushes all pending log I/O.
@@ -451,7 +451,7 @@ type settings struct {
 
 	// logger is the global Logger chosen by users of klog, nil if
 	// none is available.
-	logger *Logger
+	logger *logWriter
 
 	// loggerOptions contains the options that were supplied for
 	// globalLogger.
@@ -523,6 +523,11 @@ func (s settings) deepCopy() settings {
 	}
 	s.vmodule.filter = filter
 
+	if s.logger != nil {
+		logger := *s.logger
+		s.logger = &logger
+	}
+
 	return s
 }
 
@@ -530,11 +535,6 @@ func (s settings) deepCopy() settings {
 type loggingT struct {
 	settings
 
-	// bufferCache maintains the free list. It uses its own mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	bufferCache buffer.Buffers
-
 	// flushD holds a flushDaemon that frequently flushes log file buffers.
 	// Uses its own mutex.
 	flushD *flushDaemon
@@ -550,12 +550,6 @@ type loggingT struct {
 	vmap map[uintptr]Level
 }
 
-var logging = loggingT{
-	settings: settings{
-		contextualLoggingEnabled: true,
-	},
-}
-
 // setVState sets a consistent state for V logging.
 // l.mu is held.
 func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
@@ -633,8 +627,11 @@ It returns a buffer containing the formatted header and the user's file and line
 The depth specifies how many stack frames above lives the source line to be identified in the log message.
 
 Log lines have this form:
 
 	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
 
 where the fields are defined as follows:
 
 	L                A single character, representing the log level (eg 'I' for INFO)
 	mm               The month (zero padded; ie May is '05')
 	dd               The day (zero padded)
@@ -665,7 +662,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 
 // formatHeader formats a log header using the provided file name and line number.
 func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer {
-	buf := l.bufferCache.GetBuffer()
+	buf := buffer.GetBuffer()
 	if l.skipHeaders {
 		return buf
 	}
@@ -674,17 +671,18 @@ func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buf
 	return buf
 }
 
-func (l *loggingT) println(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
+func (l *loggingT) println(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
 	l.printlnDepth(s, logger, filter, 1, args...)
 }
 
-func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
+func (l *loggingT) printlnDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
 	buf, file, line := l.header(s, depth)
-	// if logger is set, we clear the generated header as we rely on the backing
-	// logger implementation to print headers
-	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -693,17 +691,18 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter
 	l.output(s, logger, buf, depth, file, line, false)
 }
 
-func (l *loggingT) print(s severity.Severity, logger *logr.Logger, filter LogFilter, args ...interface{}) {
+func (l *loggingT) print(s severity.Severity, logger *logWriter, filter LogFilter, args ...interface{}) {
 	l.printDepth(s, logger, filter, 1, args...)
 }
 
-func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, args ...interface{}) {
+func (l *loggingT) printDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, args ...interface{}) {
 	buf, file, line := l.header(s, depth)
-	// if logr is set, we clear the generated header as we rely on the backing
-	// logr implementation to print headers
-	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -715,17 +714,18 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L
 	l.output(s, logger, buf, depth, file, line, false)
 }
 
-func (l *loggingT) printf(s severity.Severity, logger *logr.Logger, filter LogFilter, format string, args ...interface{}) {
+func (l *loggingT) printf(s severity.Severity, logger *logWriter, filter LogFilter, format string, args ...interface{}) {
 	l.printfDepth(s, logger, filter, 1, format, args...)
 }
 
-func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter LogFilter, depth int, format string, args ...interface{}) {
+func (l *loggingT) printfDepth(s severity.Severity, logger *logWriter, filter LogFilter, depth int, format string, args ...interface{}) {
 	buf, file, line := l.header(s, depth)
-	// if logr is set, we clear the generated header as we rely on the backing
-	// logr implementation to print headers
-	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		format, args = filter.FilterF(format, args)
@@ -740,13 +740,14 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter
 // printWithFileLine behaves like print but uses the provided file and line number.  If
 // alsoLogToStderr is true, the log message always appears on standard error; it
 // will also appear in the log file unless --logtostderr is set.
-func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
+func (l *loggingT) printWithFileLine(s severity.Severity, logger *logWriter, filter LogFilter, file string, line int, alsoToStderr bool, args ...interface{}) {
 	buf := l.formatHeader(s, file, line)
-	// if logr is set, we clear the generated header as we rely on the backing
-	// logr implementation to print headers
-	if logger != nil {
-		l.bufferCache.PutBuffer(buf)
-		buf = l.bufferCache.GetBuffer()
+	// If a logger is set and doesn't support writing a formatted buffer,
+	// we clear the generated header as we rely on the backing
+	// logger implementation to print headers.
+	if logger != nil && logger.writeKlogBuffer == nil {
+		buffer.PutBuffer(buf)
+		buf = buffer.GetBuffer()
 	}
 	if filter != nil {
 		args = filter.Filter(args)
@@ -759,7 +760,7 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f
 }
 
 // if loggr is specified, will call loggr.Error, otherwise output with logging module.
-func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+func (l *loggingT) errorS(err error, logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
 	if filter != nil {
 		msg, keysAndValues = filter.FilterS(msg, keysAndValues)
 	}
@@ -771,7 +772,7 @@ func (l *loggingT) errorS(err error, logger *logr.Logger, filter LogFilter, dept
 }
 
 // if loggr is specified, will call loggr.Info, otherwise output with logging module.
-func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
+func (l *loggingT) infoS(logger *logWriter, filter LogFilter, depth int, msg string, keysAndValues ...interface{}) {
 	if filter != nil {
 		msg, keysAndValues = filter.FilterS(msg, keysAndValues)
 	}
@@ -786,7 +787,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s
 // set log severity by s
 func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) {
 	// Only create a new buffer if we don't have one cached.
-	b := l.bufferCache.GetBuffer()
+	b := buffer.GetBuffer()
 	// The message is always quoted, even if it contains line breaks.
 	// If developers want multi-line output, they should use a small, fixed
 	// message and put the multi-line output into a value.
@@ -797,7 +798,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string,
 	serialize.KVListFormat(&b.Buffer, keysAndValues...)
 	l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer)
 	// Make the buffer available for reuse.
-	l.bufferCache.PutBuffer(b)
+	buffer.PutBuffer(b)
 }
 
 // redirectBuffer is used to set an alternate destination for the logs
@@ -852,7 +853,7 @@ func LogToStderr(stderr bool) {
 }
 
 // output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
+func (l *loggingT) output(s severity.Severity, logger *logWriter, buf *buffer.Buffer, depth int, file string, line int, alsoToStderr bool) {
 	var isLocked = true
 	l.mu.Lock()
 	defer func() {
@@ -868,13 +869,17 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
 		}
 	}
 	data := buf.Bytes()
-	if log != nil {
-		// TODO: set 'severity' and caller information as structured log info
-		// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
-		if s == severity.ErrorLog {
-			logging.logger.WithCallDepth(depth+3).Error(nil, string(data))
+	if logger != nil {
+		if logger.writeKlogBuffer != nil {
+			logger.writeKlogBuffer(data)
 		} else {
-			log.WithCallDepth(depth + 3).Info(string(data))
+			// TODO: set 'severity' and caller information as structured log info
+			// keysAndValues := []interface{}{"severity", severityName[s], "file", file, "line", line}
+			if s == severity.ErrorLog {
+				logger.WithCallDepth(depth+3).Error(nil, string(data))
+			} else {
+				logger.WithCallDepth(depth + 3).Info(string(data))
+			}
 		}
 	} else if l.toStderr {
 		os.Stderr.Write(data)
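The writeKlogBuffer branch above lets a logr backend receive klog's already-formatted output bytes instead of a bare message string. A hedged sketch of opting in from application code, assuming klog's public SetLoggerWithOptions and WriteKlogBuffer APIs introduced alongside this change; the sink function is illustrative:

package main

import (
	"os"

	"github.com/go-logr/logr"
	"k8s.io/klog/v2"
)

func main() {
	// A trivial sink that dumps klog's pre-formatted buffers to stdout.
	writer := func(data []byte) { os.Stdout.Write(data) }

	// Any logr.Logger works here; logr.Discard() keeps the sketch self-contained.
	klog.SetLoggerWithOptions(logr.Discard(), klog.WriteKlogBuffer(writer))
	defer klog.Flush()

	klog.Info("this line is routed through writeKlogBuffer")
}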
@@ -949,7 +954,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf
 		timeoutFlush(ExitFlushTimeout)
 		OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
 	}
-	l.bufferCache.PutBuffer(buf)
+	buffer.PutBuffer(buf)
 
 	if stats := severityStats[s]; stats != nil {
 		atomic.AddInt64(&stats.lines, 1)
@@ -1283,7 +1288,7 @@ func (l *loggingT) setV(pc uintptr) Level {
 // See the documentation of V for more information.
 type Verbose struct {
 	enabled bool
-	logr    *logr.Logger
+	logger  *logWriter
 }
 
 func newVerbose(level Level, b bool) Verbose {
@@ -1291,16 +1296,20 @@ func newVerbose(level Level, b bool) Verbose {
 		return Verbose{b, nil}
 	}
 	v := logging.logger.V(int(level))
-	return Verbose{b, &v}
+	return Verbose{b, &logWriter{Logger: v, writeKlogBuffer: logging.loggerOptions.writeKlogBuffer}}
 }
 
 // V reports whether verbosity at the call site is at least the requested level.
 // The returned value is a struct of type Verbose, which implements Info, Infoln
 // and Infof. These methods will write to the Info log if called.
 // Thus, one may write either
 //
 //	if klog.V(2).Enabled() { klog.Info("log this") }
 //
 // or
 //
 //	klog.V(2).Info("log this")
 //
 // The second form is shorter but the first is cheaper if logging is off because it does
 // not evaluate its arguments.
 //
@@ -1310,6 +1319,13 @@ func newVerbose(level Level, b bool) Verbose {
 // less than or equal to the value of the -vmodule pattern matching the source file
 // containing the call.
 func V(level Level) Verbose {
+	return VDepth(1, level)
+}
+
+// VDepth is a variant of V that accepts a number of stack frames that will be
+// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to
+// V().
+func VDepth(depth int, level Level) Verbose {
 	// This function tries hard to be cheap unless there's work to do.
 	// The fast path is two atomic loads and compares.
 
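VDepth exists so that logging helpers can make the -vmodule check skip their own stack frame. A minimal sketch under that assumption, using only the VDepth and InfoDepth APIs shown in this diff; the debugf helper is hypothetical:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

// debugf is a thin wrapper around klog. VDepth(1, ...) makes the -vmodule
// pattern match against debugf's caller, and InfoDepth(1, ...) makes the
// logged file:line point there too, instead of at this helper.
func debugf(level klog.Level, msg string) {
	if v := klog.VDepth(1, level); v.Enabled() {
		v.InfoDepth(1, msg)
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	debugf(2, "visible with -v=2, attributed to the caller")
}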
@@ -1326,7 +1342,7 @@ func V(level Level) Verbose {
 	// but if V logging is enabled we're slow anyway.
 	logging.mu.Lock()
 	defer logging.mu.Unlock()
-	if runtime.Callers(2, logging.pcs[:]) == 0 {
+	if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
 		return newVerbose(level, false)
 	}
 	// runtime.Callers returns "return PCs", but we want
@@ -1354,7 +1370,7 @@ func (v Verbose) Enabled() bool {
 // See the documentation of V for usage.
 func (v Verbose) Info(args ...interface{}) {
 	if v.enabled {
-		logging.print(severity.InfoLog, v.logr, logging.filter, args...)
+		logging.print(severity.InfoLog, v.logger, logging.filter, args...)
 	}
 }
 
@@ -1362,7 +1378,7 @@ func (v Verbose) Info(args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) InfoDepth(depth int, args ...interface{}) {
 	if v.enabled {
-		logging.printDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
+		logging.printDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
 	}
 }
 
@@ -1370,7 +1386,7 @@ func (v Verbose) InfoDepth(depth int, args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) Infoln(args ...interface{}) {
 	if v.enabled {
-		logging.println(severity.InfoLog, v.logr, logging.filter, args...)
+		logging.println(severity.InfoLog, v.logger, logging.filter, args...)
 	}
 }
 
@@ -1378,7 +1394,7 @@ func (v Verbose) Infoln(args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
 	if v.enabled {
-		logging.printlnDepth(severity.InfoLog, v.logr, logging.filter, depth, args...)
+		logging.printlnDepth(severity.InfoLog, v.logger, logging.filter, depth, args...)
 	}
 }
 
@@ -1386,7 +1402,7 @@ func (v Verbose) InfolnDepth(depth int, args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) Infof(format string, args ...interface{}) {
 	if v.enabled {
-		logging.printf(severity.InfoLog, v.logr, logging.filter, format, args...)
+		logging.printf(severity.InfoLog, v.logger, logging.filter, format, args...)
 	}
 }
 
@@ -1394,7 +1410,7 @@ func (v Verbose) Infof(format string, args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
 	if v.enabled {
-		logging.printfDepth(severity.InfoLog, v.logr, logging.filter, depth, format, args...)
+		logging.printfDepth(severity.InfoLog, v.logger, logging.filter, depth, format, args...)
 	}
 }
 
@@ -1402,7 +1418,7 @@ func (v Verbose) InfofDepth(depth int, format string, args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) InfoS(msg string, keysAndValues ...interface{}) {
 	if v.enabled {
-		logging.infoS(v.logr, logging.filter, 0, msg, keysAndValues...)
+		logging.infoS(v.logger, logging.filter, 0, msg, keysAndValues...)
 	}
 }
 
@@ -1416,14 +1432,14 @@ func InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) InfoSDepth(depth int, msg string, keysAndValues ...interface{}) {
 	if v.enabled {
-		logging.infoS(v.logr, logging.filter, depth, msg, keysAndValues...)
+		logging.infoS(v.logger, logging.filter, depth, msg, keysAndValues...)
 	}
 }
 
 // Deprecated: Use ErrorS instead.
 func (v Verbose) Error(err error, msg string, args ...interface{}) {
 	if v.enabled {
-		logging.errorS(err, v.logr, logging.filter, 0, msg, args...)
+		logging.errorS(err, v.logger, logging.filter, 0, msg, args...)
 	}
 }
 
@@ -1431,7 +1447,7 @@ func (v Verbose) Error(err error, msg string, args ...interface{}) {
 // See the documentation of V for usage.
 func (v Verbose) ErrorS(err error, msg string, keysAndValues ...interface{}) {
 	if v.enabled {
-		logging.errorS(err, v.logr, logging.filter, 0, msg, keysAndValues...)
+		logging.errorS(err, v.logger, logging.filter, 0, msg, keysAndValues...)
 	}
 }
 
@@ -1582,10 +1598,10 @@ func ErrorSDepth(depth int, err error, msg string, keysAndValues ...interface{})
 //
 // Callers who want more control over handling of fatal events may instead use a
 // combination of different functions:
 //   - some info or error logging function, optionally with a stack trace
 //     value generated by github.com/go-logr/lib/dbg.Backtrace
 //   - Flush to flush pending log data
 //   - panic, os.Exit or returning to the caller with an error
 //
 // Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
 func Fatal(args ...interface{}) {
@@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) {
 	l.callDepth += info.CallDepth
 }
 
-func (l klogger) Info(level int, msg string, kvList ...interface{}) {
+func (l *klogger) Info(level int, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg
 	}
-	V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
+	// Skip this function.
+	VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...)
 }
 
-func (l klogger) Enabled(level int) bool {
-	return V(Level(level)).Enabled()
+func (l *klogger) Enabled(level int) bool {
+	// Skip this function and logr.Logger.Info where Enabled is called.
+	return VDepth(l.callDepth+2, Level(level)).Enabled()
 }
 
-func (l klogger) Error(err error, msg string, kvList ...interface{}) {
+func (l *klogger) Error(err error, msg string, kvList ...interface{}) {
 	merged := serialize.MergeKVs(l.values, kvList)
 	if l.prefix != "" {
 		msg = l.prefix + ": " + msg
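The call-depth arithmetic above is what keeps file:line attribution and -vmodule filtering correct when klog is used through its logr adapter. A short sketch under the assumption that NewKlogr remains the public constructor for this adapter:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// logger is a logr.Logger backed by the klogger type patched above.
	logger := klog.NewKlogr()
	// With VDepth in place, the V-level check and the logged file:line
	// refer to this call site, not to the adapter's own frames.
	logger.V(0).Info("hello from the logr adapter", "answer", 42)
}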
@@ -0,0 +1,181 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package net

import (
	"fmt"
	"net"
)

// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
type IPFamily string

// Constants for valid IPFamilys:
const (
	IPFamilyUnknown IPFamily = ""

	IPv4 IPFamily = "4"
	IPv6 IPFamily = "6"
)

// IsDualStackIPs returns true if:
// - all elements of ips are valid
// - at least one IP from each family (v4 and v6) is present
func IsDualStackIPs(ips []net.IP) (bool, error) {
	v4Found := false
	v6Found := false
	for i, ip := range ips {
		switch IPFamilyOf(ip) {
		case IPv4:
			v4Found = true
		case IPv6:
			v6Found = true
		default:
			return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
		}
	}

	return (v4Found && v6Found), nil
}

// IsDualStackIPStrings returns true if:
// - all elements of ips can be parsed as IPs
// - at least one IP from each family (v4 and v6) is present
func IsDualStackIPStrings(ips []string) (bool, error) {
	parsedIPs := make([]net.IP, 0, len(ips))
	for i, ip := range ips {
		parsedIP := ParseIPSloppy(ip)
		if parsedIP == nil {
			return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
		}
		parsedIPs = append(parsedIPs, parsedIP)
	}
	return IsDualStackIPs(parsedIPs)
}

// IsDualStackCIDRs returns true if:
// - all elements of cidrs are non-nil
// - at least one CIDR from each family (v4 and v6) is present
func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
	v4Found := false
	v6Found := false
	for i, cidr := range cidrs {
		switch IPFamilyOfCIDR(cidr) {
		case IPv4:
			v4Found = true
		case IPv6:
			v6Found = true
		default:
			return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr)
		}
	}

	return (v4Found && v6Found), nil
}

// IsDualStackCIDRStrings returns if
// - all elements of cidrs can be parsed as CIDRs
// - at least one CIDR from each family (v4 and v6) is present
func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
	parsedCIDRs, err := ParseCIDRs(cidrs)
	if err != nil {
		return false, err
	}
	return IsDualStackCIDRs(parsedCIDRs)
}

// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if it is invalid.
func IPFamilyOf(ip net.IP) IPFamily {
	switch {
	case ip.To4() != nil:
		return IPv4
	case ip.To16() != nil:
		return IPv6
	default:
		return IPFamilyUnknown
	}
}

// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot
// be parsed as an IP.
func IPFamilyOfString(ip string) IPFamily {
	return IPFamilyOf(ParseIPSloppy(ip))
}

// IPFamilyOfCIDR returns the IP family of cidr.
func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily {
	if cidr == nil {
		return IPFamilyUnknown
	}
	return IPFamilyOf(cidr.IP)
}

// IPFamilyOfCIDRString returns the IP family of cidr.
func IPFamilyOfCIDRString(cidr string) IPFamily {
	ip, _, _ := ParseCIDRSloppy(cidr)
	return IPFamilyOf(ip)
}

// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid).
func IsIPv6(netIP net.IP) bool {
	return IPFamilyOf(netIP) == IPv6
}

// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It
// returns false if ip is an empty string, an IPv4 address, or anything else that is not a
// single IPv6 address.
func IsIPv6String(ip string) bool {
	return IPFamilyOfString(ip) == IPv6
}

// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is
// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid.
func IsIPv6CIDR(cidr *net.IPNet) bool {
	return IPFamilyOfCIDR(cidr) == IPv6
}

// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. It
// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a
// single valid IPv6 CIDR.
func IsIPv6CIDRString(cidr string) bool {
	return IPFamilyOfCIDRString(cidr) == IPv6
}

// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid).
func IsIPv4(netIP net.IP) bool {
	return IPFamilyOf(netIP) == IPv4
}

// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It
// returns false if ip is an empty string, an IPv6 address, or anything else that is not a
// single IPv4 address.
func IsIPv4String(ip string) bool {
	return IPFamilyOfString(ip) == IPv4
}

// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil
// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid.
func IsIPv4CIDR(cidr *net.IPNet) bool {
	return IPFamilyOfCIDR(cidr) == IPv4
}

// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It
// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a
// single valid IPv4 CIDR.
func IsIPv4CIDRString(cidr string) bool {
	return IPFamilyOfCIDRString(cidr) == IPv4
}
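A short usage sketch of the helpers in the new file above, using only functions defined there; the CIDR strings are arbitrary examples:

package main

import (
	"fmt"

	utilnet "k8s.io/utils/net"
)

func main() {
	// Mixed v4/v6 service CIDRs count as dual-stack.
	dual, err := utilnet.IsDualStackCIDRStrings([]string{"10.0.0.0/16", "fd00::/64"})
	fmt.Println(dual, err) // true <nil>

	// Family detection is nil-safe and reports IPFamilyUnknown on bad input.
	fmt.Println(utilnet.IPFamilyOfString("192.168.0.1")) // 4
	fmt.Println(utilnet.IPFamilyOfString("not-an-ip"))   // (empty: IPFamilyUnknown)
}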
@@ -29,138 +29,16 @@ import (
 // order is maintained
 func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) {
 	cidrs := make([]*net.IPNet, 0, len(cidrsString))
-	for _, cidrString := range cidrsString {
+	for i, cidrString := range cidrsString {
 		_, cidr, err := ParseCIDRSloppy(cidrString)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err)
+			return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidr, err)
 		}
 		cidrs = append(cidrs, cidr)
 	}
 	return cidrs, nil
 }
 
-// IsDualStackIPs returns if a slice of ips is:
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPs(ips []net.IP) (bool, error) {
-	v4Found := false
-	v6Found := false
-	for _, ip := range ips {
-		if ip == nil {
-			return false, fmt.Errorf("ip %v is invalid", ip)
-		}
-
-		if v4Found && v6Found {
-			continue
-		}
-
-		if IsIPv6(ip) {
-			v6Found = true
-			continue
-		}
-
-		v4Found = true
-	}
-
-	return (v4Found && v6Found), nil
-}
-
-// IsDualStackIPStrings returns if
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPStrings(ips []string) (bool, error) {
-	parsedIPs := make([]net.IP, 0, len(ips))
-	for _, ip := range ips {
-		parsedIP := ParseIPSloppy(ip)
-		parsedIPs = append(parsedIPs, parsedIP)
-	}
-	return IsDualStackIPs(parsedIPs)
-}
-
-// IsDualStackCIDRs returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
-	v4Found := false
-	v6Found := false
-	for _, cidr := range cidrs {
-		if cidr == nil {
-			return false, fmt.Errorf("cidr %v is invalid", cidr)
-		}
-
-		if v4Found && v6Found {
-			continue
-		}
-
-		if IsIPv6(cidr.IP) {
-			v6Found = true
-			continue
-		}
-		v4Found = true
-	}
-
-	return v4Found && v6Found, nil
-}
-
-// IsDualStackCIDRStrings returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
-	parsedCIDRs, err := ParseCIDRs(cidrs)
-	if err != nil {
-		return false, err
-	}
-	return IsDualStackCIDRs(parsedCIDRs)
-}
-
-// IsIPv6 returns if netIP is IPv6.
-func IsIPv6(netIP net.IP) bool {
-	return netIP != nil && netIP.To4() == nil
-}
-
-// IsIPv6String returns if ip is IPv6.
-func IsIPv6String(ip string) bool {
-	netIP := ParseIPSloppy(ip)
-	return IsIPv6(netIP)
-}
-
-// IsIPv6CIDRString returns if cidr is IPv6.
-// This assumes cidr is a valid CIDR.
-func IsIPv6CIDRString(cidr string) bool {
-	ip, _, _ := ParseCIDRSloppy(cidr)
-	return IsIPv6(ip)
-}
-
-// IsIPv6CIDR returns if a cidr is ipv6
-func IsIPv6CIDR(cidr *net.IPNet) bool {
-	ip := cidr.IP
-	return IsIPv6(ip)
-}
-
-// IsIPv4 returns if netIP is IPv4.
-func IsIPv4(netIP net.IP) bool {
-	return netIP != nil && netIP.To4() != nil
-}
-
-// IsIPv4String returns if ip is IPv4.
-func IsIPv4String(ip string) bool {
-	netIP := ParseIPSloppy(ip)
-	return IsIPv4(netIP)
-}
-
-// IsIPv4CIDR returns if a cidr is ipv4
-func IsIPv4CIDR(cidr *net.IPNet) bool {
-	ip := cidr.IP
-	return IsIPv4(ip)
-}
-
-// IsIPv4CIDRString returns if cidr is IPv4.
-// This assumes cidr is a valid CIDR.
-func IsIPv4CIDRString(cidr string) bool {
-	ip, _, _ := ParseCIDRSloppy(cidr)
-	return IsIPv4(ip)
-}
-
 // ParsePort parses a string representing an IP port.  If the string is not a
 // valid port number, this returns an error.
 func ParsePort(port string, allowZero bool) (int, error) {
@@ -23,15 +23,6 @@ import (
 	"strings"
 )
 
-// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
-type IPFamily string
-
-// Constants for valid IPFamilys:
-const (
-	IPv4 IPFamily = "4"
-	IPv6          = "6"
-)
-
 // Protocol is a network protocol support by LocalPort.
 type Protocol string
 
@@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
 	if protocol != TCP && protocol != UDP {
 		return nil, fmt.Errorf("Unsupported protocol %s", protocol)
 	}
-	if ipFamily != "" && ipFamily != "4" && ipFamily != "6" {
+	if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 {
 		return nil, fmt.Errorf("Invalid IP family %s", ipFamily)
 	}
 	if ip != "" {
@@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
 		if parsedIP == nil {
 			return nil, fmt.Errorf("invalid ip address %s", ip)
 		}
-		asIPv4 := parsedIP.To4()
-		if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 {
-			return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+		if ipFamily != IPFamilyUnknown {
+			if IPFamily(parsedIP) != ipFamily {
+				return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+			}
 		}
 	}
 	return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil
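With the typed constants, callers of NewLocalPort no longer compare against bare "4"/"6" literals. A usage sketch with illustrative values:

package main

import (
	"fmt"

	utilnet "k8s.io/utils/net"
)

func main() {
	// Family validation now goes through the IPFamily constants.
	lp, err := utilnet.NewLocalPort("health check", "127.0.0.1", utilnet.IPv4, 8080, utilnet.TCP)
	if err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println(lp.String())
}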
@@ -53,6 +53,7 @@ func Int(i int) *int {
 }
 
 // IntPtr is a function variable referring to Int.
+//
 // Deprecated: Use Int instead.
 var IntPtr = Int // for back-compat
 
@@ -66,6 +67,7 @@ func IntDeref(ptr *int, def int) int {
 }
 
 // IntPtrDerefOr is a function variable referring to IntDeref.
+//
 // Deprecated: Use IntDeref instead.
 var IntPtrDerefOr = IntDeref // for back-compat
 
@@ -75,6 +77,7 @@ func Int32(i int32) *int32 {
 }
 
 // Int32Ptr is a function variable referring to Int32.
+//
 // Deprecated: Use Int32 instead.
 var Int32Ptr = Int32 // for back-compat
 
@@ -88,6 +91,7 @@ func Int32Deref(ptr *int32, def int32) int32 {
 }
 
 // Int32PtrDerefOr is a function variable referring to Int32Deref.
+//
 // Deprecated: Use Int32Deref instead.
 var Int32PtrDerefOr = Int32Deref // for back-compat
 
@@ -103,12 +107,73 @@ func Int32Equal(a, b *int32) bool {
 	return *a == *b
 }
 
+// Uint returns a pointer to an uint
+func Uint(i uint) *uint {
+	return &i
+}
+
+// UintPtr is a function variable referring to Uint.
+//
+// Deprecated: Use Uint instead.
+var UintPtr = Uint // for back-compat
+
+// UintDeref dereferences the uint ptr and returns it if not nil, or else
+// returns def.
+func UintDeref(ptr *uint, def uint) uint {
+	if ptr != nil {
+		return *ptr
+	}
+	return def
+}
+
+// UintPtrDerefOr is a function variable referring to UintDeref.
+//
+// Deprecated: Use UintDeref instead.
+var UintPtrDerefOr = UintDeref // for back-compat
+
+// Uint32 returns a pointer to an uint32.
+func Uint32(i uint32) *uint32 {
+	return &i
+}
+
+// Uint32Ptr is a function variable referring to Uint32.
+//
+// Deprecated: Use Uint32 instead.
+var Uint32Ptr = Uint32 // for back-compat
+
+// Uint32Deref dereferences the uint32 ptr and returns it if not nil, or else
+// returns def.
+func Uint32Deref(ptr *uint32, def uint32) uint32 {
+	if ptr != nil {
+		return *ptr
+	}
+	return def
+}
+
+// Uint32PtrDerefOr is a function variable referring to Uint32Deref.
+//
+// Deprecated: Use Uint32Deref instead.
+var Uint32PtrDerefOr = Uint32Deref // for back-compat
+
+// Uint32Equal returns true if both arguments are nil or both arguments
+// dereference to the same value.
+func Uint32Equal(a, b *uint32) bool {
+	if (a == nil) != (b == nil) {
+		return false
+	}
+	if a == nil {
+		return true
+	}
+	return *a == *b
+}
+
 // Int64 returns a pointer to an int64.
 func Int64(i int64) *int64 {
 	return &i
 }
 
 // Int64Ptr is a function variable referring to Int64.
+//
 // Deprecated: Use Int64 instead.
 var Int64Ptr = Int64 // for back-compat
 
@@ -122,6 +187,7 @@ func Int64Deref(ptr *int64, def int64) int64 {
 }
 
 // Int64PtrDerefOr is a function variable referring to Int64Deref.
+//
 // Deprecated: Use Int64Deref instead.
 var Int64PtrDerefOr = Int64Deref // for back-compat
 
|
@ -137,12 +203,49 @@ func Int64Equal(a, b *int64) bool {
|
|||
return *a == *b
|
||||
}
|
||||
|
||||
// Uint64 returns a pointer to an uint64.
|
||||
func Uint64(i uint64) *uint64 {
|
||||
return &i
|
||||
}
|
||||
|
||||
// Uint64Ptr is a function variable referring to Uint64.
|
||||
//
|
||||
// Deprecated: Use Uint64 instead.
|
||||
var Uint64Ptr = Uint64 // for back-compat
|
||||
|
||||
// Uint64Deref dereferences the uint64 ptr and returns it if not nil, or else
|
||||
// returns def.
|
||||
func Uint64Deref(ptr *uint64, def uint64) uint64 {
|
||||
if ptr != nil {
|
||||
return *ptr
|
||||
}
|
||||
return def
|
||||
}
|
||||
|
||||
// Uint64PtrDerefOr is a function variable referring to Uint64Deref.
|
||||
//
|
||||
// Deprecated: Use Uint64Deref instead.
|
||||
var Uint64PtrDerefOr = Uint64Deref // for back-compat
|
||||
|
||||
// Uint64Equal returns true if both arguments are nil or both arguments
|
||||
// dereference to the same value.
|
||||
func Uint64Equal(a, b *uint64) bool {
|
||||
if (a == nil) != (b == nil) {
|
||||
return false
|
||||
}
|
||||
if a == nil {
|
||||
return true
|
||||
}
|
||||
return *a == *b
|
||||
}
|
||||
|
||||
// Bool returns a pointer to a bool.
|
||||
func Bool(b bool) *bool {
|
||||
return &b
|
||||
}
|
||||
|
||||
// BoolPtr is a function variable referring to Bool.
|
||||
//
|
||||
// Deprecated: Use Bool instead.
|
||||
var BoolPtr = Bool // for back-compat
|
||||
|
||||
|
@ -156,6 +259,7 @@ func BoolDeref(ptr *bool, def bool) bool {
|
|||
}
|
||||
|
||||
// BoolPtrDerefOr is a function variable referring to BoolDeref.
|
||||
//
|
||||
// Deprecated: Use BoolDeref instead.
|
||||
var BoolPtrDerefOr = BoolDeref // for back-compat
|
||||
|
||||
|
@ -177,6 +281,7 @@ func String(s string) *string {
|
|||
}
|
||||
|
||||
// StringPtr is a function variable referring to String.
|
||||
//
|
||||
// Deprecated: Use String instead.
|
||||
var StringPtr = String // for back-compat
|
||||
|
||||
|
@ -190,6 +295,7 @@ func StringDeref(ptr *string, def string) string {
|
|||
}
|
||||
|
||||
// StringPtrDerefOr is a function variable referring to StringDeref.
|
||||
//
|
||||
// Deprecated: Use StringDeref instead.
|
||||
var StringPtrDerefOr = StringDeref // for back-compat
|
||||
|
||||
|
@ -211,6 +317,7 @@ func Float32(i float32) *float32 {
|
|||
}
|
||||
|
||||
// Float32Ptr is a function variable referring to Float32.
|
||||
//
|
||||
// Deprecated: Use Float32 instead.
|
||||
var Float32Ptr = Float32
|
||||
|
||||
|
@ -224,6 +331,7 @@ func Float32Deref(ptr *float32, def float32) float32 {
|
|||
}
|
||||
|
||||
// Float32PtrDerefOr is a function variable referring to Float32Deref.
|
||||
//
|
||||
// Deprecated: Use Float32Deref instead.
|
||||
var Float32PtrDerefOr = Float32Deref // for back-compat
|
||||
|
||||
|
@ -245,6 +353,7 @@ func Float64(i float64) *float64 {
|
|||
}
|
||||
|
||||
// Float64Ptr is a function variable referring to Float64.
|
||||
//
|
||||
// Deprecated: Use Float64 instead.
|
||||
var Float64Ptr = Float64
|
||||
|
||||
|
@ -258,6 +367,7 @@ func Float64Deref(ptr *float64, def float64) float64 {
|
|||
}
|
||||
|
||||
// Float64PtrDerefOr is a function variable referring to Float64Deref.
|
||||
//
|
||||
// Deprecated: Use Float64Deref instead.
|
||||
var Float64PtrDerefOr = Float64Deref // for back-compat
|
||||
|
||||
|
|
|
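The new unsigned helpers mirror the existing signed ones; a quick sketch of the Ptr/Deref/Equal pattern they all share, using only functions added in this diff:

package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	var replicas *uint32 // e.g. an optional field left unset in an API object

	// Deref returns the value if the pointer is non-nil, else the default.
	fmt.Println(pointer.Uint32Deref(replicas, 1)) // 1

	replicas = pointer.Uint32(3)
	fmt.Println(pointer.Uint32Deref(replicas, 1))                 // 3
	fmt.Println(pointer.Uint32Equal(replicas, pointer.Uint32(3))) // true
}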
@@ -21,6 +21,7 @@ import (
 	"context"
 	"fmt"
 	"math/rand"
+	"sync"
 	"time"
 
 	"k8s.io/klog/v2"
@@ -93,13 +94,16 @@ func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.T
 // Trace keeps track of a set of "steps" and allows us to log a specific
 // step if it took longer than its share of the total allowed time
 type Trace struct {
+	// constant fields
 	name        string
 	fields      []Field
-	threshold   *time.Duration
 	startTime   time.Time
-	endTime     *time.Time
-	traceItems  []traceItem
 	parentTrace *Trace
+	// fields guarded by a lock
+	lock       sync.RWMutex
+	threshold  *time.Duration
+	endTime    *time.Time
+	traceItems []traceItem
 }
 
 func (t *Trace) time() time.Time {
@@ -138,6 +142,8 @@ func New(name string, fields ...Field) *Trace {
 // how long it took. The Fields add key value pairs to provide additional details about the trace
 // step.
 func (t *Trace) Step(msg string, fields ...Field) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
 	if t.traceItems == nil {
 		// traces almost always have less than 6 steps, do this to avoid more than a single allocation
 		t.traceItems = make([]traceItem, 0, 6)
@@ -153,7 +159,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace {
 	newTrace := New(msg, fields...)
 	if t != nil {
 		newTrace.parentTrace = t
+		t.lock.Lock()
 		t.traceItems = append(t.traceItems, newTrace)
+		t.lock.Unlock()
 	}
 	return newTrace
 }
@@ -163,7 +171,9 @@ func (t *Trace) Nest(msg string, fields ...Field) *Trace {
 // is logged.
 func (t *Trace) Log() {
 	endTime := time.Now()
+	t.lock.Lock()
 	t.endTime = &endTime
+	t.lock.Unlock()
 	// an explicit logging request should dump all the steps out at the higher level
 	if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace
 		t.logTrace()
@@ -178,13 +188,17 @@ func (t *Trace) Log() {
 // If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it
 // is nested within is logged.
 func (t *Trace) LogIfLong(threshold time.Duration) {
+	t.lock.Lock()
 	t.threshold = &threshold
+	t.lock.Unlock()
 	t.Log()
 }
 
 // logTopLevelTraces finds all traces in a hierarchy of nested traces that should be logged but do not have any
 // parents that will be logged, due to threshold limits, and logs them as top level traces.
 func (t *Trace) logTrace() {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
 	if t.durationIsWithinThreshold() {
 		var buffer bytes.Buffer
 		traceNum := rand.Int31()
@@ -244,9 +258,13 @@ func (t *Trace) calculateStepThreshold() *time.Duration {
 	traceThreshold := *t.threshold
 	for _, s := range t.traceItems {
 		nestedTrace, ok := s.(*Trace)
-		if ok && nestedTrace.threshold != nil {
-			traceThreshold = traceThreshold - *nestedTrace.threshold
-			lenTrace--
+		if ok {
+			nestedTrace.lock.RLock()
+			if nestedTrace.threshold != nil {
+				traceThreshold = traceThreshold - *nestedTrace.threshold
+				lenTrace--
+			}
+			nestedTrace.lock.RUnlock()
 		}
 	}
 
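A usage sketch of the traced API whose internals are locked above; the trace name and fields are illustrative:

package main

import (
	"time"

	utiltrace "k8s.io/utils/trace"
)

func main() {
	tr := utiltrace.New("reconcile", utiltrace.Field{Key: "cluster", Value: "example"})
	// With the RWMutex added above, Step/Nest/LogIfLong may now be called
	// from concurrent goroutines sharing one Trace.
	defer tr.LogIfLong(100 * time.Millisecond) // only logged if the trace exceeds the threshold

	tr.Step("fetched objects")
	tr.Step("updated status")
}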
@@ -1053,8 +1053,11 @@ github.com/openshift/client-go/security/clientset/versioned/typed/security/v1
 # github.com/openshift/console-operator v0.0.0-20220407014945-45d37e70e0c2 => github.com/openshift/console-operator v0.0.0-20220318130441-e44516b9c315
 ## explicit; go 1.16
 github.com/openshift/console-operator/pkg/api
-# github.com/openshift/hive/apis v0.0.0 => github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8
-## explicit; go 1.18
+# github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87
+## explicit; go 1.12
+github.com/openshift/custom-resource-status/conditions/v1
+# github.com/openshift/hive/apis v0.0.0 => github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0
+## explicit; go 1.20
 github.com/openshift/hive/apis/hive/v1
 github.com/openshift/hive/apis/hive/v1/agent
 github.com/openshift/hive/apis/hive/v1/alibabacloud
@@ -1517,7 +1520,7 @@ gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
-# k8s.io/api v0.25.0 => k8s.io/api v0.23.0
+# k8s.io/api v0.26.2 => k8s.io/api v0.23.0
 ## explicit; go 1.16
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -1583,7 +1586,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
 k8s.io/apiextensions-apiserver/third_party/forked/celopenapi/model
-# k8s.io/apimachinery v0.25.0 => k8s.io/apimachinery v0.23.0
+# k8s.io/apimachinery v0.26.2 => k8s.io/apimachinery v0.23.0
 ## explicit; go 1.16
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -1969,7 +1972,7 @@ k8s.io/gengo/generator
 k8s.io/gengo/namer
 k8s.io/gengo/parser
 k8s.io/gengo/types
-# k8s.io/klog/v2 v2.70.1
+# k8s.io/klog/v2 v2.90.1
 ## explicit; go 1.13
 k8s.io/klog/v2
 k8s.io/klog/v2/internal/buffer
@@ -2018,8 +2021,8 @@ k8s.io/kubernetes/pkg/apis/rbac/v1
 k8s.io/kubernetes/pkg/features
 k8s.io/kubernetes/pkg/kubelet/events
 k8s.io/kubernetes/pkg/util/parsers
-# k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed
-## explicit; go 1.12
+# k8s.io/utils v0.0.0-20230313181309-38a27ef9d749
+## explicit; go 1.18
 k8s.io/utils/buffer
 k8s.io/utils/clock
 k8s.io/utils/clock/testing
@@ -2092,7 +2095,7 @@ sigs.k8s.io/controller-tools/pkg/schemapatcher
 sigs.k8s.io/controller-tools/pkg/schemapatcher/internal/yaml
 sigs.k8s.io/controller-tools/pkg/version
 sigs.k8s.io/controller-tools/pkg/webhook
-# sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2
+# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
 ## explicit; go 1.18
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
@@ -2301,5 +2304,5 @@ sigs.k8s.io/yaml
 # sigs.k8s.io/structured-merge-diff => sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06
 # sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1
 # vbom.ml/util => github.com/fvbommel/util v0.0.3
-# github.com/openshift/hive => github.com/openshift/hive v1.1.17-0.20220719141355-c63c9b0281d8
-# github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20220719141355-c63c9b0281d8
+# github.com/openshift/hive => github.com/openshift/hive v1.1.17-0.20230811220652-70b666ec89b0
+# github.com/openshift/hive/apis => github.com/openshift/hive/apis v0.0.0-20230811220652-70b666ec89b0
@@ -75,6 +75,8 @@ import (
 // either be any string type, an integer, implement json.Unmarshaler, or
 // implement encoding.TextUnmarshaler.
 //
+// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError.
+//
 // If a JSON value is not appropriate for a given target type,
 // or if a JSON number overflows the target type, Unmarshal
 // skips that field and completes the unmarshaling as best it can.
@@ -85,14 +87,13 @@ import (
 //
 // The JSON null value unmarshals into an interface, map, pointer, or slice
 // by setting that Go value to nil. Because null is often used in JSON to mean
-// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// “not present,” unmarshaling a JSON null into any other Go type has no effect
 // on the value and produces no error.
 //
 // When unmarshaling quoted strings, invalid UTF-8 or
 // invalid UTF-16 surrogate pairs are not treated as an error.
 // Instead, they are replaced by the Unicode replacement
 // character U+FFFD.
-//
 func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error {
 	// Check for well-formedness.
 	// Avoids filling out half a data structure
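The null-handling rule documented above is the same as stdlib encoding/json; a quick demonstration of both halves of the rule:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	n := 42
	p := new(string)

	// null into a non-pointer target has no effect and no error...
	_ = json.Unmarshal([]byte(`null`), &n)
	fmt.Println(n) // 42

	// ...but null into a pointer, map, slice, or interface sets it to nil.
	_ = json.Unmarshal([]byte(`null`), &p)
	fmt.Println(p == nil) // true
}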
@@ -77,31 +77,31 @@ import (
 //
 // Examples of struct field tags and their meanings:
 //
 //	// Field appears in JSON as key "myName".
 //	Field int `json:"myName"`
 //
 //	// Field appears in JSON as key "myName" and
 //	// the field is omitted from the object if its value is empty,
 //	// as defined above.
 //	Field int `json:"myName,omitempty"`
 //
 //	// Field appears in JSON as key "Field" (the default), but
 //	// the field is skipped if empty.
 //	// Note the leading comma.
 //	Field int `json:",omitempty"`
 //
 //	// Field is ignored by this package.
 //	Field int `json:"-"`
 //
 //	// Field appears in JSON as key "-".
 //	Field int `json:"-,"`
 //
 // The "string" option signals that a field is stored as JSON inside a
 // JSON-encoded string. It applies only to fields of string, floating point,
 // integer, or boolean types. This extra level of encoding is sometimes used
 // when communicating with JavaScript programs:
 //
 //	Int64String int64 `json:",string"`
 //
 // The key name will be used if it's a non-empty string consisting of
 // only Unicode letters, digits, and ASCII punctuation except quotation
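The tag forms documented above in one runnable example (standard encoding/json semantics, which this forked package mirrors):

package main

import (
	"encoding/json"
	"fmt"
)

type Widget struct {
	Name  string `json:"myName"`
	Count int    `json:"count,omitempty"` // dropped when zero
	Skip  string `json:"-"`               // never serialized
	ID    int64  `json:",string"`         // encoded as a JSON string
}

func main() {
	out, _ := json.Marshal(Widget{Name: "w", ID: 7})
	fmt.Println(string(out)) // {"myName":"w","ID":"7"}
}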
@@ -154,7 +154,6 @@ import (
 // JSON cannot represent cyclic data structures and Marshal does not
 // handle them. Passing cyclic structures to Marshal will result in
 // an error.
-//
 func Marshal(v any) ([]byte, error) {
 	e := newEncodeState()
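A minimal sketch of the cycle error described above, using standard encoding/json (which produces the same "encountered a cycle" error text as this fork):

package main

import (
	"encoding/json"
	"fmt"
)

type node struct {
	Next *node `json:"next"`
}

func main() {
	n := &node{}
	n.Next = n // a cycle
	_, err := json.Marshal(n)
	fmt.Println(err) // json: unsupported value: encountered a cycle via *main.node
}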
@@ -784,7 +783,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
 	if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter {
 		// We're a large number of nested ptrEncoder.encode calls deep;
 		// start checking if we've run into a pointer cycle.
-		ptr := v.Pointer()
+		ptr := v.UnsafePointer()
 		if _, ok := e.ptrSeen[ptr]; ok {
 			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
 		}
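The change above swaps reflect.Value.Pointer for reflect.Value.UnsafePointer, which was added in Go 1.18 and returns an unsafe.Pointer instead of a bare uintptr; a small sketch of the difference:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	p := new(int)
	v := reflect.ValueOf(p)
	fmt.Println(v.Pointer())       // uintptr: just a number, invisible to the GC
	fmt.Println(v.UnsafePointer()) // unsafe.Pointer (Go 1.18+): a real pointer value
}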
@@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
 		// Here we use a struct to memorize the pointer to the first element of the slice
 		// and its length.
 		ptr := struct {
-			ptr uintptr
+			ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe
 			len int
-		}{v.Pointer(), v.Len()}
+		}{v.UnsafePointer(), v.Len()}
 		if _, ok := e.ptrSeen[ptr]; ok {
 			e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())})
 		}
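The memorized {pointer, length} key matters because two different slices can share a first element; a sketch of that idea using a hypothetical sliceKey type holding unsafe.Pointer directly (the vendored code stores the pointer in an interface{} to avoid importing unsafe):

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	backing := []int{1, 2, 3}
	a, b := backing[:2], backing[:3] // same first element, different lengths

	type sliceKey struct {
		ptr unsafe.Pointer
		len int
	}
	ka := sliceKey{unsafe.Pointer(&a[0]), len(a)}
	kb := sliceKey{unsafe.Pointer(&b[0]), len(b)}
	fmt.Println(ka == kb) // false: the length disambiguates the two views
}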
@@ -24,8 +24,9 @@ const (
 // 4) simpleLetterEqualFold, no specials, no non-letters.
 //
 // The letters S and K are special because they map to 3 runes, not just 2:
-//  * S maps to s and to U+017F 'ſ' Latin small letter long s
-//  * k maps to K and to U+212A 'K' Kelvin sign
+//   - S maps to s and to U+017F 'ſ' Latin small letter long s
+//   - k maps to K and to U+212A 'K' Kelvin sign
 //
 // See https://play.golang.org/p/tTxjOc0OGo
+//
 // The returned function is specialized for matching against s and
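The special folding of S and K can be observed with strings.EqualFold, which applies the same Unicode simple case folding:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(strings.EqualFold("k", "\u212A")) // true: Kelvin sign K folds to k
	fmt.Println(strings.EqualFold("s", "\u017F")) // true: long s ſ folds to s
	fmt.Println(strings.EqualFold("sky", "SKY"))  // true: plain ASCII folding
}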
@@ -27,6 +27,7 @@ func Valid(data []byte) bool {
 
 // checkValid verifies that data is valid JSON-encoded data.
 // scan is passed in for use by checkValid to avoid an allocation.
+// checkValid returns nil or a SyntaxError.
 func checkValid(data []byte, scan *scanner) error {
 	scan.reset()
 	for _, c := range data {
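For context, the exported wrapper of checkValid behaves like this (standard encoding/json shown; the fork's Valid matches it):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	fmt.Println(json.Valid([]byte(`{"a": 1}`))) // true
	fmt.Println(json.Valid([]byte(`{"a": }`)))  // false: scanned, never decoded
}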
@@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error {
 }
 
 // A SyntaxError is a description of a JSON syntax error.
+// Unmarshal will return a SyntaxError if the JSON can't be parsed.
 type SyntaxError struct {
 	msg    string // description of error
 	Offset int64  // error occurred after reading Offset bytes
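A short sketch of inspecting the Offset field after a parse failure, using the standard library's identically shaped SyntaxError:

package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

func main() {
	var v any
	err := json.Unmarshal([]byte(`{"a": }`), &v)

	var syn *json.SyntaxError
	if errors.As(err, &syn) {
		fmt.Println(syn.Error(), "at byte", syn.Offset) // invalid character '}' ... at byte 7
	}
}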
@@ -289,7 +289,6 @@ var _ Unmarshaler = (*RawMessage)(nil)
 //	Number, for JSON numbers
 //	string, for JSON string literals
 //	nil, for JSON null
-//
 type Token any
 */
@@ -34,13 +34,13 @@ type Decoder interface {
 }
 
 // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes:
-// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
-//   or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
-// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
-//   the JSON data does not contain a "." character and parses as an integer successfully and
-//   does not overflow int64. Otherwise, the number is unmarshaled as a float64.
-// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
-//   but will be recognizeable by this package's IsSyntaxError() function.
+//   - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
+//     or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
+//   - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
+//     the JSON data does not contain a "." character and parses as an integer successfully and
+//     does not overflow int64. Otherwise, the number is unmarshaled as a float64.
+//   - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
+//     but will be recognizeable by this package's IsSyntaxError() function.
 func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder {
 	d := internaljson.NewDecoder(r)
 	d.CaseSensitive()
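A hedged usage sketch for this decoder, assuming sigs.k8s.io/json is importable and that the returned Decoder exposes the usual Decode method (implied by the encoding/json#NewDecoder parity):

package main

import (
	"fmt"
	"strings"

	sigsjson "sigs.k8s.io/json"
)

func main() {
	dec := sigsjson.NewDecoderCaseSensitivePreserveInts(strings.NewReader(`{"replicas": 3}`))
	var obj map[string]interface{}
	if err := dec.Decode(&obj); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", obj["replicas"]) // int64, where encoding/json would give float64
}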
@@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder {
 // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v.
 //
 // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes:
-// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
-//   or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
-// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
-//   the JSON data does not contain a "." character and parses as an integer successfully and
-//   does not overflow int64. Otherwise, the number is unmarshaled as a float64.
-// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
-//   but will be recognizeable by this package's IsSyntaxError() function.
+//   - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields)
+//     or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded.
+//   - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if
+//     the JSON data does not contain a "." character and parses as an integer successfully and
+//     does not overflow int64. Otherwise, the number is unmarshaled as a float64.
+//   - If a syntax error is returned, it will not be of type encoding/json#SyntaxError,
+//     but will be recognizeable by this package's IsSyntaxError() function.
 func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error {
 	return internaljson.Unmarshal(
 		data,
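And a hedged usage sketch for the function above, exercising the int64-preservation rule with 2^53+1, a value float64 cannot represent exactly:

package main

import (
	"fmt"

	sigsjson "sigs.k8s.io/json"
)

func main() {
	var v interface{}
	if err := sigsjson.UnmarshalCaseSensitivePreserveInts([]byte(`9007199254740993`), &v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v, v) // int64 9007199254740993, preserved exactly
}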