Amber Brown 2022-07-04 08:27:40 +10:00 committed by Mikalai Radchuk
Parent 07108b5a7b
Commit 4dd364aac5
114 changed files with 13943 additions and 81 deletions

15
vendor/github.com/openshift/cluster-api/pkg/apis/machine/common/BUILD.bazel generated vendored

@@ -1,15 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"consts.go",
"plugins.go",
],
importpath = "github.com/openshift/cluster-api/pkg/apis/machine/common",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

50
vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/BUILD.bazel generated vendored

@@ -1,50 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"common_types.go",
"defaults.go",
"doc.go",
"machine_types.go",
"machineclass_types.go",
"machinedeployment_types.go",
"machineset_types.go",
"register.go",
"zz_generated.deepcopy.go",
],
importpath = "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/machine/common:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/scheme:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"machine_types_test.go",
"machinedeployment_types_test.go",
"machineset_types_test.go",
"v1alpha1_suite_test.go",
],
embed = [":go_default_library"],
deps = [
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/client:go_default_library",
"//vendor/sigs.k8s.io/controller-runtime/pkg/envtest:go_default_library",
],
)

9
vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machine_types.go generated vendored

@@ -46,13 +46,14 @@ const (
// Machine is the Schema for the machines API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\.openshift\.io/instance-state']",description="State of instance"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/instance-type']",description="Type of instance"
// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/region']",description="Region associated with machine"
// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\.openshift\.io/zone']",description="Zone associated with machine"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Phase of machine"
// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/instance-type']",description="Type of instance"
// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/region']",description="Region associated with machine"
// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/zone']",description="Zone associated with machine"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age"
// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1
// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1
// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\\.openshift\\.io/instance-state']",description="State of instance",priority=1
type Machine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`

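A note on the escaping change above: controller-gen parses kubebuilder marker arguments with its own escape handling, so the literal \. that a CRD JSONPath needs inside a bracketed key (it keeps the dots of machine.openshift.io from being read as path separators) must be written as \\. in the Go comment. A minimal sketch of the two layers, under that assumption:

// In the Go source, the marker carries a double escape:
// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/zone']",description="Zone associated with machine"
//
// After marker parsing, the CRD printer column receives a single escape:
//   .metadata.labels['machine\.openshift\.io/zone']
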
2
vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/machineset_types.go generated vendored

@@ -66,7 +66,7 @@ type MachineSetSpec struct {
// DeletePolicy defines the policy used to identify nodes to delete when downscaling.
// Defaults to "Random". Valid values are "Random", "Newest", "Oldest"
// +kubebuilder:validation:Enum=Random,Newest,Oldest
// +kubebuilder:validation:Enum=Random;Newest;Oldest
DeletePolicy string `json:"deletePolicy,omitempty"`
// Selector is a label query over machines that should match the replica count.

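The Enum rewrite above looks like part of the same generator migration: controller-gen separates list values with semicolons, presumably because commas can occur inside a value. Old and new forms side by side (values from the diff above):

// legacy kubebuilder annotation syntax:
// +kubebuilder:validation:Enum=Random,Newest,Oldest
//
// controller-gen syntax:
// +kubebuilder:validation:Enum=Random;Newest;Oldest
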
8
vendor/github.com/openshift/cluster-api/pkg/apis/machine/v1beta1/zz_generated.deepcopy.go generated vendored

@@ -121,7 +121,7 @@ func (in *MachineClass) DeepCopyObject() runtime.Object {
func (in *MachineClassList) DeepCopyInto(out *MachineClassList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MachineClass, len(*in))
@@ -182,7 +182,7 @@ func (in *MachineDeployment) DeepCopyObject() runtime.Object {
func (in *MachineDeploymentList) DeepCopyInto(out *MachineDeploymentList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MachineDeployment, len(*in))
@@ -295,7 +295,7 @@ func (in *MachineDeploymentStrategy) DeepCopy() *MachineDeploymentStrategy {
func (in *MachineList) DeepCopyInto(out *MachineList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Machine, len(*in))
@@ -382,7 +382,7 @@ func (in *MachineSet) DeepCopyObject() runtime.Object {
func (in *MachineSetList) DeepCopyInto(out *MachineSetList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]MachineSet, len(*in))

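The recurring ListMeta change above is mechanical but meaningful: in newer k8s.io/apimachinery, metav1.ListMeta is no longer a flat value type (it gained RemainingItemCount *int64), so plain struct assignment would leave the original and the copy sharing one pointer. A minimal sketch of the aliasing that the regenerated DeepCopyInto avoids, assuming that newer ListMeta shape:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	n := int64(3)
	orig := metav1.ListMeta{RemainingItemCount: &n}

	shallow := orig // old style: both values share the same *int64
	*orig.RemainingItemCount = 7
	fmt.Println(*shallow.RemainingItemCount) // 7: the "copy" changed too

	var deep metav1.ListMeta
	orig.DeepCopyInto(&deep) // new style: allocates a fresh *int64
	*orig.RemainingItemCount = 9
	fmt.Println(*deep.RemainingItemCount) // 7: unaffected
}
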
190
vendor/github.com/openshift/hive/LICENSE generated vendored Normal file

@@ -0,0 +1,190 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2018 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

190
vendor/github.com/openshift/hive/apis/LICENSE generated vendored Normal file

@@ -0,0 +1,190 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2018 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

4
vendor/github.com/openshift/hive/apis/hive/v1/agent/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
// Package agent contains API Schema definitions for assisted agent based installations.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package agent

12
vendor/github.com/openshift/hive/apis/hive/v1/agent/platform.go generated vendored Normal file

@@ -0,0 +1,12 @@
package agent
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// BareMetalPlatform defines agent based install configuration specific to bare metal clusters.
// Can only be used with spec.installStrategy.agent.
type BareMetalPlatform struct {
// AgentSelector is a label selector used for associating relevant custom resources with this cluster.
// (Agent, BareMetalHost, etc)
AgentSelector metav1.LabelSelector `json:"agentSelector"`
}
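
For orientation, a hypothetical way a consumer could populate this type; the label key below is illustrative, not taken from the source:

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/hive/apis/hive/v1/agent"
)

func main() {
	p := agent.BareMetalPlatform{
		AgentSelector: metav1.LabelSelector{
			// hypothetical label tying Agents/BareMetalHosts to this cluster
			MatchLabels: map[string]string{"cluster-name": "my-cluster"},
		},
	}
	_ = p
}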

23
vendor/github.com/openshift/hive/apis/hive/v1/agent/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,23 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package agent
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BareMetalPlatform) DeepCopyInto(out *BareMetalPlatform) {
*out = *in
in.AgentSelector.DeepCopyInto(&out.AgentSelector)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatform.
func (in *BareMetalPlatform) DeepCopy() *BareMetalPlatform {
if in == nil {
return nil
}
out := new(BareMetalPlatform)
in.DeepCopyInto(out)
return out
}

4
vendor/github.com/openshift/hive/apis/hive/v1/alibabacloud/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
// Package alibabacloud contains API Schema definitions for Alibaba Cloud clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package alibabacloud

41
vendor/github.com/openshift/hive/apis/hive/v1/alibabacloud/machinepool.go generated vendored Normal file

@@ -0,0 +1,41 @@
package alibabacloud
// DiskCategory is the category of the ECS disk. Supported disk categories:
// cloud_essd (ESSD disk), cloud_efficiency (ultra disk).
//
// +kubebuilder:validation:Enum="";cloud_efficiency;cloud_essd
type DiskCategory string
// MachinePool stores the configuration for a machine pool installed
// on Alibaba Cloud.
type MachinePool struct {
// Zones is the list of availability zones that can be used.
// e.g. ["cn-hangzhou-i", "cn-hangzhou-h", "cn-hangzhou-j"]
//
// +optional
Zones []string `json:"zones,omitempty"`
// InstanceType defines the ECS instance type.
// e.g. ecs.g6.large
//
// +optional
InstanceType string `json:"instanceType,omitempty"`
// SystemDiskCategory defines the category of the system disk.
//
// +optional
SystemDiskCategory DiskCategory `json:"systemDiskCategory,omitempty"`
// SystemDiskSize defines the size of the system disk in gibibytes (GiB).
//
// +kubebuilder:validation:Type=integer
// +kubebuilder:validation:Minimum=120
// +optional
SystemDiskSize int `json:"systemDiskSize,omitempty"`
// ImageID is the Image ID that should be used to create ECS instance.
// If set, the ImageID should belong to the same region as the cluster.
//
// +optional
ImageID string `json:"imageID,omitempty"`
}
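
A sketch of a MachinePool literal assembled from the doc comments above; the zone, instance type, and disk values are the examples given there, and SystemDiskSize respects the declared minimum:

package main

import "github.com/openshift/hive/apis/hive/v1/alibabacloud"

func main() {
	mp := alibabacloud.MachinePool{
		Zones:              []string{"cn-hangzhou-i", "cn-hangzhou-h"},
		InstanceType:       "ecs.g6.large",
		SystemDiskCategory: "cloud_essd", // one of the two allowed DiskCategory values
		SystemDiskSize:     120,          // kubebuilder enforces Minimum=120 (GiB)
	}
	_ = mp
}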

16
vendor/github.com/openshift/hive/apis/hive/v1/alibabacloud/platform.go generated vendored Normal file

@@ -0,0 +1,16 @@
package alibabacloud
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global configuration that all machinesets use.
type Platform struct {
// CredentialsSecretRef refers to a secret that contains Alibaba Cloud account access
// credentials.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region specifies the Alibaba Cloud region where the cluster will be
// created.
Region string `json:"region"`
}

44
vendor/github.com/openshift/hive/apis/hive/v1/alibabacloud/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,44 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package alibabacloud
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

3
vendor/github.com/openshift/hive/apis/hive/v1/aws/doc.go generated vendored Normal file

@@ -0,0 +1,3 @@
// Package aws contains API Schema definitions for AWS clusters.
// +k8s:deepcopy-gen=package,register
package aws

50
vendor/github.com/openshift/hive/apis/hive/v1/aws/machinepool.go generated vendored Normal file

@@ -0,0 +1,50 @@
package aws
// MachinePoolPlatform stores the configuration for a machine pool
// installed on AWS.
type MachinePoolPlatform struct {
// Zones is the list of availability zones that can be used.
Zones []string `json:"zones,omitempty"`
// Subnets is the list of subnets to which to attach the machines.
// There must be exactly one private subnet for each availability zone used.
// If public subnets are specified, there must be exactly one private and one public subnet specified for each availability zone.
Subnets []string `json:"subnets,omitempty"`
// InstanceType defines the ec2 instance type.
// e.g. m4.large
InstanceType string `json:"type"`
// EC2RootVolume defines the storage for ec2 instance.
EC2RootVolume `json:"rootVolume"`
// SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.
// +optional
SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"`
}
// SpotMarketOptions defines the options available to a user when configuring
// Machines to run on Spot instances.
// Most users should provide an empty struct.
type SpotMarketOptions struct {
// The maximum price the user is willing to pay for their instances
// Default: On-Demand price
// +optional
MaxPrice *string `json:"maxPrice,omitempty"`
}
// EC2RootVolume defines the storage for an ec2 instance.
type EC2RootVolume struct {
// IOPS defines the iops for the storage.
// +optional
IOPS int `json:"iops,omitempty"`
// Size defines the size of the storage.
Size int `json:"size"`
// Type defines the type of the storage.
Type string `json:"type"`
// The KMS key that will be used to encrypt the EBS volume.
// If no key is provided the default KMS key for the account will be used.
// https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetEbsDefaultKmsKeyId.html
// +optional
KMSKeyARN string `json:"kmsKeyARN,omitempty"`
}
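
A sketch of a MachinePoolPlatform literal; the instance type, zones, and volume values are illustrative. The empty SpotMarketOptions relies on the documented behavior that a nil MaxPrice defaults the cap to the on-demand price:

package main

import "github.com/openshift/hive/apis/hive/v1/aws"

func main() {
	pool := aws.MachinePoolPlatform{
		InstanceType:      "m5.large",
		Zones:             []string{"us-east-1a", "us-east-1b"},
		EC2RootVolume:     aws.EC2RootVolume{Size: 120, Type: "gp3"},
		SpotMarketOptions: &aws.SpotMarketOptions{}, // Spot, capped at the on-demand price
	}
	_ = pool
}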

69
vendor/github.com/openshift/hive/apis/hive/v1/aws/platform.go generated vendored Normal file

@@ -0,0 +1,69 @@
package aws
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global configuration that
// all machinesets use.
type Platform struct {
// CredentialsSecretRef refers to a secret that contains the AWS account access
// credentials.
// +optional
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
// CredentialsAssumeRole refers to the IAM role that must be assumed to obtain
// AWS account access for the cluster operations.
// +optional
CredentialsAssumeRole *AssumeRole `json:"credentialsAssumeRole,omitempty"`
// Region specifies the AWS region where the cluster will be created.
Region string `json:"region"`
// UserTags specifies additional tags for AWS resources created for the cluster.
// +optional
UserTags map[string]string `json:"userTags,omitempty"`
// PrivateLink allows users to enable access to the cluster's API server using AWS
// PrivateLink. AWS PrivateLink includes a pair of VPC Endpoint Service and VPC
// Endpoint across AWS accounts and allows clients to connect to services using AWS's
// internal networking instead of the Internet.
PrivateLink *PrivateLinkAccess `json:"privateLink,omitempty"`
}
// PlatformStatus contains the observed state on AWS platform.
type PlatformStatus struct {
PrivateLink *PrivateLinkAccessStatus `json:"privateLink,omitempty"`
}
// PrivateLinkAccess configures access to the cluster API using AWS PrivateLink
type PrivateLinkAccess struct {
Enabled bool `json:"enabled"`
}
// PrivateLinkAccessStatus contains the observed state for PrivateLinkAccess resources.
type PrivateLinkAccessStatus struct {
// +optional
VPCEndpointService VPCEndpointService `json:"vpcEndpointService,omitempty"`
// +optional
VPCEndpointID string `json:"vpcEndpointID,omitempty"`
// +optional
HostedZoneID string `json:"hostedZoneID,omitempty"`
}
type VPCEndpointService struct {
Name string `json:"name,omitempty"`
ID string `json:"id,omitempty"`
}
// AssumeRole stores information for the IAM role that needs to be assumed
// using an existing AWS session.
type AssumeRole struct {
RoleARN string `json:"roleARN"`
// ExternalID is a random string generated by the platform so that the assume-role
// call is protected from the confused deputy problem.
// more info: https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html
// +optional
ExternalID string `json:"externalID,omitempty"`
}
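
A sketch showing the assume-role path through Platform; the ARN, external ID, and region are illustrative placeholders:

package main

import "github.com/openshift/hive/apis/hive/v1/aws"

func main() {
	platform := aws.Platform{
		Region: "us-east-1",
		CredentialsAssumeRole: &aws.AssumeRole{
			RoleARN:    "arn:aws:iam::123456789012:role/hive-cluster-ops", // hypothetical role
			ExternalID: "0f7efa53", // hypothetical value; guards against the confused deputy problem
		},
		PrivateLink: &aws.PrivateLinkAccess{Enabled: true},
	}
	_ = platform
}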

195
vendor/github.com/openshift/hive/apis/hive/v1/aws/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,195 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package aws
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AssumeRole) DeepCopyInto(out *AssumeRole) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AssumeRole.
func (in *AssumeRole) DeepCopy() *AssumeRole {
if in == nil {
return nil
}
out := new(AssumeRole)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EC2RootVolume) DeepCopyInto(out *EC2RootVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EC2RootVolume.
func (in *EC2RootVolume) DeepCopy() *EC2RootVolume {
if in == nil {
return nil
}
out := new(EC2RootVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePoolPlatform) DeepCopyInto(out *MachinePoolPlatform) {
*out = *in
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make([]string, len(*in))
copy(*out, *in)
}
out.EC2RootVolume = in.EC2RootVolume
if in.SpotMarketOptions != nil {
in, out := &in.SpotMarketOptions, &out.SpotMarketOptions
*out = new(SpotMarketOptions)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolPlatform.
func (in *MachinePoolPlatform) DeepCopy() *MachinePoolPlatform {
if in == nil {
return nil
}
out := new(MachinePoolPlatform)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
if in.CredentialsAssumeRole != nil {
in, out := &in.CredentialsAssumeRole, &out.CredentialsAssumeRole
*out = new(AssumeRole)
**out = **in
}
if in.UserTags != nil {
in, out := &in.UserTags, &out.UserTags
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.PrivateLink != nil {
in, out := &in.PrivateLink, &out.PrivateLink
*out = new(PrivateLinkAccess)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) {
*out = *in
if in.PrivateLink != nil {
in, out := &in.PrivateLink, &out.PrivateLink
*out = new(PrivateLinkAccessStatus)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlatformStatus.
func (in *PlatformStatus) DeepCopy() *PlatformStatus {
if in == nil {
return nil
}
out := new(PlatformStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateLinkAccess) DeepCopyInto(out *PrivateLinkAccess) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkAccess.
func (in *PrivateLinkAccess) DeepCopy() *PrivateLinkAccess {
if in == nil {
return nil
}
out := new(PrivateLinkAccess)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PrivateLinkAccessStatus) DeepCopyInto(out *PrivateLinkAccessStatus) {
*out = *in
out.VPCEndpointService = in.VPCEndpointService
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateLinkAccessStatus.
func (in *PrivateLinkAccessStatus) DeepCopy() *PrivateLinkAccessStatus {
if in == nil {
return nil
}
out := new(PrivateLinkAccessStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) {
*out = *in
if in.MaxPrice != nil {
in, out := &in.MaxPrice, &out.MaxPrice
*out = new(string)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions.
func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
if in == nil {
return nil
}
out := new(SpotMarketOptions)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VPCEndpointService) DeepCopyInto(out *VPCEndpointService) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VPCEndpointService.
func (in *VPCEndpointService) DeepCopy() *VPCEndpointService {
if in == nil {
return nil
}
out := new(VPCEndpointService)
in.DeepCopyInto(out)
return out
}

48
vendor/github.com/openshift/hive/apis/hive/v1/azure/disk.go generated vendored Normal file

@@ -0,0 +1,48 @@
package azure
import "fmt"
// ToID creates an Azure resource ID for the disk encryption set.
// It is possible to return a non-valid ID when SubscriptionID is empty. This
// should never happen since if SubscriptionID is empty, it is set to the
// current subscription. Also, should it somehow be empty and this returns an
// invalid ID, the validation code will produce an error when checked against
// the validation.RxDiskEncryptionSetID regular expression.
func (d *DiskEncryptionSet) ToID() string {
return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/diskEncryptionSets/%s",
d.SubscriptionID, d.ResourceGroup, d.Name)
}
// OSDisk defines the disk for machines on Azure.
type OSDisk struct {
// DiskSizeGB defines the size of disk in GB.
//
// +kubebuilder:validation:Minimum=0
DiskSizeGB int32 `json:"diskSizeGB"`
// DiskType defines the type of disk.
// For control plane nodes, the valid values are Premium_LRS and StandardSSD_LRS.
// Default is Premium_LRS.
// +optional
// +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;StandardSSD_LRS
DiskType string `json:"diskType,omitempty"`
// DiskEncryptionSet defines a disk encryption set.
//
// +optional
*DiskEncryptionSet `json:"diskEncryptionSet,omitempty"`
}
// DiskEncryptionSet defines the configuration for a disk encryption set.
type DiskEncryptionSet struct {
// SubscriptionID defines the Azure subscription the disk encryption
// set is in.
SubscriptionID string `json:"subscriptionId,omitempty"`
// ResourceGroup defines the Azure resource group used by the disk
// encryption set.
ResourceGroup string `json:"resourceGroup"`
// Name is the name of the disk encryption set.
Name string `json:"name"`
}
// DefaultDiskType holds the default Azure disk type used by the VMs.
const DefaultDiskType string = "Premium_LRS"
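
Since ToID is a plain Sprintf over the three fields, its output is easy to demonstrate; the values here are illustrative:

package main

import (
	"fmt"

	"github.com/openshift/hive/apis/hive/v1/azure"
)

func main() {
	des := azure.DiskEncryptionSet{
		SubscriptionID: "00000000-0000-0000-0000-000000000000",
		ResourceGroup:  "my-rg",
		Name:           "my-des",
	}
	fmt.Println(des.ToID())
	// /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Compute/diskEncryptionSets/my-des
}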

4
vendor/github.com/openshift/hive/apis/hive/v1/azure/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
// Package azure contains API Schema definitions for Azure clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package azure

32
vendor/github.com/openshift/hive/apis/hive/v1/azure/machinepool.go generated vendored Normal file

@@ -0,0 +1,32 @@
package azure
// MachinePool stores the configuration for a machine pool installed
// on Azure.
type MachinePool struct {
// Zones is the list of availability zones that can be used.
// e.g. ["1", "2", "3"]
Zones []string `json:"zones,omitempty"`
// InstanceType defines the azure instance type.
// e.g. Standard_DS_V2
InstanceType string `json:"type"`
// OSDisk defines the storage for instance.
OSDisk `json:"osDisk"`
// OSImage defines the image to use for the OS.
// +optional
OSImage *OSImage `json:"osImage,omitempty"`
}
// OSImage is the image to use for the OS of a machine.
type OSImage struct {
// Publisher is the publisher of the image.
Publisher string `json:"publisher"`
// Offer is the offer of the image.
Offer string `json:"offer"`
// SKU is the SKU of the image.
SKU string `json:"sku"`
// Version is the version of the image.
Version string `json:"version"`
}

6
vendor/github.com/openshift/hive/apis/hive/v1/azure/metadata.go generated vendored Normal file

@@ -0,0 +1,6 @@
package azure
// Metadata contains Azure metadata (e.g. for uninstalling the cluster).
type Metadata struct {
Region string `json:"region"`
}

58
vendor/github.com/openshift/hive/apis/hive/v1/azure/platform.go generated vendored Normal file

@@ -0,0 +1,58 @@
package azure
import (
"strings"
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global configuration that all machinesets
// use.
type Platform struct {
// CredentialsSecretRef refers to a secret that contains the Azure account access
// credentials.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region specifies the Azure region where the cluster will be created.
Region string `json:"region"`
// BaseDomainResourceGroupName specifies the resource group where the azure DNS zone for the base domain is found
BaseDomainResourceGroupName string `json:"baseDomainResourceGroupName,omitempty"`
// cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
// with the appropriate Azure API endpoints.
// If empty, the value is equal to "AzurePublicCloud".
// +optional
CloudName CloudEnvironment `json:"cloudName,omitempty"`
}
// CloudEnvironment is the name of the Azure cloud environment
// +kubebuilder:validation:Enum="";AzurePublicCloud;AzureUSGovernmentCloud;AzureChinaCloud;AzureGermanCloud
type CloudEnvironment string
const (
// PublicCloud is the general-purpose, public Azure cloud environment.
PublicCloud CloudEnvironment = "AzurePublicCloud"
// USGovernmentCloud is the Azure cloud environment for the US government.
USGovernmentCloud CloudEnvironment = "AzureUSGovernmentCloud"
// ChinaCloud is the Azure cloud environment used in China.
ChinaCloud CloudEnvironment = "AzureChinaCloud"
// GermanCloud is the Azure cloud environment used in Germany.
GermanCloud CloudEnvironment = "AzureGermanCloud"
)
// Name returns name that Azure uses for the cloud environment.
// See https://github.com/Azure/go-autorest/blob/ec5f4903f77ed9927ac95b19ab8e44ada64c1356/autorest/azure/environments.go#L13
func (e CloudEnvironment) Name() string {
return string(e)
}
// SetBaseDomain parses the baseDomainID and sets the related fields on azure.Platform
func (p *Platform) SetBaseDomain(baseDomainID string) error {
parts := strings.Split(baseDomainID, "/")
p.BaseDomainResourceGroupName = parts[4]
return nil
}
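
SetBaseDomain leans on the fixed shape of an Azure DNS zone resource ID: splitting "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network/dnszones/<zone>" on "/" puts the resource group at index 4 (index 0 is the empty string before the leading slash); a shorter, malformed ID would panic. A sketch with an illustrative ID:

package main

import (
	"fmt"

	"github.com/openshift/hive/apis/hive/v1/azure"
)

func main() {
	id := "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/dnszones/example.com"
	p := &azure.Platform{}
	_ = p.SetBaseDomain(id)
	fmt.Println(p.BaseDomainResourceGroupName) // my-rg
}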

119
vendor/github.com/openshift/hive/apis/hive/v1/azure/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,119 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package azure
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DiskEncryptionSet) DeepCopyInto(out *DiskEncryptionSet) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSet.
func (in *DiskEncryptionSet) DeepCopy() *DiskEncryptionSet {
if in == nil {
return nil
}
out := new(DiskEncryptionSet)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
in.OSDisk.DeepCopyInto(&out.OSDisk)
if in.OSImage != nil {
in, out := &in.OSImage, &out.OSImage
*out = new(OSImage)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metadata) DeepCopyInto(out *Metadata) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
func (in *Metadata) DeepCopy() *Metadata {
if in == nil {
return nil
}
out := new(Metadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSDisk) DeepCopyInto(out *OSDisk) {
*out = *in
if in.DiskEncryptionSet != nil {
in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet
*out = new(DiskEncryptionSet)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
func (in *OSDisk) DeepCopy() *OSDisk {
if in == nil {
return nil
}
out := new(OSDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSImage) DeepCopyInto(out *OSImage) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSImage.
func (in *OSImage) DeepCopy() *OSImage {
if in == nil {
return nil
}
out := new(OSImage)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

4
vendor/github.com/openshift/hive/apis/hive/v1/baremetal/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
// Package baremetal contains API Schema definitions for bare metal clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package baremetal

11
vendor/github.com/openshift/hive/apis/hive/v1/baremetal/platform.go generated vendored Normal file

@@ -0,0 +1,11 @@
package baremetal
import corev1 "k8s.io/api/core/v1"
// Platform stores the global configuration for the cluster.
type Platform struct {
// LibvirtSSHPrivateKeySecretRef is the reference to the secret that contains the private SSH key to use
// for access to the libvirt provisioning host.
// The SSH private key is expected to be in the secret data under the "ssh-privatekey" key.
LibvirtSSHPrivateKeySecretRef corev1.LocalObjectReference `json:"libvirtSSHPrivateKeySecretRef"`
}

23
vendor/github.com/openshift/hive/apis/hive/v1/baremetal/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,23 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package baremetal
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.LibvirtSSHPrivateKeySecretRef = in.LibvirtSSHPrivateKeySecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

55
vendor/github.com/openshift/hive/apis/hive/v1/checkpoint_types.go generated vendored Normal file

@@ -0,0 +1,55 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CheckpointSpec defines the metadata around the Hive objects state in the namespace at the time of the last backup.
type CheckpointSpec struct {
// LastBackupChecksum is the checksum of all Hive objects in the namespace at the time of the last backup.
LastBackupChecksum string `json:"lastBackupChecksum"`
// LastBackupTime is the last time we performed a backup of the namespace
LastBackupTime metav1.Time `json:"lastBackupTime"`
// LastBackupRef is a reference to last backup object created
LastBackupRef BackupReference `json:"lastBackupRef"`
}
// BackupReference is a reference to a backup resource
type BackupReference struct {
Name string `json:"name"`
Namespace string `json:"namespace"`
}
// CheckpointStatus defines the observed state of Checkpoint
type CheckpointStatus struct {
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Checkpoint is the Schema for the backup of Hive objects.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced
type Checkpoint struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec CheckpointSpec `json:"spec,omitempty"`
Status CheckpointStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// CheckpointList contains a list of Checkpoint
type CheckpointList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Checkpoint `json:"items"`
}
func init() {
SchemeBuilder.Register(&Checkpoint{}, &CheckpointList{})
}

110
vendor/github.com/openshift/hive/apis/hive/v1/clusterclaim_types.go generated vendored Normal file

@@ -0,0 +1,110 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterClaimSpec defines the desired state of the ClusterClaim.
type ClusterClaimSpec struct {
// ClusterPoolName is the name of the cluster pool from which to claim a cluster.
ClusterPoolName string `json:"clusterPoolName"`
// Subjects holds references to the subjects that should be authorized to access the claimed cluster.
// +optional
Subjects []rbacv1.Subject `json:"subjects,omitempty"`
// Namespace is the namespace containing the ClusterDeployment of the claimed cluster (the ClusterDeployment's name matches the namespace).
// This field will be set as soon as a suitable cluster is found, but that cluster may still be
// resuming and not yet ready for use. Wait for the ClusterRunning condition to be true to avoid this issue.
// +optional
Namespace string `json:"namespace,omitempty"`
// Lifetime is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists
// when the lifetime has elapsed, the claim will be deleted by Hive.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
Lifetime *metav1.Duration `json:"lifetime,omitempty"`
}
// ClusterClaimStatus defines the observed state of ClusterClaim.
type ClusterClaimStatus struct {
// Conditions includes more detailed status for the cluster pool.
// +optional
Conditions []ClusterClaimCondition `json:"conditions,omitempty"`
// Lifetime is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists
// when the lifetime has elapsed, the claim will be deleted by Hive.
// +optional
Lifetime *metav1.Duration `json:"lifetime,omitempty"`
}
// ClusterClaimCondition contains details for the current condition of a cluster claim.
type ClusterClaimCondition struct {
// Type is the type of the condition.
Type ClusterClaimConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterClaimConditionType is a valid value for ClusterClaimCondition.Type.
type ClusterClaimConditionType string
const (
// ClusterClaimPendingCondition is set while a cluster has not yet been assigned to the claim and made ready.
ClusterClaimPendingCondition ClusterClaimConditionType = "Pending"
// ClusterRunningCondition is true when a claimed cluster is running and ready for use.
ClusterRunningCondition ClusterClaimConditionType = "ClusterRunning"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterClaim represents a claim to a cluster from a cluster pool.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=clusterclaims
// +kubebuilder:printcolumn:name="Pool",type="string",JSONPath=".spec.clusterPoolName"
// +kubebuilder:printcolumn:name="Pending",type="string",JSONPath=".status.conditions[?(@.type=='Pending')].reason"
// +kubebuilder:printcolumn:name="ClusterNamespace",type="string",JSONPath=".spec.namespace"
// +kubebuilder:printcolumn:name="ClusterRunning",type="string",JSONPath=".status.conditions[?(@.type=='ClusterRunning')].reason"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
type ClusterClaim struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterClaimSpec `json:"spec"`
Status ClusterClaimStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterClaimList contains a list of ClusterClaims.
type ClusterClaimList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterClaim `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterClaim{}, &ClusterClaimList{})
}
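
As a usage sketch (the pool, namespace, and group names are hypothetical), a claim with an eight-hour lifetime; metav1.Duration serializes as a Go duration string such as "8h0m0s", which satisfies the kubebuilder Pattern above:

package main

import (
	"time"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

func main() {
	claim := hivev1.ClusterClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "ci-claim", Namespace: "cluster-pools"},
		Spec: hivev1.ClusterClaimSpec{
			ClusterPoolName: "ci-pool",
			// Authorize a group to access the claimed cluster.
			Subjects: []rbacv1.Subject{{
				APIGroup: rbacv1.GroupName,
				Kind:     rbacv1.GroupKind,
				Name:     "ci-admins",
			}},
			// Hive deletes the claim once this lifetime elapses.
			Lifetime: &metav1.Duration{Duration: 8 * time.Hour},
		},
	}
	_ = claim
}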

740
vendor/github.com/openshift/hive/apis/hive/v1/clusterdeployment_types.go generated vendored Normal file

@ -0,0 +1,740 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/hive/apis/hive/v1/agent"
"github.com/openshift/hive/apis/hive/v1/alibabacloud"
"github.com/openshift/hive/apis/hive/v1/aws"
"github.com/openshift/hive/apis/hive/v1/azure"
"github.com/openshift/hive/apis/hive/v1/baremetal"
"github.com/openshift/hive/apis/hive/v1/gcp"
"github.com/openshift/hive/apis/hive/v1/ibmcloud"
"github.com/openshift/hive/apis/hive/v1/none"
"github.com/openshift/hive/apis/hive/v1/openstack"
"github.com/openshift/hive/apis/hive/v1/ovirt"
"github.com/openshift/hive/apis/hive/v1/vsphere"
)
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// Important: Run "make" to regenerate code after modifying this file
const (
// FinalizerDeprovision is used on ClusterDeployments to ensure we run a successful deprovision
// job before cleaning up the API object.
FinalizerDeprovision string = "hive.openshift.io/deprovision"
// HiveClusterTypeLabel is an optional label that can be applied to ClusterDeployments. It is
// shown in short output, usable in searching, and adds metrics vectors which can be used to
// alert on cluster types differently.
HiveClusterTypeLabel = "hive.openshift.io/cluster-type"
// DefaultClusterType will be used when the above HiveClusterTypeLabel is unset. This
// value will not be added as a label, only used for metrics vectors.
DefaultClusterType = "unspecified"
// HiveInstallLogLabel is used on ConfigMaps uploaded by the install manager which contain an install log.
HiveInstallLogLabel = "hive.openshift.io/install-log"
// HiveClusterPlatformLabel is a label that is applied to ClusterDeployments
// to denote which platform the cluster was created on. This can be used in
// searching and filtering clusters, as well as in SelectorSyncSets to only
// target specific cloud platforms.
HiveClusterPlatformLabel = "hive.openshift.io/cluster-platform"
// HiveClusterRegionLabel is a label that is applied to ClusterDeployments
// to denote which region the cluster was created in. This can be used in
// searching and filtering clusters, as well as in SelectorSyncSets to only
// target specific regions of the cluster-platform.
HiveClusterRegionLabel = "hive.openshift.io/cluster-region"
// FinalizerArgoCDCluster is used on ClusterDeployments to ensure we clean up the ArgoCD cluster
// secret before cleaning up the API object.
FinalizerArgoCDCluster = "hive.openshift.io/argocd-cluster"
)
// ClusterPowerState is used to indicate whether a cluster is running or in a
// hibernating state.
type ClusterPowerState string
const (
// ClusterPowerStateRunning is the default state of a cluster after it has
// been installed. All of its machines should be running.
ClusterPowerStateRunning ClusterPowerState = "Running"
// ClusterPowerStateHibernating indicates the machines belonging to a cluster
// are stopped.
ClusterPowerStateHibernating ClusterPowerState = "Hibernating"
// ClusterPowerStateSyncSetsNotApplied indicates SyncSets have not yet been applied
// for the cluster based on ClusterSync.Status.FirstSuccessTime.
ClusterPowerStateSyncSetsNotApplied ClusterPowerState = "SyncSetsNotApplied"
// ClusterPowerStateStartingMachines is used to reflect an attempt to list and start cloud VMs.
ClusterPowerStateStartingMachines ClusterPowerState = "StartingMachines"
// ClusterPowerStateFailedToStartMachines is used when there was an error starting cloud VMs to leave hibernation.
ClusterPowerStateFailedToStartMachines ClusterPowerState = "FailedToStartMachines"
// ClusterPowerStateStopping indicates the cluster is transitioning
// from a Running state to a Hibernating state.
ClusterPowerStateStopping ClusterPowerState = "Stopping"
// ClusterPowerStateFailedToStop is used when there was an error stopping machines
// to enter hibernation
ClusterPowerStateFailedToStop ClusterPowerState = "FailedToStop"
// ClusterPowerStateWaitingForMachinesToStop is used when waiting for cloud VMs to stop
ClusterPowerStateWaitingForMachinesToStop ClusterPowerState = "WaitingForMachinesToStop"
// ClusterPowerStateWaitingForMachines is used when waiting for cloud VMs to start.
ClusterPowerStateWaitingForMachines ClusterPowerState = "WaitingForMachines"
// ClusterPowerStateWaitingForNodes is used when waiting for nodes to become Ready.
ClusterPowerStateWaitingForNodes ClusterPowerState = "WaitingForNodes"
// ClusterPowerStatePausingForClusterOperatorsToSettle is used when pausing to let ClusterOperators start and post new status before we check it.
ClusterPowerStatePausingForClusterOperatorsToSettle ClusterPowerState = "PausingForClusterOperatorsToSettle"
// ClusterPowerStateWaitingForClusterOperators is used when waiting for ClusterOperators to
// get to a good state. (Available=True, Progressing=False, Degraded=False)
ClusterPowerStateWaitingForClusterOperators ClusterPowerState = "WaitingForClusterOperators"
// ClusterPowerStateUnknown indicates that we can't/won't discover the state of the cluster's cloud machines.
ClusterPowerStateUnknown = "Unknown"
)
// ClusterDeploymentSpec defines the desired state of ClusterDeployment
type ClusterDeploymentSpec struct {
// ClusterName is the friendly name of the cluster. It is used for subdomains,
// some resource tagging, and other instances where a friendly name for the
// cluster is useful.
// +required
ClusterName string `json:"clusterName"`
// BaseDomain is the base domain to which the cluster should belong.
// +required
BaseDomain string `json:"baseDomain"`
// Platform is the configuration for the specific platform upon which to
// perform the installation.
// +required
Platform Platform `json:"platform"`
// PullSecretRef is the reference to the secret to use when pulling images.
// +optional
PullSecretRef *corev1.LocalObjectReference `json:"pullSecretRef,omitempty"`
// PreserveOnDelete allows the user to disconnect a cluster from Hive without deprovisioning it. This can also be
// used to abandon ongoing cluster deprovision.
// +optional
PreserveOnDelete bool `json:"preserveOnDelete,omitempty"`
// ControlPlaneConfig contains additional configuration for the target cluster's control plane
// +optional
ControlPlaneConfig ControlPlaneConfigSpec `json:"controlPlaneConfig,omitempty"`
// Ingress allows defining desired clusteringress/shards to be configured on the cluster.
// +optional
Ingress []ClusterIngress `json:"ingress,omitempty"`
// CertificateBundles is a list of certificate bundles associated with this cluster
// +optional
CertificateBundles []CertificateBundleSpec `json:"certificateBundles,omitempty"`
// ManageDNS specifies whether a DNSZone should be created and managed automatically
// for this ClusterDeployment
// +optional
ManageDNS bool `json:"manageDNS,omitempty"`
// ClusterMetadata contains metadata information about the installed cluster.
ClusterMetadata *ClusterMetadata `json:"clusterMetadata,omitempty"`
// Installed is true if the cluster has been installed
// +optional
Installed bool `json:"installed"`
// Provisioning contains settings used only for initial cluster provisioning.
// May be unset in the case of adopted clusters.
Provisioning *Provisioning `json:"provisioning,omitempty"`
// ClusterInstallRef provides a reference to an object that implements the
// Hive contract ClusterInstall. The namespace of the object is the same as
// that of the ClusterDeployment.
// This cannot be set when Provisioning is also set.
// +optional
ClusterInstallRef *ClusterInstallLocalReference `json:"clusterInstallRef,omitempty"`
// ClusterPoolRef is a reference to the ClusterPool that this ClusterDeployment originated from.
// +optional
ClusterPoolRef *ClusterPoolReference `json:"clusterPoolRef,omitempty"`
// PowerState indicates whether a cluster should be running or hibernating. When omitted,
// PowerState defaults to the Running state.
// +kubebuilder:validation:Enum="";Running;Hibernating
// +optional
PowerState ClusterPowerState `json:"powerState,omitempty"`
// HibernateAfter will transition a cluster to hibernating power state after it has been running for the
// given duration. The time that a cluster has been running is the time since the cluster was installed or the
// time since the cluster last came out of hibernation.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
HibernateAfter *metav1.Duration `json:"hibernateAfter,omitempty"`
// InstallAttemptsLimit is the maximum number of times Hive will attempt to install the cluster.
// +optional
InstallAttemptsLimit *int32 `json:"installAttemptsLimit,omitempty"`
// BoundServiceAccountSigningKeySecretRef refers to a Secret that contains a
// 'bound-service-account-signing-key.key' data key pointing to the private
// key that will be used to sign ServiceAccount objects. Primarily used to
// provision AWS clusters to use Amazon's Security Token Service.
// +optional
BoundServiceAccountSigningKeySecretRef *corev1.LocalObjectReference `json:"boundServiceAccountSigningKeySecretRef,omitempty"`
}
// ClusterInstallLocalReference provides a reference to an object that implements
// the Hive contract ClusterInstall. The namespace of the object is the same as
// that of the ClusterDeployment.
type ClusterInstallLocalReference struct {
Group string `json:"group"`
Version string `json:"version"`
Kind string `json:"kind"`
Name string `json:"name"`
}
// Provisioning contains settings used only for initial cluster provisioning.
type Provisioning struct {
// InstallConfigSecretRef is the reference to a secret that contains an openshift-install
// InstallConfig. This file will be passed through directly to the installer.
// Any version of InstallConfig can be used, provided it can be parsed by the openshift-install
// version for the release you are provisioning.
// +optional
InstallConfigSecretRef *corev1.LocalObjectReference `json:"installConfigSecretRef,omitempty"`
// ReleaseImage is the image containing metadata for all components that run in the cluster, and
// is the primary and best way to specify what specific version of OpenShift you wish to install.
ReleaseImage string `json:"releaseImage,omitempty"`
// InstallerImageOverride allows specifying a URI for the installer image, normally gleaned from
// the metadata within the ReleaseImage.
// +optional
InstallerImageOverride string `json:"installerImageOverride,omitempty"`
// ImageSetRef is a reference to a ClusterImageSet. If a value is specified for ReleaseImage,
// that will take precedence over the one from the ClusterImageSet.
ImageSetRef *ClusterImageSetReference `json:"imageSetRef,omitempty"`
// ManifestsConfigMapRef is a reference to user-provided manifests to
// add to or replace manifests that are generated by the installer.
ManifestsConfigMapRef *corev1.LocalObjectReference `json:"manifestsConfigMapRef,omitempty"`
// SSHPrivateKeySecretRef is the reference to the secret that contains the private SSH key to use
// for access to compute instances. This private key should correspond to the public key included
// in the InstallConfig. The private key is used by Hive to gather logs on the target cluster if
// there are install failures.
// The SSH private key is expected to be in the secret data under the "ssh-privatekey" key.
// +optional
SSHPrivateKeySecretRef *corev1.LocalObjectReference `json:"sshPrivateKeySecretRef,omitempty"`
// SSHKnownHosts are known hosts to be configured in the hive install manager pod to avoid ssh prompts.
// Use of ssh in the install pod is somewhat limited today (failure log gathering from cluster, some bare metal
// provisioning scenarios), so this setting is often not needed.
SSHKnownHosts []string `json:"sshKnownHosts,omitempty"`
// InstallerEnv are extra environment variables to pass through to the installer. This may be used to enable
// additional features of the installer.
// +optional
InstallerEnv []corev1.EnvVar `json:"installerEnv,omitempty"`
}
// ClusterImageSetReference is a reference to a ClusterImageSet
type ClusterImageSetReference struct {
// Name is the name of the ClusterImageSet that this refers to
Name string `json:"name"`
}
// ClusterPoolReference is a reference to a ClusterPool
type ClusterPoolReference struct {
// Namespace is the namespace where the ClusterPool resides.
Namespace string `json:"namespace"`
// PoolName is the name of the ClusterPool for which the cluster was created.
PoolName string `json:"poolName"`
// ClaimName is the name of the ClusterClaim that claimed the cluster from the pool.
// +optional
ClaimName string `json:"claimName,omitempty"`
// ClaimedTimestamp is the time this cluster was assigned to a ClusterClaim. This is only used for
// ClusterDeployments belonging to ClusterPools.
ClaimedTimestamp *metav1.Time `json:"claimedTimestamp,omitempty"`
}
// ClusterMetadata contains metadata information about the installed cluster.
type ClusterMetadata struct {
// ClusterID is a globally unique identifier for this cluster generated during installation. Used for reporting metrics among other places.
ClusterID string `json:"clusterID"`
// InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers.
InfraID string `json:"infraID"`
// AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster.
AdminKubeconfigSecretRef corev1.LocalObjectReference `json:"adminKubeconfigSecretRef"`
// AdminPasswordSecretRef references the secret containing the admin username/password which can be used to login to this cluster.
// +optional
AdminPasswordSecretRef *corev1.LocalObjectReference `json:"adminPasswordSecretRef,omitempty"`
}
// ClusterDeploymentStatus defines the observed state of ClusterDeployment
type ClusterDeploymentStatus struct {
// InstallRestarts is the total count of container restarts on the clusters install job.
InstallRestarts int `json:"installRestarts,omitempty"`
// APIURL is the URL where the cluster's API can be accessed.
APIURL string `json:"apiURL,omitempty"`
// WebConsoleURL is the URL for the cluster's web console UI.
WebConsoleURL string `json:"webConsoleURL,omitempty"`
// InstallerImage is the name of the installer image to use when installing the target cluster
// +optional
InstallerImage *string `json:"installerImage,omitempty"`
// InstallVersion is the version of OpenShift as reported by the release image
// resolved for the installation.
// +optional
InstallVersion *string `json:"installVersion,omitempty"`
// CLIImage is the name of the oc cli image to use when installing the target cluster
// +optional
CLIImage *string `json:"cliImage,omitempty"`
// Conditions includes more detailed status for the cluster deployment
// +optional
Conditions []ClusterDeploymentCondition `json:"conditions,omitempty"`
// CertificateBundles contains the status of the certificate bundles associated with this cluster deployment.
// +optional
CertificateBundles []CertificateBundleStatus `json:"certificateBundles,omitempty"`
// TODO: Use of *Timestamp fields here is slightly off from latest API conventions,
// should use InstalledTime instead if we ever get to a V2 of the API.
// InstallStartedTimestamp is the time when all pre-requisites were met and cluster installation was launched.
InstallStartedTimestamp *metav1.Time `json:"installStartedTimestamp,omitempty"`
// InstalledTimestamp is the time we first detected that the cluster has been successfully installed.
InstalledTimestamp *metav1.Time `json:"installedTimestamp,omitempty"`
// PowerState indicates the power state of the cluster.
// +optional
PowerState ClusterPowerState `json:"powerState,omitempty"`
// ProvisionRef is a reference to the last ClusterProvision created for the deployment
// +optional
ProvisionRef *corev1.LocalObjectReference `json:"provisionRef,omitempty"`
// Platform contains the observed state for the specific platform upon which to
// perform the installation.
// +optional
Platform *PlatformStatus `json:"platformStatus,omitempty"`
}
// ClusterDeploymentCondition contains details for the current condition of a cluster deployment
type ClusterDeploymentCondition struct {
// Type is the type of the condition.
Type ClusterDeploymentConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterDeploymentConditionType is a valid value for ClusterDeploymentCondition.Type
type ClusterDeploymentConditionType string
const (
// InstallerImageResolutionFailedCondition is a condition that indicates whether the job
// to determine the installer image based on a release image was successful.
InstallerImageResolutionFailedCondition ClusterDeploymentConditionType = "InstallerImageResolutionFailed"
// ControlPlaneCertificateNotFoundCondition is set when a control plane certificate bundle
// is not available, preventing the target cluster's control plane from being configured with
// certificates.
ControlPlaneCertificateNotFoundCondition ClusterDeploymentConditionType = "ControlPlaneCertificateNotFound"
// IngressCertificateNotFoundCondition is a condition indicating that one of the CertificateBundle
// secrets required by an Ingress is not available.
IngressCertificateNotFoundCondition ClusterDeploymentConditionType = "IngressCertificateNotFound"
// UnreachableCondition indicates that Hive is unable to establish an API connection to the remote cluster.
UnreachableCondition ClusterDeploymentConditionType = "Unreachable"
// ActiveAPIURLOverrideCondition indicates that Hive is communicating with the remote cluster using the
// API URL override.
ActiveAPIURLOverrideCondition ClusterDeploymentConditionType = "ActiveAPIURLOverride"
// DNSNotReadyCondition indicates that the DNSZone object created for the clusterDeployment
// (i.e. manageDNS==true) has not yet indicated that the DNS zone is successfully responding to queries.
DNSNotReadyCondition ClusterDeploymentConditionType = "DNSNotReady"
// InstallImagesNotResolvedCondition indicates that the install images for the clusterDeployment
// have not been resolved. This usually includes the installer and OpenShift cli images.
InstallImagesNotResolvedCondition ClusterDeploymentConditionType = "InstallImagesNotResolved"
// ProvisionFailedCondition indicates that a provision failed
ProvisionFailedCondition ClusterDeploymentConditionType = "ProvisionFailed"
// SyncSetFailedCondition indicates if any syncset for a cluster deployment failed
SyncSetFailedCondition ClusterDeploymentConditionType = "SyncSetFailed"
// RelocationFailedCondition indicates if a relocation to another Hive instance has failed
RelocationFailedCondition ClusterDeploymentConditionType = "RelocationFailed"
// ClusterHibernatingCondition is set when the ClusterDeployment is either
// transitioning to/from a hibernating state or is in a hibernating state.
ClusterHibernatingCondition ClusterDeploymentConditionType = "Hibernating"
// ClusterReadyCondition works in conjunction with ClusterHibernatingCondition and gives more information
// pertaining to the transition status of the cluster and whether it is running and ready
ClusterReadyCondition ClusterDeploymentConditionType = "Ready"
// InstallLaunchErrorCondition is set when a cluster provision fails to launch an install pod
InstallLaunchErrorCondition ClusterDeploymentConditionType = "InstallLaunchError"
// DeprovisionLaunchErrorCondition is set when a cluster deprovision fails to launch.
DeprovisionLaunchErrorCondition ClusterDeploymentConditionType = "DeprovisionLaunchError"
// ProvisionStoppedCondition is set when cluster provisioning is stopped.
// This indicates that at least one provision attempt was made, but there will be no further
// retries (without InstallAttemptsLimit changes or other hive configuration stopping further retries).
ProvisionStoppedCondition ClusterDeploymentConditionType = "ProvisionStopped"
// Provisioned is True when a cluster is installed; False while it is provisioning or deprovisioning.
// The Reason indicates where it is in that lifecycle.
ProvisionedCondition ClusterDeploymentConditionType = "Provisioned"
// RequirementsMetCondition is set True when all pre-provision requirements have been met,
// and the controllers can begin the cluster install.
RequirementsMetCondition ClusterDeploymentConditionType = "RequirementsMet"
// AuthenticationFailureClusterDeploymentCondition is true when platform credentials cannot be used because of authentication failure
AuthenticationFailureClusterDeploymentCondition ClusterDeploymentConditionType = "AuthenticationFailure"
// AWSPrivateLinkReadyClusterDeploymentCondition is true when private link access has been
// set up for the cluster.
AWSPrivateLinkReadyClusterDeploymentCondition ClusterDeploymentConditionType = "AWSPrivateLinkReady"
// AWSPrivateLinkFailedClusterDeploymentCondition is true when the controller fails to set up private link
// access for the cluster.
AWSPrivateLinkFailedClusterDeploymentCondition ClusterDeploymentConditionType = "AWSPrivateLinkFailed"
// These are conditions that are copied from ClusterInstall on to the ClusterDeployment object.
ClusterInstallFailedClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallFailed"
ClusterInstallCompletedClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallCompleted"
ClusterInstallStoppedClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallStopped"
ClusterInstallRequirementsMetClusterDeploymentCondition ClusterDeploymentConditionType = "ClusterInstallRequirementsMet"
)
// PositivePolarityClusterDeploymentConditions is a slice containing all condition types with positive polarity
// For controllers that handle these conditions, the desired state is True
// All cluster deployment condition types that are not in this slice are assumed to have negative polarity
var PositivePolarityClusterDeploymentConditions = []ClusterDeploymentConditionType{
ActiveAPIURLOverrideCondition,
ClusterHibernatingCondition,
ClusterReadyCondition,
AWSPrivateLinkReadyClusterDeploymentCondition,
ClusterInstallCompletedClusterDeploymentCondition,
ClusterInstallRequirementsMetClusterDeploymentCondition,
RequirementsMetCondition,
ProvisionedCondition,
}
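
A hedged sketch of how a consumer might use the polarity slice when evaluating conditions; conditionIsDesired is a hypothetical helper, not part of this API:

package main

import (
	corev1 "k8s.io/api/core/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

// conditionIsDesired reports whether a condition is in its desired state:
// True for positive-polarity types, False for all other (negative) types.
func conditionIsDesired(c hivev1.ClusterDeploymentCondition) bool {
	desired := corev1.ConditionFalse
	for _, t := range hivev1.PositivePolarityClusterDeploymentConditions {
		if c.Type == t {
			desired = corev1.ConditionTrue
			break
		}
	}
	return c.Status == desired
}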
// Cluster hibernating and ready reasons
const (
// HibernatingReasonResumingOrRunning is used as the reason for the Hibernating condition when the cluster
// is resuming or running. Precise details are available in the Ready condition.
HibernatingReasonResumingOrRunning = "ResumingOrRunning"
// HibernatingReasonStopping is used as the reason when the cluster is transitioning
// from a Running state to a Hibernating state.
HibernatingReasonStopping = string(ClusterPowerStateStopping)
// HibernatingReasonWaitingForMachinesToStop is used on the Hibernating condition when waiting for cloud VMs to stop
HibernatingReasonWaitingForMachinesToStop = string(ClusterPowerStateWaitingForMachinesToStop)
// HibernatingReasonHibernating is used as the reason when the cluster is in a
// Hibernating state.
HibernatingReasonHibernating = string(ClusterPowerStateHibernating)
// HibernatingReasonUnsupported is used as the reason when the cluster spec
// specifies that the cluster be moved to a Hibernating state, but either the cluster
// version is not compatible with hibernation (< 4.4.8) or the cloud provider of
// the cluster is not supported.
HibernatingReasonUnsupported = "Unsupported"
// HibernatingReasonFailedToStop is used when there was an error stopping machines
// to enter hibernation
HibernatingReasonFailedToStop = string(ClusterPowerStateFailedToStop)
// HibernatingReasonSyncSetsNotApplied is used as the reason when SyncSets have not yet been applied
// for the cluster based on ClusterSync.Status.FirstSuccessTime.
HibernatingReasonSyncSetsNotApplied = string(ClusterPowerStateSyncSetsNotApplied)
// HibernatingReasonSyncSetsApplied means SyncSets have been successfully applied at some point.
// (It does not necessarily mean they are currently copacetic -- check ClusterSync status
// for that.)
HibernatingReasonSyncSetsApplied = "SyncSetsApplied"
// HibernatingReasonPowerStatePaused indicates that we can't/won't discover the state of the
// cluster's cloud machines because the powerstate-paused annotation is set.
HibernatingReasonPowerStatePaused = "PowerStatePaused"
// ReadyReasonStoppingOrHibernating is used as the reason for the Ready condition when the cluster
// is stopping or hibernating. Precise details are available in the Hibernating condition.
ReadyReasonStoppingOrHibernating = "StoppingOrHibernating"
// ReadyReasonStartingMachines is used to reflect an attempt to list and start cloud VMs.
ReadyReasonStartingMachines = string(ClusterPowerStateStartingMachines)
// ReadyReasonFailedToStartMachines is used when there was an error starting machines
// to leave hibernation
ReadyReasonFailedToStartMachines = string(ClusterPowerStateFailedToStartMachines)
// ReadyReasonWaitingForMachines is used on the Ready condition when waiting for cloud VMs to start.
ReadyReasonWaitingForMachines = string(ClusterPowerStateWaitingForMachines)
// ReadyReasonWaitingForNodes is used on the Ready condition when waiting for nodes to become Ready.
ReadyReasonWaitingForNodes = string(ClusterPowerStateWaitingForNodes)
// ReadyReasonPausingForClusterOperatorsToSettle is used on the Ready condition when pausing to let ClusterOperators start and post new status before we check it.
ReadyReasonPausingForClusterOperatorsToSettle = string(ClusterPowerStatePausingForClusterOperatorsToSettle)
// ReadyReasonWaitingForClusterOperators is used on the Ready condition when waiting for ClusterOperators to
// get to a good state. (Available=True, Processing=False, Degraded=False)
ReadyReasonWaitingForClusterOperators = string(ClusterPowerStateWaitingForClusterOperators)
// ReadyReasonRunning is used on the Ready condition as the reason when the cluster is running and ready
ReadyReasonRunning = string(ClusterPowerStateRunning)
// ReadyReasonPowerStatePaused indicates that we can't/won't discover the state of the
// cluster's cloud machines because the powerstate-paused annotation is set.
ReadyReasonPowerStatePaused = "PowerStatePaused"
)
// Provisioned status condition reasons
const (
// ProvisionedReasonProvisioning is set while the cluster is still provisioning.
ProvisionedReasonProvisioning = "Provisioning"
// ProvisionedReasonProvisionStopped means cluster provisioning is stopped. The ProvisionStopped condition may contain more detail.
ProvisionedReasonProvisionStopped = "ProvisionStopped"
// ProvisionedReasonProvisioned is set when the provision is successful.
ProvisionedReasonProvisioned = "Provisioned"
// ProvisionedReasonDeprovisioning is set when we start to deprovision the cluster.
ProvisionedReasonDeprovisioning = "Deprovisioning"
// ProvisionedReasonDeprovisionFailed means the deprovision failed terminally.
ProvisionedReasonDeprovisionFailed = "DeprovisionFailed"
// ProvisionedReasonDeprovisioned is set when the cluster has been successfully deprovisioned
ProvisionedReasonDeprovisioned = "Deprovisioned"
)
// InitializedConditionReason is used when a condition is initialized for the first time, and the status of the
// condition is still Unknown
const InitializedConditionReason = "Initialized"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterDeployment is the Schema for the clusterdeployments API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.clusterMetadata.infraID"
// +kubebuilder:printcolumn:name="Platform",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-platform"
// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-region"
// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/version-major-minor-patch"
// +kubebuilder:printcolumn:name="ClusterType",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-type"
// +kubebuilder:printcolumn:name="ProvisionStatus",type="string",JSONPath=".status.conditions[?(@.type=='Provisioned')].reason"
// +kubebuilder:printcolumn:name="PowerState",type="string",JSONPath=".status.powerState"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:path=clusterdeployments,shortName=cd,scope=Namespaced
type ClusterDeployment struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterDeploymentSpec `json:"spec,omitempty"`
Status ClusterDeploymentStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterDeploymentList contains a list of ClusterDeployment
type ClusterDeploymentList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterDeployment `json:"items"`
}
// Platform is the configuration for the specific platform upon which to perform
// the installation. Only one of the platform configuration should be set.
type Platform struct {
// AlibabaCloud is the configuration used when installing on Alibaba Cloud
AlibabaCloud *alibabacloud.Platform `json:"alibabacloud,omitempty"`
// AWS is the configuration used when installing on AWS.
AWS *aws.Platform `json:"aws,omitempty"`
// Azure is the configuration used when installing on Azure.
// +optional
Azure *azure.Platform `json:"azure,omitempty"`
// BareMetal is the configuration used when installing on bare metal.
BareMetal *baremetal.Platform `json:"baremetal,omitempty"`
// GCP is the configuration used when installing on Google Cloud Platform.
// +optional
GCP *gcp.Platform `json:"gcp,omitempty"`
// OpenStack is the configuration used when installing on OpenStack
OpenStack *openstack.Platform `json:"openstack,omitempty"`
// VSphere is the configuration used when installing on vSphere
VSphere *vsphere.Platform `json:"vsphere,omitempty"`
// Ovirt is the configuration used when installing on oVirt
Ovirt *ovirt.Platform `json:"ovirt,omitempty"`
// AgentBareMetal is the configuration used when performing an Assisted Agent based installation
// to bare metal.
AgentBareMetal *agent.BareMetalPlatform `json:"agentBareMetal,omitempty"`
// IBMCloud is the configuration used when installing on IBM Cloud
IBMCloud *ibmcloud.Platform `json:"ibmcloud,omitempty"`
// None indicates platform-agnostic install.
// https://docs.openshift.com/container-platform/4.7/installing/installing_platform_agnostic/installing-platform-agnostic.html
None *none.Platform `json:"none,omitempty"`
}
// PlatformStatus contains the observed state for the specific platform upon which to
// perform the installation
type PlatformStatus struct {
// AWS is the observed state on AWS.
AWS *aws.PlatformStatus `json:"aws,omitempty"`
}
// ClusterIngress contains the configurable pieces for any ClusterIngress objects
// that should exist on the cluster.
type ClusterIngress struct {
// Name of the ClusterIngress object to create.
// +required
Name string `json:"name"`
// Domain (sometimes referred to as shard) is the full DNS suffix that the resulting
// IngressController object will service (e.g. abcd.mycluster.mydomain.com).
// +required
Domain string `json:"domain"`
// NamespaceSelector allows filtering the list of namespaces serviced by the
// ingress controller.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
// RouteSelector allows filtering the set of Routes serviced by the ingress controller
// +optional
RouteSelector *metav1.LabelSelector `json:"routeSelector,omitempty"`
// ServingCertificate references a CertificateBundle in the ClusterDeployment.Spec that
// should be used for this Ingress
// +optional
ServingCertificate string `json:"servingCertificate,omitempty"`
}
// ControlPlaneConfigSpec contains additional configuration settings for a target
// cluster's control plane.
type ControlPlaneConfigSpec struct {
// ServingCertificates specifies serving certificates for the control plane
// +optional
ServingCertificates ControlPlaneServingCertificateSpec `json:"servingCertificates,omitempty"`
// APIURLOverride is the optional URL override to which Hive will transition for communication with the API
// server of the remote cluster. When a remote cluster is created, Hive will initially communicate using the
// API URL established during installation. If an API URL Override is specified, Hive will periodically attempt
// to connect to the remote cluster using the override URL. Once Hive has determined that the override URL is
// active, Hive will use the override URL for further communications with the API server of the remote cluster.
// +optional
APIURLOverride string `json:"apiURLOverride,omitempty"`
}
// ControlPlaneServingCertificateSpec specifies serving certificate settings for
// the control plane of the target cluster.
type ControlPlaneServingCertificateSpec struct {
// Default references the name of a CertificateBundle in the ClusterDeployment that should be
// used for the control plane's default endpoint.
// +optional
Default string `json:"default,omitempty"`
// Additional is a list of additional domains and certificates that are also associated with
// the control plane's api endpoint.
// +optional
Additional []ControlPlaneAdditionalCertificate `json:"additional,omitempty"`
}
// ControlPlaneAdditionalCertificate defines an additional serving certificate for a control plane
type ControlPlaneAdditionalCertificate struct {
// Name references a CertificateBundle in the ClusterDeployment.Spec that should be
// used for this additional certificate.
Name string `json:"name"`
// Domain is the domain of the additional control plane certificate
Domain string `json:"domain"`
}
// CertificateBundleSpec specifies a certificate bundle associated with a cluster deployment
type CertificateBundleSpec struct {
// Name is an identifier that must be unique within the bundle and must be referenced by
// an ingress or by the control plane serving certs
// +required
Name string `json:"name"`
// Generate indicates whether this bundle should have real certificates generated for it.
// +optional
Generate bool `json:"generate,omitempty"`
// CertificateSecretRef is the reference to the secret that contains the certificate bundle. If
// the certificate bundle is to be generated, it will be generated with the name in this
// reference. Otherwise, it is expected that the secret should exist in the same namespace
// as the ClusterDeployment
CertificateSecretRef corev1.LocalObjectReference `json:"certificateSecretRef"`
}
// CertificateBundleStatus specifies whether a certificate bundle was generated for this
// cluster deployment.
type CertificateBundleStatus struct {
// Name of the certificate bundle
Name string `json:"name"`
// Generated indicates whether the certificate bundle was generated
Generated bool `json:"generated"`
}
// RelocateStatus is the status of a cluster relocate.
// This is used in the value of the "hive.openshift.io/relocate" annotation.
type RelocateStatus string
const (
// RelocateOutgoing indicates that a resource is on the source side of an in-progress relocate
RelocateOutgoing RelocateStatus = "outgoing"
// RelocateComplete indicates that a resource is on the source side of a completed relocate
RelocateComplete RelocateStatus = "complete"
// RelocateIncoming indicates that a resource is on the destination side of an in-progress relocate
RelocateIncoming RelocateStatus = "incoming"
)
func init() {
SchemeBuilder.Register(&ClusterDeployment{}, &ClusterDeploymentList{})
}
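
A minimal sketch of a ClusterDeployment built from these types, using the platform-agnostic none platform (all names are hypothetical; this assumes none.Platform is an empty struct):

package main

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	"github.com/openshift/hive/apis/hive/v1/none"
)

func main() {
	cd := hivev1.ClusterDeployment{
		ObjectMeta: metav1.ObjectMeta{Name: "mycluster", Namespace: "mycluster"},
		Spec: hivev1.ClusterDeploymentSpec{
			ClusterName: "mycluster",
			BaseDomain:  "example.com",
			// Exactly one Platform field should be set.
			Platform:      hivev1.Platform{None: &none.Platform{}},
			PullSecretRef: &corev1.LocalObjectReference{Name: "pull-secret"},
			PowerState:    hivev1.ClusterPowerStateRunning,
			// Hibernate after four hours of running time.
			HibernateAfter: &metav1.Duration{Duration: 4 * time.Hour},
		},
	}
	_ = cd
}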

209
vendor/github.com/openshift/hive/apis/hive/v1/clusterdeprovision_types.go generated vendored Normal file

@ -0,0 +1,209 @@
package v1
import (
"github.com/openshift/hive/apis/hive/v1/aws"
"github.com/openshift/hive/apis/hive/v1/azure"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterDeprovisionSpec defines the desired state of ClusterDeprovision
type ClusterDeprovisionSpec struct {
// InfraID is the identifier generated during installation for a cluster. It is used for tagging/naming resources in cloud providers.
InfraID string `json:"infraID"`
// ClusterID is a globally unique identifier for the cluster to deprovision. It will be used if specified.
ClusterID string `json:"clusterID,omitempty"`
// ClusterName is the friendly name of the cluster. It is used for subdomains,
// some resource tagging, and other instances where a friendly name for the
// cluster is useful.
ClusterName string `json:"clusterName,omitempty"`
// Platform contains platform-specific configuration for a ClusterDeprovision
Platform ClusterDeprovisionPlatform `json:"platform,omitempty"`
}
// ClusterDeprovisionStatus defines the observed state of ClusterDeprovision
type ClusterDeprovisionStatus struct {
// Completed is true when the uninstall has completed successfully
Completed bool `json:"completed,omitempty"`
// Conditions includes more detailed status for the cluster deprovision
// +optional
Conditions []ClusterDeprovisionCondition `json:"conditions,omitempty"`
}
// ClusterDeprovisionPlatform contains platform-specific configuration for the
// deprovision
type ClusterDeprovisionPlatform struct {
// AlibabaCloud contains Alibaba Cloud specific deprovision settings
AlibabaCloud *AlibabaCloudClusterDeprovision `json:"alibabacloud,omitempty"`
// AWS contains AWS-specific deprovision settings
AWS *AWSClusterDeprovision `json:"aws,omitempty"`
// Azure contains Azure-specific deprovision settings
Azure *AzureClusterDeprovision `json:"azure,omitempty"`
// GCP contains GCP-specific deprovision settings
GCP *GCPClusterDeprovision `json:"gcp,omitempty"`
// OpenStack contains OpenStack-specific deprovision settings
OpenStack *OpenStackClusterDeprovision `json:"openstack,omitempty"`
// VSphere contains VMWare vSphere-specific deprovision settings
VSphere *VSphereClusterDeprovision `json:"vsphere,omitempty"`
// Ovirt contains oVirt-specific deprovision settings
Ovirt *OvirtClusterDeprovision `json:"ovirt,omitempty"`
// IBMCloud contains IBM Cloud specific deprovision settings
IBMCloud *IBMClusterDeprovision `json:"ibmcloud,omitempty"`
}
// AlibabaCloudClusterDeprovision contains AlibabaCloud-specific configuration for a ClusterDeprovision
type AlibabaCloudClusterDeprovision struct {
// Region is the Alibaba region for this deprovision
Region string `json:"region"`
// BaseDomain is the DNS base domain
BaseDomain string `json:"baseDomain"`
// CredentialsSecretRef is the Alibaba account credentials to use for deprovisioning the cluster
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
}
// AWSClusterDeprovision contains AWS-specific configuration for a ClusterDeprovision
type AWSClusterDeprovision struct {
// Region is the AWS region for this deprovisioning
Region string `json:"region"`
// CredentialsSecretRef is the AWS account credentials to use for deprovisioning the cluster
// +optional
CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
// CredentialsAssumeRole refers to the IAM role that must be assumed to obtain
// AWS account access for deprovisioning the cluster.
// +optional
CredentialsAssumeRole *aws.AssumeRole `json:"credentialsAssumeRole,omitempty"`
}
// AzureClusterDeprovision contains Azure-specific configuration for a ClusterDeprovision
type AzureClusterDeprovision struct {
// CredentialsSecretRef is the Azure account credentials to use for deprovisioning the cluster
CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
// cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
// with the appropriate Azure API endpoints.
// If empty, the value is equal to "AzurePublicCloud".
// +optional
CloudName *azure.CloudEnvironment `json:"cloudName,omitempty"`
}
// GCPClusterDeprovision contains GCP-specific configuration for a ClusterDeprovision
type GCPClusterDeprovision struct {
// Region is the GCP region for this deprovision
Region string `json:"region"`
// CredentialsSecretRef is the GCP account credentials to use for deprovisioning the cluster
CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
}
// OpenStackClusterDeprovision contains OpenStack-specific configuration for a ClusterDeprovision
type OpenStackClusterDeprovision struct {
// Cloud is the section in the clouds.yaml secret (referenced below) to use for auth/connectivity.
Cloud string `json:"cloud"`
// CredentialsSecretRef is the OpenStack account credentials to use for deprovisioning the cluster
CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
// CertificatesSecretRef refers to a secret that contains CA certificates
// necessary for communicating with OpenStack.
//
// +optional
CertificatesSecretRef *corev1.LocalObjectReference `json:"certificatesSecretRef,omitempty"`
}
// VSphereClusterDeprovision contains VMware vSphere-specific configuration for a ClusterDeprovision
type VSphereClusterDeprovision struct {
// CredentialsSecretRef is the vSphere account credentials to use for deprovisioning the cluster
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// CertificatesSecretRef refers to a secret that contains the vSphere CA certificates
// necessary for communicating with the vCenter.
CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
// VCenter is the vSphere vCenter hostname.
VCenter string `json:"vCenter"`
}
// OvirtClusterDeprovision contains oVirt-specific configuration for a ClusterDeprovision
type OvirtClusterDeprovision struct {
// The oVirt cluster ID
ClusterID string `json:"clusterID"`
// CredentialsSecretRef is the oVirt account credentials to use for deprovisioning the cluster
// secret fields: ovirt_url, ovirt_username, ovirt_password, ovirt_ca_bundle
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// CertificatesSecretRef refers to a secret that contains the oVirt CA certificates
// necessary for communicating with oVirt.
CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
}
// IBMClusterDeprovision contains IBM Cloud specific configuration for a ClusterDeprovision
type IBMClusterDeprovision struct {
// CredentialsSecretRef is the IBM Cloud credentials to use for deprovisioning the cluster
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region specifies the IBM Cloud region
Region string `json:"region"`
// BaseDomain is the DNS base domain
BaseDomain string `json:"baseDomain"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterDeprovision is the Schema for the clusterdeprovisions API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.infraID"
// +kubebuilder:printcolumn:name="ClusterID",type="string",JSONPath=".spec.clusterID"
// +kubebuilder:printcolumn:name="Completed",type="boolean",JSONPath=".status.completed"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:path=clusterdeprovisions,shortName=cdr,scope=Namespaced
type ClusterDeprovision struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterDeprovisionSpec `json:"spec,omitempty"`
Status ClusterDeprovisionStatus `json:"status,omitempty"`
}
// ClusterDeprovisionCondition contains details for the current condition of a ClusterDeprovision
type ClusterDeprovisionCondition struct {
// Type is the type of the condition.
Type ClusterDeprovisionConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterDeprovisionConditionType is a valid value for ClusterDeprovisionCondition.Type
type ClusterDeprovisionConditionType string
const (
// AuthenticationFailureClusterDeprovisionCondition is true when credentials cannot be used because of authentication failure
AuthenticationFailureClusterDeprovisionCondition ClusterDeprovisionConditionType = "AuthenticationFailure"
// DeprovisionFailedClusterDeprovisionCondition is true when deprovision attempt failed
DeprovisionFailedClusterDeprovisionCondition ClusterDeprovisionConditionType = "DeprovisionFailed"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterDeprovisionList contains a list of ClusterDeprovision
type ClusterDeprovisionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterDeprovision `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterDeprovision{}, &ClusterDeprovisionList{})
}
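
A sketch of requesting deprovision of AWS resources by infra ID (the region, secret name, and infra ID are hypothetical):

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

func main() {
	// Tear down the cloud resources tagged with the given infra ID.
	dp := hivev1.ClusterDeprovision{
		ObjectMeta: metav1.ObjectMeta{Name: "mycluster-deprovision", Namespace: "mycluster"},
		Spec: hivev1.ClusterDeprovisionSpec{
			InfraID: "mycluster-x7k2p",
			Platform: hivev1.ClusterDeprovisionPlatform{
				AWS: &hivev1.AWSClusterDeprovision{
					Region:               "us-east-1",
					CredentialsSecretRef: &corev1.LocalObjectReference{Name: "aws-creds"},
				},
			},
		},
	}
	_ = dp
}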

46
vendor/github.com/openshift/hive/apis/hive/v1/clusterimageset_types.go generated vendored Normal file

@ -0,0 +1,46 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterImageSetSpec defines the desired state of ClusterImageSet
type ClusterImageSetSpec struct {
// ReleaseImage is the image that contains the payload to use when installing
// a cluster.
ReleaseImage string `json:"releaseImage"`
}
// ClusterImageSetStatus defines the observed state of ClusterImageSet
type ClusterImageSetStatus struct{}
// +genclient:nonNamespaced
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterImageSet is the Schema for the clusterimagesets API
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Cluster
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Release",type="string",JSONPath=".spec.releaseImage"
// +kubebuilder:resource:path=clusterimagesets,shortName=imgset,scope=Cluster
type ClusterImageSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterImageSetSpec `json:"spec,omitempty"`
Status ClusterImageSetStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterImageSetList contains a list of ClusterImageSet
type ClusterImageSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterImageSet `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterImageSet{}, &ClusterImageSetList{})
}
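
A short sketch of a cluster-scoped ClusterImageSet pinning a release payload (the image tag is illustrative):

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

func main() {
	// Cluster-scoped resource, so no namespace is set.
	imgset := hivev1.ClusterImageSet{
		ObjectMeta: metav1.ObjectMeta{Name: "openshift-v4.10.0"},
		Spec: hivev1.ClusterImageSetSpec{
			ReleaseImage: "quay.io/openshift-release-dev/ocp-release:4.10.0-x86_64",
		},
	}
	_ = imgset
}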

46
vendor/github.com/openshift/hive/apis/hive/v1/clusterinstall_conditions.go generated vendored Normal file

@ -0,0 +1,46 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Common types that can be used by all ClusterInstall implementations.
// ClusterInstallCondition contains details for the current condition of a cluster install.
type ClusterInstallCondition struct {
// Type is the type of the condition.
Type string `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
const (
// ClusterInstallRequirementsMet is True when all pre-install requirements have been met.
ClusterInstallRequirementsMet = "RequirementsMet"
// ClusterInstallCompleted is True when the requested install has been completed successfully.
ClusterInstallCompleted = "Completed"
// ClusterInstallFailed is True when an attempt to install the cluster has failed.
// The ClusterInstall controllers may still be retrying if supported, and this condition will
// go back to False if a later attempt succeeds.
ClusterInstallFailed = "Failed"
// ClusterInstallStopped is True when the controllers are no longer working on this
// ClusterInstall. Combine with Completed or Failed to know if the overall request was
// successful or not.
ClusterInstallStopped = "Stopped"
)
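
This file defines no lookup helper, so a hypothetical one for finding a condition by type (e.g. to combine Stopped with Completed or Failed, as described above) might look like:

package main

import (
	hivev1 "github.com/openshift/hive/apis/hive/v1"
)

// findClusterInstallCondition returns a pointer to the condition with the
// given type, or nil if that condition has not been reported yet.
func findClusterInstallCondition(conds []hivev1.ClusterInstallCondition, condType string) *hivev1.ClusterInstallCondition {
	for i := range conds {
		if conds[i].Type == condType {
			return &conds[i]
		}
	}
	return nil
}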

225
vendor/github.com/openshift/hive/apis/hive/v1/clusterpool_types.go generated vendored Normal file

@ -0,0 +1,225 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterPoolSpec defines the desired state of the ClusterPool.
type ClusterPoolSpec struct {
// Platform encompasses the desired platform for the cluster.
// +required
Platform Platform `json:"platform"`
// PullSecretRef is the reference to the secret to use when pulling images.
// +optional
PullSecretRef *corev1.LocalObjectReference `json:"pullSecretRef,omitempty"`
// Size is the default number of clusters that we should keep provisioned and waiting for use.
// +kubebuilder:validation:Minimum=0
// +required
Size int32 `json:"size"`
// RunningCount is the number of clusters we should keep running. The remainder will be kept hibernated until claimed.
// By default no clusters will be kept running (all will be hibernated).
// +kubebuilder:validation:Minimum=0
// +optional
RunningCount int32 `json:"runningCount,omitempty"`
// MaxSize is the maximum number of clusters that will be provisioned including clusters that have been claimed
// and ones waiting to be used.
// By default there is no limit.
// +optional
MaxSize *int32 `json:"maxSize,omitempty"`
// MaxConcurrent is the maximum number of clusters that will be provisioned or deprovisioned at any time. This includes the
// claimed clusters being deprovisioned.
// By default there is no limit.
// +optional
MaxConcurrent *int32 `json:"maxConcurrent,omitempty"`
// BaseDomain is the base domain to use for all clusters created in this pool.
// +required
BaseDomain string `json:"baseDomain"`
// ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used
// by clusters created for this cluster pool.
ImageSetRef ClusterImageSetReference `json:"imageSetRef"`
// Labels to be applied to new ClusterDeployments created for the pool. ClusterDeployments that have already been
// claimed will not be affected when this value is modified.
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations to be applied to new ClusterDeployments created for the pool. ClusterDeployments that have already been
// claimed will not be affected when this value is modified.
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// InstallConfigSecretTemplateRef is a reference to a secret with the key install-config.yaml, whose content
// will be used as an install-config.yaml template for all clusters in this pool.
// Cluster specific settings (name, basedomain) will be injected dynamically when the ClusterDeployment install-config Secret is generated.
// +optional
InstallConfigSecretTemplateRef *corev1.LocalObjectReference `json:"installConfigSecretTemplateRef,omitempty"`
// HibernateAfter will be applied to new ClusterDeployments created for the pool. HibernateAfter will transition
// clusters in the clusterpool to the hibernating power state after they have been running for the given duration. The time
// that a cluster has been running is the time since the cluster was installed or the time since the cluster last came
// out of hibernation.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
HibernateAfter *metav1.Duration `json:"hibernateAfter,omitempty"`
// InstallAttemptsLimit is the maximum number of times Hive will attempt to install the cluster.
// +optional
InstallAttemptsLimit *int32 `json:"installAttemptsLimit,omitempty"`
// SkipMachinePools allows creating clusterpools where the machinepools are not managed by hive after cluster creation
// +optional
SkipMachinePools bool `json:"skipMachinePools,omitempty"`
// ClaimLifetime defines the lifetimes for claims for the cluster pool.
// +optional
ClaimLifetime *ClusterPoolClaimLifetime `json:"claimLifetime,omitempty"`
// HibernationConfig configures the hibernation/resume behavior of ClusterDeployments owned by the ClusterPool.
// +optional
HibernationConfig *HibernationConfig `json:"hibernationConfig"`
}
type HibernationConfig struct {
// ResumeTimeout is the maximum amount of time we will wait for an unclaimed ClusterDeployment to resume from
// hibernation (e.g. at the behest of runningCount, or in preparation for being claimed). If this time is
// exceeded, the ClusterDeployment will be considered Broken and we will replace it. The default (unspecified
// or zero) means no timeout -- we will allow the ClusterDeployment to continue trying to resume "forever".
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
ResumeTimeout metav1.Duration `json:"resumeTimeout"`
}
// ClusterPoolClaimLifetime defines the lifetimes for claims for the cluster pool.
type ClusterPoolClaimLifetime struct {
// Default is the default lifetime of the claim when no lifetime is set on the claim itself.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
Default *metav1.Duration `json:"default,omitempty"`
// Maximum is the maximum lifetime of the claim after it is assigned a cluster. If the claim still exists
// when the lifetime has elapsed, the claim will be deleted by Hive.
// The lifetime of a claim is the minimum of the lifetimes set by the cluster pool and the claim itself.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// Note: due to discrepancies in validation vs parsing, we use a Pattern instead of `Format=duration`. See
// https://bugzilla.redhat.com/show_bug.cgi?id=2050332
// https://github.com/kubernetes/apimachinery/issues/131
// https://github.com/kubernetes/apiextensions-apiserver/issues/56
// +optional
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
Maximum *metav1.Duration `json:"maximum,omitempty"`
}
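
The hibernateAfter, default, and maximum fields above all carry the same Pattern because metav1.Duration round-trips through time.Duration.String() (e.g. "4h0m0s"), some of whose output the OpenAPI Format=duration validator rejects. A minimal sketch of populating the claim lifetimes, assuming this package's context plus a "time" import; the values are illustrative only:

    // exampleClaimLifetime: claims default to a 4-hour lifetime and are
    // force-deleted 24 hours after being assigned a cluster.
    func exampleClaimLifetime() *ClusterPoolClaimLifetime {
    	return &ClusterPoolClaimLifetime{
    		Default: &metav1.Duration{Duration: 4 * time.Hour},  // marshals as "4h0m0s"
    		Maximum: &metav1.Duration{Duration: 24 * time.Hour}, // marshals as "24h0m0s"
    	}
    }
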
// ClusterPoolStatus defines the observed state of ClusterPool
type ClusterPoolStatus struct {
// Size is the number of unclaimed clusters that have been created for the pool.
Size int32 `json:"size"`
// Standby is the number of unclaimed clusters that are installed, but not running.
// +optional
Standby int32 `json:"standby"`
// Ready is the number of unclaimed clusters that are installed and are running and ready to be claimed.
Ready int32 `json:"ready"`
// Conditions includes more detailed status for the cluster pool
// +optional
Conditions []ClusterPoolCondition `json:"conditions,omitempty"`
}
// ClusterPoolCondition contains details for the current condition of a cluster pool
type ClusterPoolCondition struct {
// Type is the type of the condition.
Type ClusterPoolConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterPoolConditionType is a valid value for ClusterPoolCondition.Type
type ClusterPoolConditionType string
const (
// ClusterPoolMissingDependenciesCondition is set when a cluster pool is missing dependencies required to create a
// cluster. Dependencies include resources such as the ClusterImageSet and the credentials Secret.
ClusterPoolMissingDependenciesCondition ClusterPoolConditionType = "MissingDependencies"
// ClusterPoolCapacityAvailableCondition is set to provide information on whether the cluster pool has capacity
// available to create more clusters for the pool.
ClusterPoolCapacityAvailableCondition ClusterPoolConditionType = "CapacityAvailable"
// ClusterPoolAllClustersCurrentCondition indicates whether all unassigned (installing or ready)
// ClusterDeployments in the pool match the current configuration of the ClusterPool.
ClusterPoolAllClustersCurrentCondition ClusterPoolConditionType = "AllClustersCurrent"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterPool represents a pool of clusters that should be kept ready to be given out to users. Clusters are removed
// from the pool once claimed and then automatically replaced with a new one.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.size,statuspath=.status.size
// +kubebuilder:printcolumn:name="Size",type="string",JSONPath=".spec.size"
// +kubebuilder:printcolumn:name="Standby",type="string",JSONPath=".status.standby"
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready"
// +kubebuilder:printcolumn:name="BaseDomain",type="string",JSONPath=".spec.baseDomain"
// +kubebuilder:printcolumn:name="ImageSet",type="string",JSONPath=".spec.imageSetRef.name"
// +kubebuilder:resource:path=clusterpools,shortName=cp
type ClusterPool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterPoolSpec `json:"spec"`
Status ClusterPoolStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterPoolList contains a list of ClusterPools
type ClusterPoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterPool `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterPool{}, &ClusterPoolList{})
}
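
Read together, the sizing knobs work like this: Size (declared earlier in the spec and surfaced through the scale subresource above) is how many unclaimed clusters to keep, RunningCount is how many of those stay awake, and MaxSize/MaxConcurrent cap the total and in-flight cluster counts. A sketch with illustrative values, assuming this package's context; the pool name, domain, and image set name are hypothetical:

    func exampleClusterPool() ClusterPool {
    	maxConcurrent, maxSize := int32(3), int32(15)
    	return ClusterPool{
    		ObjectMeta: metav1.ObjectMeta{Name: "ocp-pool", Namespace: "hive"},
    		Spec: ClusterPoolSpec{
    			Size:          10,             // keep 10 unclaimed clusters
    			RunningCount:  2,              // 2 stay running; the other 8 hibernate
    			MaxSize:       &maxSize,       // never more than 15 clusters total
    			MaxConcurrent: &maxConcurrent, // at most 3 (de)provisions in flight
    			BaseDomain:    "example.com",
    			ImageSetRef:   ClusterImageSetReference{Name: "ocp-release"},
    		},
    	}
    }
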

146
vendor/github.com/openshift/hive/apis/hive/v1/clusterprovision_types.go generated vendored Normal file
View file

@ -0,0 +1,146 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// ClusterProvisionSpec defines the results of provisioning a cluster.
type ClusterProvisionSpec struct {
// ClusterDeploymentRef references the cluster deployment provisioned.
ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"`
// PodSpec is the spec to use for the installer pod.
PodSpec corev1.PodSpec `json:"podSpec"`
// Attempt is the attempt number of the cluster deployment that this ClusterProvision represents.
Attempt int `json:"attempt"`
// Stage is the stage of provisioning that the cluster deployment has reached.
Stage ClusterProvisionStage `json:"stage"`
// ClusterID is a globally unique identifier for this cluster generated during installation. Used for reporting metrics among other places.
ClusterID *string `json:"clusterID,omitempty"`
// InfraID is an identifier for this cluster generated during installation and used for tagging/naming resources in cloud providers.
InfraID *string `json:"infraID,omitempty"`
// InstallLog is the log from the installer.
InstallLog *string `json:"installLog,omitempty"`
// Metadata is the metadata.json generated by the installer, providing metadata information about the cluster created.
Metadata *runtime.RawExtension `json:"metadata,omitempty"`
// AdminKubeconfigSecretRef references the secret containing the admin kubeconfig for this cluster.
AdminKubeconfigSecretRef *corev1.LocalObjectReference `json:"adminKubeconfigSecretRef,omitempty"`
// AdminPasswordSecretRef references the secret containing the admin username/password which can be used to log in to this cluster.
AdminPasswordSecretRef *corev1.LocalObjectReference `json:"adminPasswordSecretRef,omitempty"`
// PrevClusterID is the cluster ID of the previous failed provision attempt.
PrevClusterID *string `json:"prevClusterID,omitempty"`
// PrevInfraID is the infra ID of the previous failed provision attempt.
PrevInfraID *string `json:"prevInfraID,omitempty"`
// PrevProvisionName is the name of the previous failed provision attempt.
PrevProvisionName *string `json:"prevProvisionName,omitempty"`
}
// ClusterProvisionStatus defines the observed state of ClusterProvision.
type ClusterProvisionStatus struct {
// JobRef is the reference to the job performing the provision.
JobRef *corev1.LocalObjectReference `json:"jobRef,omitempty"`
// Conditions includes more detailed status for the cluster provision
// +optional
Conditions []ClusterProvisionCondition `json:"conditions,omitempty"`
}
// ClusterProvisionStage is the stage of provisioning.
type ClusterProvisionStage string
const (
// ClusterProvisionStageInitializing indicates that pre-provision initialization is underway.
ClusterProvisionStageInitializing ClusterProvisionStage = "initializing"
// ClusterProvisionStageProvisioning indicates that the cluster provision is ongoing.
ClusterProvisionStageProvisioning ClusterProvisionStage = "provisioning"
// ClusterProvisionStageComplete indicates that the cluster provision completed successfully.
ClusterProvisionStageComplete ClusterProvisionStage = "complete"
// ClusterProvisionStageFailed indicates that the cluster provision failed.
ClusterProvisionStageFailed ClusterProvisionStage = "failed"
)
// ClusterProvisionCondition contains details for the current condition of a cluster provision
type ClusterProvisionCondition struct {
// Type is the type of the condition.
Type ClusterProvisionConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterProvisionConditionType is a valid value for ClusterProvisionCondition.Type
type ClusterProvisionConditionType string
const (
// ClusterProvisionInitializedCondition is set when a cluster provision has finished initialization.
ClusterProvisionInitializedCondition ClusterProvisionConditionType = "ClusterProvisionInitialized"
// ClusterProvisionCompletedCondition is set when a cluster provision completes.
ClusterProvisionCompletedCondition ClusterProvisionConditionType = "ClusterProvisionCompleted"
// ClusterProvisionFailedCondition is set when a cluster provision fails.
ClusterProvisionFailedCondition ClusterProvisionConditionType = "ClusterProvisionFailed"
// ClusterProvisionJobCreated is set when the install job is created for a cluster provision.
ClusterProvisionJobCreated ClusterProvisionConditionType = "ClusterProvisionJobCreated"
// InstallPodStuckCondition is set when the install pod is stuck
InstallPodStuckCondition ClusterProvisionConditionType = "InstallPodStuck"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterProvision is the Schema for the clusterprovisions API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="ClusterDeployment",type="string",JSONPath=".spec.clusterDeploymentRef.name"
// +kubebuilder:printcolumn:name="Stage",type="string",JSONPath=".spec.stage"
// +kubebuilder:printcolumn:name="InfraID",type="string",JSONPath=".spec.infraID"
// +kubebuilder:resource:path=clusterprovisions,scope=Namespaced
type ClusterProvision struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
// +kubebuilder:pruning:PreserveUnknownFields
Spec ClusterProvisionSpec `json:"spec,omitempty"`
Status ClusterProvisionStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterProvisionList contains a list of ClusterProvision
type ClusterProvisionList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterProvision `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterProvision{}, &ClusterProvisionList{})
}
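
The Stage constants form a one-way lifecycle: initializing → provisioning → complete or failed. A small sketch of a terminal-stage check built on them, assuming this package's context:

    // provisionFinished reports whether the provision has reached a terminal
    // stage and will make no further progress.
    func provisionFinished(p *ClusterProvision) bool {
    	switch p.Spec.Stage {
    	case ClusterProvisionStageComplete, ClusterProvisionStageFailed:
    		return true
    	default:
    		return false
    	}
    }
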

56
vendor/github.com/openshift/hive/apis/hive/v1/clusterrelocate_types.go generated vendored Normal file
View file

@ -0,0 +1,56 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ClusterRelocateSpec defines the relocation of clusters from one Hive instance to another.
type ClusterRelocateSpec struct {
// KubeconfigSecretRef is a reference to the secret containing the kubeconfig for the destination Hive instance.
// The kubeconfig must be in a data field where the key is "kubeconfig".
KubeconfigSecretRef KubeconfigSecretReference `json:"kubeconfigSecretRef"`
// ClusterDeploymentSelector is a LabelSelector indicating which clusters will be relocated.
ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector"`
}
// KubeconfigSecretReference is a reference to a secret containing the kubeconfig for a remote cluster.
type KubeconfigSecretReference struct {
// Name is the name of the secret.
Name string `json:"name"`
// Namespace is the namespace where the secret lives.
Namespace string `json:"namespace"`
}
// ClusterRelocateStatus defines the observed state of ClusterRelocate.
type ClusterRelocateStatus struct{}
// +genclient:nonNamespaced
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRelocate is the Schema for the ClusterRelocates API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Selector",type="string",JSONPath=".spec.clusterDeploymentSelector"
// +kubebuilder:resource:path=clusterrelocates
type ClusterRelocate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterRelocateSpec `json:"spec,omitempty"`
Status ClusterRelocateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterRelocateList contains a list of ClusterRelocate
type ClusterRelocateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterRelocate `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterRelocate{}, &ClusterRelocateList{})
}
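
A sketch of a ClusterRelocate that would move every ClusterDeployment labeled region=us-east-1 to the Hive instance whose kubeconfig is stored in a hypothetical hive/dest-hive-kubeconfig secret; assumes this package's context:

    func exampleClusterRelocate() ClusterRelocate {
    	return ClusterRelocate{
    		ObjectMeta: metav1.ObjectMeta{Name: "move-east"},
    		Spec: ClusterRelocateSpec{
    			KubeconfigSecretRef: KubeconfigSecretReference{
    				Name:      "dest-hive-kubeconfig", // secret must carry a "kubeconfig" data key
    				Namespace: "hive",
    			},
    			ClusterDeploymentSelector: metav1.LabelSelector{
    				MatchLabels: map[string]string{"region": "us-east-1"},
    			},
    		},
    	}
    }
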

59
vendor/github.com/openshift/hive/apis/hive/v1/clusterstate_types.go generated vendored Normal file
View file

@ -0,0 +1,59 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
configv1 "github.com/openshift/api/config/v1"
)
// ClusterStateSpec defines the desired state of ClusterState
type ClusterStateSpec struct {
}
// ClusterStateStatus defines the observed state of ClusterState
type ClusterStateStatus struct {
// LastUpdated is the last time that operator state was updated
LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
// ClusterOperators contains the state for every cluster operator in the
// target cluster
ClusterOperators []ClusterOperatorState `json:"clusterOperators,omitempty"`
}
// ClusterOperatorState summarizes the status of a single cluster operator
type ClusterOperatorState struct {
// Name is the name of the cluster operator
Name string `json:"name"`
// Conditions is the set of conditions in the status of the cluster operator
// on the target cluster
Conditions []configv1.ClusterOperatorStatusCondition `json:"conditions,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterState is the Schema for the clusterstates API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced
type ClusterState struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterStateSpec `json:"spec,omitempty"`
Status ClusterStateStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterStateList contains a list of ClusterState
type ClusterStateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterState `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterState{}, &ClusterStateList{})
}

230
vendor/github.com/openshift/hive/apis/hive/v1/dnszone_types.go generated vendored Normal file
View file

@ -0,0 +1,230 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/hive/apis/hive/v1/aws"
"github.com/openshift/hive/apis/hive/v1/azure"
)
const (
// FinalizerDNSZone is used on DNSZones to ensure we successfully deprovision
// the cloud objects before cleaning up the API object.
FinalizerDNSZone string = "hive.openshift.io/dnszone"
// FinalizerDNSEndpoint is used on DNSZones to ensure we successfully
// delete the parent-link records before cleaning up the API object.
FinalizerDNSEndpoint string = "hive.openshift.io/dnsendpoint"
)
// DNSZoneSpec defines the desired state of DNSZone
type DNSZoneSpec struct {
// Zone is the DNS zone to host
Zone string `json:"zone"`
// LinkToParentDomain specifies whether DNS records should
// be automatically created to link this DNSZone with a
// parent domain.
// +optional
LinkToParentDomain bool `json:"linkToParentDomain,omitempty"`
// PreserveOnDelete allows the user to disconnect a DNSZone from Hive without deprovisioning it.
// This can also be used to abandon an ongoing DNSZone deprovision.
// Typically set automatically due to PreserveOnDelete being set on a ClusterDeployment.
// +optional
PreserveOnDelete bool `json:"preserveOnDelete,omitempty"`
// AWS specifies AWS-specific cloud configuration
// +optional
AWS *AWSDNSZoneSpec `json:"aws,omitempty"`
// GCP specifies GCP-specific cloud configuration
// +optional
GCP *GCPDNSZoneSpec `json:"gcp,omitempty"`
// Azure specifies Azure-specific cloud configuration
// +optional
Azure *AzureDNSZoneSpec `json:"azure,omitempty"`
}
// AWSDNSZoneSpec contains AWS-specific DNSZone specifications
type AWSDNSZoneSpec struct {
// CredentialsSecretRef contains a reference to a secret that contains AWS credentials
// for CRUD operations
// +optional
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
// CredentialsAssumeRole refers to the IAM role that must be assumed to obtain
// AWS account access for the DNS CRUD operations.
// +optional
CredentialsAssumeRole *aws.AssumeRole `json:"credentialsAssumeRole,omitempty"`
// AdditionalTags is a set of additional tags to set on the DNS hosted zone. In addition
// to these tags, the DNS Zone controller will set a hive.openshift.io/hostedzone tag
// identifying the HostedZone record that it belongs to.
AdditionalTags []AWSResourceTag `json:"additionalTags,omitempty"`
// Region is the AWS region to use for route53 operations.
// This defaults to us-east-1.
// For AWS China, use cn-northwest-1.
// +optional
Region string `json:"region,omitempty"`
}
// AWSResourceTag represents a tag that is applied to an AWS cloud resource
type AWSResourceTag struct {
// Key is the key for the tag
Key string `json:"key"`
// Value is the value for the tag
Value string `json:"value"`
}
// GCPDNSZoneSpec contains GCP-specific DNSZone specifications
type GCPDNSZoneSpec struct {
// CredentialsSecretRef references a secret that will be used to authenticate with
// GCP CloudDNS. It will need permission to create and manage CloudDNS Hosted Zones.
// Secret should have a key named 'osServiceAccount.json'.
// The credentials must specify the project to use.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
}
// AzureDNSZoneSpec contains Azure-specific DNSZone specifications
type AzureDNSZoneSpec struct {
// CredentialsSecretRef references a secret that will be used to authenticate with
// Azure DNS. It will need permission to create and manage DNS Hosted Zones.
// Secret should have a key named 'osServicePrincipal.json'.
// The credentials must specify the project to use.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// ResourceGroupName specifies the Azure resource group in which the Hosted Zone should be created.
ResourceGroupName string `json:"resourceGroupName"`
// CloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
// with the appropriate Azure API endpoints.
// If empty, the value is equal to "AzurePublicCloud".
// +optional
CloudName azure.CloudEnvironment `json:"cloudName,omitempty"`
}
// DNSZoneStatus defines the observed state of DNSZone
type DNSZoneStatus struct {
// LastSyncTimestamp is the time that the zone was last sync'd.
// +optional
LastSyncTimestamp *metav1.Time `json:"lastSyncTimestamp,omitempty"`
// LastSyncGeneration is the generation of the zone resource that was last sync'd. This is used to know
// if the Object has changed and we should sync immediately.
// +optional
LastSyncGeneration int64 `json:"lastSyncGeneration,omitempty"`
// NameServers is a list of nameservers for this DNS zone
// +optional
NameServers []string `json:"nameServers,omitempty"`
// AWSDNSZoneStatus contains status information specific to AWS
// +optional
AWS *AWSDNSZoneStatus `json:"aws,omitempty"`
// GCPDNSZoneStatus contains status information specific to GCP
// +optional
GCP *GCPDNSZoneStatus `json:"gcp,omitempty"`
// AzureDNSZoneStatus contains status information specific to Azure
Azure *AzureDNSZoneStatus `json:"azure,omitempty"`
// Conditions includes more detailed status for the DNSZone
// +optional
Conditions []DNSZoneCondition `json:"conditions,omitempty"`
}
// AWSDNSZoneStatus contains status information specific to AWS DNS zones
type AWSDNSZoneStatus struct {
// ZoneID is the ID of the zone in AWS
// +optional
ZoneID *string `json:"zoneID,omitempty"`
}
// AzureDNSZoneStatus contains status information specific to Azure DNS zones
type AzureDNSZoneStatus struct {
}
// GCPDNSZoneStatus contains status information specific to GCP Cloud DNS zones
type GCPDNSZoneStatus struct {
// ZoneName is the name of the zone in GCP Cloud DNS
// +optional
ZoneName *string `json:"zoneName,omitempty"`
}
// DNSZoneCondition contains details for the current condition of a DNSZone
type DNSZoneCondition struct {
// Type is the type of the condition.
Type DNSZoneConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// DNSZoneConditionType is a valid value for DNSZoneCondition.Type
type DNSZoneConditionType string
const (
// ZoneAvailableDNSZoneCondition is true if the DNSZone is responding to DNS queries
ZoneAvailableDNSZoneCondition DNSZoneConditionType = "ZoneAvailable"
// ParentLinkCreatedCondition is true if the parent link has been created
ParentLinkCreatedCondition DNSZoneConditionType = "ParentLinkCreated"
// DomainNotManaged is true if we try to reconcile a DNSZone and the HiveConfig
// does not contain a ManagedDNS entry for the domain in the DNSZone
DomainNotManaged DNSZoneConditionType = "DomainNotManaged"
// InsufficientCredentialsCondition is true when credentials cannot be used to create a
// DNS zone because of insufficient permissions
InsufficientCredentialsCondition DNSZoneConditionType = "InsufficientCredentials"
// AuthenticationFailureCondition is true when credentials cannot be used to create a
// DNS zone because they fail authentication
AuthenticationFailureCondition DNSZoneConditionType = "AuthenticationFailure"
// APIOptInRequiredCondition is true when the user account used for managing DNS
// needs to enable the DNS apis.
APIOptInRequiredCondition DNSZoneConditionType = "APIOptInRequired"
// GenericDNSErrorsCondition is true when there's some DNS Zone related error that isn't related to
// authentication or credentials, and needs to be bubbled up to ClusterDeployment
GenericDNSErrorsCondition DNSZoneConditionType = "DNSError"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZone is the Schema for the dnszones API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Namespaced
type DNSZone struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DNSZoneSpec `json:"spec,omitempty"`
Status DNSZoneStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZoneList contains a list of DNSZone
type DNSZoneList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DNSZone `json:"items"`
}
func init() {
SchemeBuilder.Register(&DNSZone{}, &DNSZoneList{})
}

7
vendor/github.com/openshift/hive/apis/hive/v1/doc.go generated vendored Normal file
View file

@ -0,0 +1,7 @@
// Package v1 contains API Schema definitions for the hive v1 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
// +k8s:defaulter-gen=TypeMeta
// +groupName=hive.openshift.io
package v1

13
vendor/github.com/openshift/hive/apis/hive/v1/gcp/clouduid.go generated vendored Normal file
View file

@ -0,0 +1,13 @@
package gcp
import (
"crypto/md5"
"fmt"
)
// CloudControllerUID generates a UID used by the GCP cloud controller provider
// to generate certain load balancing resources
func CloudControllerUID(infraID string) string {
hash := md5.Sum([]byte(infraID))
return fmt.Sprintf("%x", hash)[:16]
}
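
CloudControllerUID is deterministic: the UID is simply the first 16 hex characters of the MD5 digest of the infra ID, so the same cluster always maps to the same UID and its load-balancing resources can be located again on later reconciles. A usage sketch with a hypothetical infra ID:

    func exampleCloudControllerUID() string {
    	// Always returns the same 16-character hex string for this input.
    	return CloudControllerUID("mycluster-x7k2p")
    }
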

4
vendor/github.com/openshift/hive/apis/hive/v1/gcp/doc.go generated vendored Normal file
View file

@ -0,0 +1,4 @@
// Package gcp contains API Schema definitions for GCP clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package gcp

74
vendor/github.com/openshift/hive/apis/hive/v1/gcp/machinepools.go generated vendored Normal file
View file

@ -0,0 +1,74 @@
package gcp
// MachinePool stores the configuration for a machine pool installed on GCP.
type MachinePool struct {
// Zones is list of availability zones that can be used.
Zones []string `json:"zones,omitempty"`
// InstanceType defines the GCP instance type.
// eg. n1-standard-4
InstanceType string `json:"type"`
// OSDisk defines the storage for instances.
//
// +optional
OSDisk OSDisk `json:"osDisk"`
}
// OSDisk defines the disk for machines on GCP.
type OSDisk struct {
// DiskType defines the type of disk.
// The valid values are pd-standard and pd-ssd.
// Defaulted internally to pd-ssd.
// +kubebuilder:validation:Enum=pd-ssd;pd-standard
// +optional
DiskType string `json:"diskType,omitempty"`
// DiskSizeGB defines the size of disk in GB.
// Defaulted internally to 128.
//
// +kubebuilder:validation:Minimum=16
// +kubebuilder:validation:Maximum=65536
// +optional
DiskSizeGB int64 `json:"diskSizeGB,omitempty"`
// EncryptionKey defines the KMS key to be used to encrypt the disk.
//
// +optional
EncryptionKey *EncryptionKeyReference `json:"encryptionKey,omitempty"`
}
// KMSKeyReference gathers required fields for looking up a GCP KMS Key
type KMSKeyReference struct {
// Name is the name of the customer managed encryption key to be used for the disk encryption.
Name string `json:"name"`
// KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.
KeyRing string `json:"keyRing"`
// ProjectID is the ID of the Project in which the KMS Key Ring exists.
// Defaults to the VM ProjectID if not set.
//
// +optional
ProjectID string `json:"projectID,omitempty"`
// Location is the GCP location in which the Key Ring exists.
Location string `json:"location"`
}
// EncryptionKeyReference describes the encryptionKey to use for a disk's encryption.
type EncryptionKeyReference struct {
// KMSKey is a reference to a KMS Key to use for the encryption.
//
// +optional
KMSKey *KMSKeyReference `json:"kmsKey,omitempty"`
// KMSKeyServiceAccount is the service account being used for the
// encryption request for the given KMS key. If absent, the Compute
// Engine default service account is used.
// See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account
// for details on the default service account.
//
// +optional
KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"`
}
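
A sketch of a GCP MachinePool that encrypts its disks with a customer-managed KMS key, assuming this package's context; the zones, key name, and key-ring name are hypothetical:

    func exampleMachinePool() MachinePool {
    	return MachinePool{
    		Zones:        []string{"us-central1-a", "us-central1-b"},
    		InstanceType: "n1-standard-4",
    		OSDisk: OSDisk{
    			DiskType:   "pd-ssd", // pd-ssd or pd-standard
    			DiskSizeGB: 256,
    			EncryptionKey: &EncryptionKeyReference{
    				KMSKey: &KMSKeyReference{
    					Name:     "disk-key",
    					KeyRing:  "hive-ring",
    					Location: "us-central1",
    					// ProjectID omitted: defaults to the VM's project.
    				},
    			},
    		},
    	}
    }
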

7
vendor/github.com/openshift/hive/apis/hive/v1/gcp/metadata.go generated vendored Normal file
View file

@ -0,0 +1,7 @@
package gcp
// Metadata contains GCP metadata (e.g. for uninstalling the cluster).
type Metadata struct {
Region string `json:"region"`
ProjectID string `json:"projectID"`
}

16
vendor/github.com/openshift/hive/apis/hive/v1/gcp/platform.go generated vendored Normal file
View file

@ -0,0 +1,16 @@
package gcp
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global configuration that all machinesets
// use.
type Platform struct {
// CredentialsSecretRef refers to a secret that contains the GCP account access
// credentials.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region specifies the GCP region where the cluster will be created.
Region string `json:"region"`
}

119
vendor/github.com/openshift/hive/apis/hive/v1/gcp/zz_generated.deepcopy.go generated vendored Normal file
View file

@ -0,0 +1,119 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package gcp
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EncryptionKeyReference) DeepCopyInto(out *EncryptionKeyReference) {
*out = *in
if in.KMSKey != nil {
in, out := &in.KMSKey, &out.KMSKey
*out = new(KMSKeyReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionKeyReference.
func (in *EncryptionKeyReference) DeepCopy() *EncryptionKeyReference {
if in == nil {
return nil
}
out := new(EncryptionKeyReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KMSKeyReference) DeepCopyInto(out *KMSKeyReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSKeyReference.
func (in *KMSKeyReference) DeepCopy() *KMSKeyReference {
if in == nil {
return nil
}
out := new(KMSKeyReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
in.OSDisk.DeepCopyInto(&out.OSDisk)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Metadata) DeepCopyInto(out *Metadata) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metadata.
func (in *Metadata) DeepCopy() *Metadata {
if in == nil {
return nil
}
out := new(Metadata)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSDisk) DeepCopyInto(out *OSDisk) {
*out = *in
if in.EncryptionKey != nil {
in, out := &in.EncryptionKey, &out.EncryptionKey
*out = new(EncryptionKeyReference)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
func (in *OSDisk) DeepCopy() *OSDisk {
if in == nil {
return nil
}
out := new(OSDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

629
vendor/github.com/openshift/hive/apis/hive/v1/hiveconfig_types.go generated vendored Normal file
View file

@ -0,0 +1,629 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/hive/apis/hive/v1/azure"
"github.com/openshift/hive/apis/hive/v1/metricsconfig"
)
// HiveConfigSpec defines the desired state of Hive
type HiveConfigSpec struct {
// TargetNamespace is the namespace where the core Hive components should be run. Defaults to "hive". Will be
// created if it does not already exist. All resource references in HiveConfig can be assumed to be in the
// TargetNamespace.
// NOTE: Whereas it is possible to edit this value, causing hive to "move" its core components to the new
// namespace, the old namespace is not deleted, as it will still contain resources created by kubernetes
// and/or other OpenShift controllers.
// +optional
TargetNamespace string `json:"targetNamespace,omitempty"`
// ManagedDomains is the list of DNS domains that are managed by the Hive cluster
// When specifying 'manageDNS: true' in a ClusterDeployment, the ClusterDeployment's
// baseDomain should be a direct child of one of these domains, otherwise the
// ClusterDeployment creation will result in a validation error.
// +optional
ManagedDomains []ManageDNSConfig `json:"managedDomains,omitempty"`
// AdditionalCertificateAuthoritiesSecretRef is a list of references to secrets in the
// TargetNamespace that contain an additional Certificate Authority to use when communicating
// with target clusters. These certificate authorities will be used in addition to any self-signed
// CA generated by each cluster on installation. The cert data should be stored in the Secret key named 'ca.crt'.
// +optional
AdditionalCertificateAuthoritiesSecretRef []corev1.LocalObjectReference `json:"additionalCertificateAuthoritiesSecretRef,omitempty"`
// GlobalPullSecretRef is used to specify a pull secret that will be used globally by all of the cluster deployments.
// For each cluster deployment, the contents of GlobalPullSecret will be merged with the specific pull secret for
// a cluster deployment(if specified), with precedence given to the contents of the pull secret for the cluster deployment.
// The global pull secret is assumed to be in the TargetNamespace.
// +optional
GlobalPullSecretRef *corev1.LocalObjectReference `json:"globalPullSecretRef,omitempty"`
// Backup specifies configuration for backup integration.
// If absent, backup integration will be disabled.
// +optional
Backup BackupConfig `json:"backup,omitempty"`
// FailedProvisionConfig is used to configure settings related to handling provision failures.
// +optional
FailedProvisionConfig FailedProvisionConfig `json:"failedProvisionConfig,omitempty"`
// ServiceProviderCredentialsConfig is used to configure credentials related to being a service provider on
// various cloud platforms.
// +optional
ServiceProviderCredentialsConfig ServiceProviderCredentials `json:"serviceProviderCredentialsConfig,omitempty"`
// LogLevel is the level of logging to use for the Hive controllers.
// Acceptable levels, from coarsest to finest, are panic, fatal, error, warn, info, debug, and trace.
// The default level is info.
// +optional
LogLevel string `json:"logLevel,omitempty"`
// SyncSetReapplyInterval is a string duration indicating how much time must pass before SyncSet resources
// will be reapplied.
// The default reapply interval is two hours.
SyncSetReapplyInterval string `json:"syncSetReapplyInterval,omitempty"`
// MaintenanceMode can be set to true to disable the hive controllers in situations where we need to ensure
// nothing is running that will add or act upon finalizers on Hive types. This should rarely be needed.
// Sets replicas to 0 for the hive-controllers deployment to accomplish this.
MaintenanceMode *bool `json:"maintenanceMode,omitempty"`
// DeprovisionsDisabled can be set to true to block deprovision jobs from running.
DeprovisionsDisabled *bool `json:"deprovisionsDisabled,omitempty"`
// DeleteProtection can be set to "enabled" to turn on automatic delete protection for ClusterDeployments. When
// enabled, Hive will add the "hive.openshift.io/protected-delete" annotation to new ClusterDeployments. Once a
// ClusterDeployment has been installed, a user must remove the annotation from a ClusterDeployment prior to
// deleting it.
// +kubebuilder:validation:Enum=enabled
// +optional
DeleteProtection DeleteProtectionType `json:"deleteProtection,omitempty"`
// DisabledControllers allows selectively disabling Hive controllers by name.
// The name of an individual controller matches the name of the controller as seen in the Hive logging output.
DisabledControllers []string `json:"disabledControllers,omitempty"`
// ControllersConfig is used to configure different hive controllers
// +optional
ControllersConfig *ControllersConfig `json:"controllersConfig,omitempty"`
// AWSPrivateLink defines the configuration for the aws-private-link controller.
// It provides 3 major pieces of information required by the controller,
// 1. The Credentials that should be used to create AWS PrivateLink resources other than
// what exist in the customer's account.
// 2. A list of VPCs that can be used by the controller to choose one to create AWS VPC Endpoints
// for the AWS VPC Endpoint Services created for ClusterDeployments in their
// corresponding regions.
// 3. A list of VPCs that should be able to resolve the DNS addresses set up for Private Link.
AWSPrivateLink *AWSPrivateLinkConfig `json:"awsPrivateLink,omitempty"`
// ReleaseImageVerificationConfigMapRef is a reference to the ConfigMap that
// will be used to verify release images.
//
// The config map structure is exactly the same as the config map used for verification of release
// images for OpenShift 4 during upgrades. Therefore you can usually set this to the config map shipped
// as part of OpenShift (openshift-config-managed/release-verification).
//
// See https://github.com/openshift/cluster-update-keys for more details.
// The keys within the config map in the data field define how verification is performed:
//
// verifier-public-key-*: One or more GPG public keys in ASCII form that must have signed the
// release image by digest.
//
// store-*: A URL (scheme file://, http://, or https://) location that contains signatures. These
// signatures are in the atomic container signature format. The URL will have the digest
// of the image appended to it as "<STORE>/<ALGO>=<DIGEST>/signature-<NUMBER>" as described
// in the container image signing format. The docker-image-manifest section of the
// signature must match the release image digest. Signatures are searched starting at
// NUMBER 1 and incrementing if the signature exists but is not valid. The signature is a
// GPG signed and encrypted JSON message. The file store is provided for testing only at
// the current time, although future versions of the CVO might allow host mounting of
// signatures.
//
// See https://github.com/containers/image/blob/ab49b0a48428c623a8f03b41b9083d48966b34a9/docs/signature-protocols.md
// for a description of the signature store
//
// The returned verifier will require that any new release image will only be considered verified
// if each provided public key has signed the release image digest. The signature may be in any
// store and the lookup order is internally defined.
//
// If not set, no verification will be performed.
// +optional
ReleaseImageVerificationConfigMapRef *ReleaseImageVerificationConfigMapReference `json:"releaseImageVerificationConfigMapRef,omitempty"`
// ArgoCD specifies configuration for ArgoCD integration. If enabled, Hive will automatically add provisioned
// clusters to ArgoCD, and remove them when they are deprovisioned.
ArgoCD ArgoCDConfig `json:"argoCDConfig,omitempty"`
FeatureGates *FeatureGateSelection `json:"featureGates,omitempty"`
// ExportMetrics specifies whether the operator should enable metrics for hive controllers
// to be extracted for prometheus.
// When set to true, the operator deploys ServiceMonitors so that prometheus instances can
// extract metrics. The operator also sets up RBAC in the TargetNamespace so that openshift
// prometheus in the cluster can list/access objects required to pull metrics.
ExportMetrics bool `json:"exportMetrics,omitempty"`
// MetricsConfig encapsulates metrics specific configurations, like opting in for certain metrics.
// +optional
MetricsConfig *metricsconfig.MetricsConfig `json:"metricsConfig,omitempty"`
}
// ReleaseImageVerificationConfigMapReference is a reference to the ConfigMap that
// will be used to verify release images.
type ReleaseImageVerificationConfigMapReference struct {
// Namespace of the ConfigMap
Namespace string `json:"namespace"`
// Name of the ConfigMap
Name string `json:"name"`
}
// AWSPrivateLinkConfig defines the configuration for the aws-private-link controller.
type AWSPrivateLinkConfig struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// AWS for creating the resources for AWS PrivateLink.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// EndpointVPCInventory is a list of VPCs and the corresponding subnets in various AWS regions.
// The controller uses this list to choose a VPC for creating AWS VPC Endpoints. Since the
// VPC Endpoints must be in the same region as the ClusterDeployment, we must have VPCs in that
// region to be able to set up Private Link.
EndpointVPCInventory []AWSPrivateLinkInventory `json:"endpointVPCInventory,omitempty"`
// AssociatedVPCs is the list of VPCs that should be able to resolve the DNS addresses
// set up for Private Link. This allows clients in the VPC to resolve the AWS PrivateLink address
// using AWS's default DNS resolver for Private Route53 Hosted Zones.
//
// This list should at minimum include the VPC where the current Hive controller is running.
AssociatedVPCs []AWSAssociatedVPC `json:"associatedVPCs,omitempty"`
// DNSRecordType defines what type of DNS record should be created in Private Hosted Zone
// for the customer cluster's API endpoint (which is the VPC Endpoint's regional DNS name).
//
// +kubebuilder:default=Alias
// +optional
DNSRecordType AWSPrivateLinkDNSRecordType `json:"dnsRecordType,omitempty"`
}
// AWSPrivateLinkDNSRecordType defines what type of DNS record should be created in Private Hosted Zone
// for the customer cluster's API endpoint (which is the VPC Endpoint's regional DNS name).
// +kubebuilder:validation:Enum=Alias;ARecord
type AWSPrivateLinkDNSRecordType string
const (
// AliasAWSPrivateLinkDNSRecordType uses Route53 Alias record type for pointing the customer cluster's
// API DNS name to the DNS name of the VPC endpoint. This is the default and should be used for most
// cases as it is provided at no extra cost in terms of DNS queries and usually resolves faster in AWS
// environments.
AliasAWSPrivateLinkDNSRecordType AWSPrivateLinkDNSRecordType = "Alias"
// ARecordAWSPrivateLinkDNSRecordType uses Route53 A record type for pointing the customer cluster's
// API DNS name to the DNS name of the VPC endpoint. This should be used when Alias record type cannot
// be used or other restrictions prevent use of Alias records.
ARecordAWSPrivateLinkDNSRecordType AWSPrivateLinkDNSRecordType = "ARecord"
)
// AWSPrivateLinkInventory is a VPC and its corresponding subnets in an AWS region.
// This VPC will be used to create an AWS VPC Endpoint whenever there is a VPC Endpoint Service
// created for a ClusterDeployment.
type AWSPrivateLinkInventory struct {
AWSPrivateLinkVPC `json:",inline"`
Subnets []AWSPrivateLinkSubnet `json:"subnets"`
}
// AWSAssociatedVPC defines a VPC that should be able to resolve the DNS addresses
// set up for Private Link.
type AWSAssociatedVPC struct {
AWSPrivateLinkVPC `json:",inline"`
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// AWS for associating the VPC with the Private HostedZone created for PrivateLink.
// When not provided, the common credentials for the controller should be used.
//
// +optional
CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef"`
}
// AWSPrivateLinkVPC defines an AWS VPC in a region.
type AWSPrivateLinkVPC struct {
VPCID string `json:"vpcID"`
Region string `json:"region"`
}
// AWSPrivateLinkSubnet defines a subnet in an AWS VPC.
type AWSPrivateLinkSubnet struct {
SubnetID string `json:"subnetID"`
AvailabilityZone string `json:"availabilityZone"`
}
// ServiceProviderCredentials is used to configure credentials related to being a service provider on
// various cloud platforms.
type ServiceProviderCredentials struct {
// AWS is used to configure credentials related to being a service provider on AWS.
// +optional
AWS *AWSServiceProviderCredentials `json:"aws,omitempty"`
}
// AWSServiceProviderCredentials is used to configure credentials related to being a service
// provider on AWS.
type AWSServiceProviderCredentials struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// AWS to become the Service Provider. Being a Service Provider allows the controllers
// to assume the role in customer AWS accounts to manage clusters.
// +optional
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
}
// FeatureSet defines the set of feature gates that should be used.
// +kubebuilder:validation:Enum="";Custom
type FeatureSet string
var (
// DefaultFeatureSet feature set is the default things supported as part of normal supported platform.
DefaultFeatureSet FeatureSet = ""
// CustomFeatureSet allows the enabling or disabling of any feature. Turning this feature set on IS NOT SUPPORTED.
// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations,
// it might leave the object in an unrecoverable state.
CustomFeatureSet FeatureSet = "Custom"
)
// FeatureGateSelection allows selecting feature gates for the controller.
type FeatureGateSelection struct {
// featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting.
// +unionDiscriminator
// +optional
FeatureSet FeatureSet `json:"featureSet,omitempty"`
// custom allows the enabling or disabling of any feature.
// Because of its nature, this setting cannot be validated. If you have any typos or accidentally apply invalid combinations,
// it might cause unknown behavior. featureSet must equal "Custom" to use this field.
// +optional
// +nullable
Custom *FeatureGatesEnabled `json:"custom,omitempty"`
}
// FeatureGatesEnabled is list of feature gates that must be enabled.
type FeatureGatesEnabled struct {
// enabled is a list of all feature gates that you want to force on
// +optional
Enabled []string `json:"enabled,omitempty"`
}
// FeatureSets contains a map of feature set names to the features they enable.
var FeatureSets = map[FeatureSet]*FeatureGatesEnabled{
DefaultFeatureSet: {
Enabled: []string{},
},
CustomFeatureSet: {
Enabled: []string{},
},
}
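
A sketch of opting into the unsupported Custom feature set, assuming this package's context; the gate name is hypothetical and, as the comments above warn, nothing validates it:

    func exampleFeatureGates() *FeatureGateSelection {
    	return &FeatureGateSelection{
    		FeatureSet: CustomFeatureSet,
    		Custom: &FeatureGatesEnabled{
    			Enabled: []string{"MyExperimentalGate"}, // hypothetical gate name
    		},
    	}
    }
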
// HiveConfigStatus defines the observed state of Hive
type HiveConfigStatus struct {
// AggregatorClientCAHash keeps an md5 hash of the aggregator client CA
// configmap data from the openshift-config-managed namespace. When the configmap changes,
// admission is redeployed.
AggregatorClientCAHash string `json:"aggregatorClientCAHash,omitempty"`
// ObservedGeneration will record the most recently processed HiveConfig object's generation.
ObservedGeneration int64 `json:"observedGeneration,omitempty"`
// ConfigApplied will be set by the hive operator to indicate whether or not the LastGenerationObserved
// was successfully reconciled.
ConfigApplied bool `json:"configApplied,omitempty"`
// Conditions includes more detailed status for the HiveConfig
// +optional
Conditions []HiveConfigCondition `json:"conditions,omitempty"`
}
// HiveConfigCondition contains details for the current condition of a HiveConfig
type HiveConfigCondition struct {
// Type is the type of the condition.
Type HiveConfigConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// HiveConfigConditionType is a valid value for HiveConfigCondition.Type
type HiveConfigConditionType string
const (
// HiveReadyCondition is set when hive is deployed successfully and ready to provision clusters
HiveReadyCondition HiveConfigConditionType = "Ready"
)
// ArgoCDConfig contains settings for integration with ArgoCD.
type ArgoCDConfig struct {
// Enabled dictates if ArgoCD gitops integration is enabled.
// If not specified, the default is disabled.
Enabled bool `json:"enabled"`
// Namespace specifies the namespace where ArgoCD is installed. Used for the location of cluster secrets.
// Defaults to "argocd"
// +optional
Namespace string `json:"namespace,omitempty"`
}
// BackupConfig contains settings for the Velero backup integration.
type BackupConfig struct {
// Velero specifies configuration for the Velero backup integration.
// +optional
Velero VeleroBackupConfig `json:"velero,omitempty"`
// MinBackupPeriodSeconds specifies the minimum interval, in seconds, between backups.
// This is used to rate limit backups. This potentially batches together multiple changes into 1 backup.
// No backups will be lost as changes that happen during this interval are queued up and will result in a
// backup happening once the interval has been completed.
// +optional
MinBackupPeriodSeconds *int `json:"minBackupPeriodSeconds,omitempty"`
}
// VeleroBackupConfig contains settings for the Velero backup integration.
type VeleroBackupConfig struct {
// Enabled dictates if Velero backup integration is enabled.
// If not specified, the default is disabled.
// +optional
Enabled bool `json:"enabled,omitempty"`
// Namespace specifies in which namespace velero backup objects should be created.
// If not specified, the default is a namespace named "velero".
// +optional
Namespace string `json:"namespace,omitempty"`
}
// FailedProvisionConfig contains settings to control behavior undertaken by Hive when an installation attempt fails.
type FailedProvisionConfig struct {
// TODO: Figure out how to mark SkipGatherLogs as deprecated (more than just a comment)
// DEPRECATED: This flag is no longer respected and will be removed in the future.
SkipGatherLogs bool `json:"skipGatherLogs,omitempty"`
AWS *FailedProvisionAWSConfig `json:"aws,omitempty"`
// RetryReasons is a list of installFailingReason strings from the [additional-]install-log-regexes ConfigMaps.
// If specified, Hive will only retry a failed installation if it results in one of the listed reasons. If
// omitted (not the same thing as empty!), Hive will retry regardless of the failure reason. (The total number
// of install attempts is still constrained by ClusterDeployment.Spec.InstallAttemptsLimit.)
RetryReasons *[]string `json:"retryReasons,omitempty"`
}
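
The *[]string type of RetryReasons is what makes "omitted" distinct from "empty": a nil pointer retries on any failure reason, a pointer to an empty slice never retries, and a populated slice retries only on the listed reasons. A sketch, assuming this package's context; the reason string is hypothetical:

    func exampleRetryReasons() FailedProvisionConfig {
    	// var retryAlways *[]string -> nil: retry regardless of reason
    	// retryNever := &[]string{} -> empty list: never retry
    	onlyQuota := []string{"QuotaExceeded"} // hypothetical installFailingReason
    	return FailedProvisionConfig{RetryReasons: &onlyQuota}
    }
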
// ManageDNSConfig contains the domain being managed, and the cloud-specific
// details for accessing/managing the domain.
type ManageDNSConfig struct {
// Domains is the list of domains that hive will be managing entries for with the provided credentials.
Domains []string `json:"domains"`
// AWS contains AWS-specific settings for external DNS
// +optional
AWS *ManageDNSAWSConfig `json:"aws,omitempty"`
// GCP contains GCP-specific settings for external DNS
// +optional
GCP *ManageDNSGCPConfig `json:"gcp,omitempty"`
// Azure contains Azure-specific settings for external DNS
// +optional
Azure *ManageDNSAzureConfig `json:"azure,omitempty"`
// As other cloud providers are supported, additional fields will be
// added for each of those cloud providers. Only a single cloud provider
// may be configured at a time.
}
// FailedProvisionAWSConfig contains AWS-specific info to upload log files.
type FailedProvisionAWSConfig struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// AWS S3. It will need permission to upload logs to S3.
// Secret should have keys named aws_access_key_id and aws_secret_access_key that contain the AWS credentials.
// Example Secret:
// data:
// aws_access_key_id: minio
// aws_secret_access_key: minio123
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region is the AWS region to use for S3 operations.
// This defaults to us-east-1.
// For AWS China, use cn-northwest-1.
// +optional
Region string `json:"region,omitempty"`
// ServiceEndpoint is the url to connect to an S3 compatible provider.
ServiceEndpoint string `json:"serviceEndpoint,omitempty"`
// Bucket is the S3 bucket to store the logs in.
Bucket string `json:"bucket,omitempty"`
}
// ManageDNSAWSConfig contains AWS-specific info to manage a given domain.
type ManageDNSAWSConfig struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// AWS Route53. It will need permission to manage entries for the domain
// listed in the parent ManageDNSConfig object.
// Secret should have AWS keys named 'aws_access_key_id' and 'aws_secret_access_key'.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// Region is the AWS region to use for route53 operations.
// This defaults to us-east-1.
// For AWS China, use cn-northwest-1.
// +optional
Region string `json:"region,omitempty"`
}
// ManageDNSGCPConfig contains GCP-specific info to manage a given domain.
type ManageDNSGCPConfig struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// GCP DNS. It will need permission to manage entries in each of the
// managed domains listed in the parent ManageDNSConfig object.
// Secret should have a key named 'osServiceAccount.json'.
// The credentials must specify the project to use.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
}
type DeleteProtectionType string
const (
DeleteProtectionEnabled DeleteProtectionType = "enabled"
)
// ManageDNSAzureConfig contains Azure-specific info to manage a given domain
type ManageDNSAzureConfig struct {
// CredentialsSecretRef references a secret in the TargetNamespace that will be used to authenticate with
// Azure DNS. It will need permission to manage entries in each of the
// managed domains listed in the parent ManageDNSConfig object.
// Secret should have a key named 'osServicePrincipal.json'
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// ResourceGroupName specifies the Azure resource group containing the DNS zones
// for the domains being managed.
ResourceGroupName string `json:"resourceGroupName"`
// CloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK
// with the appropriate Azure API endpoints.
// If empty, defaults to "AzurePublicCloud".
// +optional
CloudName azure.CloudEnvironment `json:"cloudName,omitempty"`
}
// ControllerConfig contains the configuration for a controller
type ControllerConfig struct {
// ConcurrentReconciles specifies number of concurrent reconciles for a controller
// +optional
ConcurrentReconciles *int32 `json:"concurrentReconciles,omitempty"`
// ClientQPS specifies client rate limiter QPS for a controller
// +optional
ClientQPS *int32 `json:"clientQPS,omitempty"`
// ClientBurst specifies client rate limiter burst for a controller
// +optional
ClientBurst *int32 `json:"clientBurst,omitempty"`
// QueueQPS specifies workqueue rate limiter QPS for a controller
// +optional
QueueQPS *int32 `json:"queueQPS,omitempty"`
// QueueBurst specifies workqueue rate limiter burst for a controller
// +optional
QueueBurst *int32 `json:"queueBurst,omitempty"`
// Replicas specifies the number of replicas the specific controller pod should use.
// This is ONLY for controllers that have been split out into their own pods.
// This is ignored for all others.
Replicas *int32 `json:"replicas,omitempty"`
}
// +kubebuilder:validation:Enum=clusterDeployment;clusterrelocate;clusterstate;clusterversion;controlPlaneCerts;dnsendpoint;dnszone;remoteingress;remotemachineset;machinepool;syncidentityprovider;unreachable;velerobackup;clusterprovision;clusterDeprovision;clusterpool;clusterpoolnamespace;hibernation;clusterclaim;metrics;clustersync
type ControllerName string
func (controllerName ControllerName) String() string {
return string(controllerName)
}
// ControllerNames is a slice of controller names
type ControllerNames []ControllerName
// Contains says whether or not the controller name is in the slice of controller names.
func (c ControllerNames) Contains(controllerName ControllerName) bool {
for _, curControllerName := range c {
if curControllerName == controllerName {
return true
}
}
return false
}
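// Usage sketch: deciding whether a controller appears in a configured list
// of names (the disabled-controllers scenario is illustrative only).
func exampleContains() bool {
	disabled := ControllerNames{HibernationControllerName, MetricsControllerName}
	return disabled.Contains(HibernationControllerName) // true
}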
// WARNING: All the controller names below should also be added to the kubebuilder validation of the type ControllerName
const (
ClusterClaimControllerName ControllerName = "clusterclaim"
ClusterDeploymentControllerName ControllerName = "clusterDeployment"
ClusterDeprovisionControllerName ControllerName = "clusterDeprovision"
ClusterpoolControllerName ControllerName = "clusterpool"
ClusterpoolNamespaceControllerName ControllerName = "clusterpoolnamespace"
ClusterProvisionControllerName ControllerName = "clusterProvision"
ClusterRelocateControllerName ControllerName = "clusterRelocate"
ClusterStateControllerName ControllerName = "clusterState"
ClusterVersionControllerName ControllerName = "clusterversion"
ControlPlaneCertsControllerName ControllerName = "controlPlaneCerts"
DNSEndpointControllerName ControllerName = "dnsendpoint"
DNSZoneControllerName ControllerName = "dnszone"
FakeClusterInstallControllerName ControllerName = "fakeclusterinstall"
HibernationControllerName ControllerName = "hibernation"
RemoteIngressControllerName ControllerName = "remoteingress"
SyncIdentityProviderControllerName ControllerName = "syncidentityprovider"
UnreachableControllerName ControllerName = "unreachable"
VeleroBackupControllerName ControllerName = "velerobackup"
MetricsControllerName ControllerName = "metrics"
ClustersyncControllerName ControllerName = "clustersync"
AWSPrivateLinkControllerName ControllerName = "awsprivatelink"
HiveControllerName ControllerName = "hive"
// DeprecatedRemoteMachinesetControllerName is deprecated, but can still be used to disable the
// MachinePool controller, which supersedes it, for backward compatibility.
DeprecatedRemoteMachinesetControllerName ControllerName = "remotemachineset"
MachinePoolControllerName ControllerName = "machinepool"
)
// SpecificControllerConfig contains the configuration for a specific controller
type SpecificControllerConfig struct {
// Name specifies the name of the controller
Name ControllerName `json:"name"`
// ControllerConfig contains the configuration for the controller specified by Name field
Config ControllerConfig `json:"config"`
}
// ControllersConfig contains default as well as controller specific configurations
type ControllersConfig struct {
// Default specifies default configuration for all the controllers, can be used to override following coded defaults
// default for concurrent reconciles is 5
// default for client qps is 5
// default for client burst is 10
// default for queue qps is 10
// default for queue burst is 100
// +optional
Default *ControllerConfig `json:"default,omitempty"`
// Controllers contains a list of configurations for different controllers
// +optional
Controllers []SpecificControllerConfig `json:"controllers,omitempty"`
}
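// A sketch of overriding the coded defaults globally while raising the
// client QPS for just the clustersync controller (the values are
// illustrative only, not recommended settings).
func exampleControllersConfig() ControllersConfig {
	concurrent := int32(10)
	qps := int32(50)
	return ControllersConfig{
		Default: &ControllerConfig{ConcurrentReconciles: &concurrent},
		Controllers: []SpecificControllerConfig{
			{Name: ClustersyncControllerName, Config: ControllerConfig{ClientQPS: &qps}},
		},
	}
}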
// +genclient:nonNamespaced
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HiveConfig is the Schema for the hives API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:scope=Cluster
type HiveConfig struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec HiveConfigSpec `json:"spec,omitempty"`
Status HiveConfigStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// HiveConfigList contains a list of Hive
type HiveConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []HiveConfig `json:"items"`
}
func init() {
SchemeBuilder.Register(&HiveConfig{}, &HiveConfigList{})
}

7
vendor/github.com/openshift/hive/apis/hive/v1/ibmcloud/doc.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
// Package ibmcloud contains API Schema definitions for IBM Cloud clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package ibmcloud
// Name is the name for the ibmcloud platform.
const Name string = "ibmcloud"

69
vendor/github.com/openshift/hive/apis/hive/v1/ibmcloud/machinepool.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
package ibmcloud
// MachinePool stores the configuration for a machine pool installed on IBM Cloud.
type MachinePool struct {
// InstanceType is the VSI machine profile.
InstanceType string `json:"type,omitempty"`
// Zones is the list of availability zones used for machines in the pool.
// +optional
Zones []string `json:"zones,omitempty"`
// BootVolume is the configuration for the machine's boot volume.
// +optional
BootVolume *BootVolume `json:"bootVolume,omitempty"`
// DedicatedHosts is the configuration for the machine's dedicated host and profile.
// +optional
DedicatedHosts []DedicatedHost `json:"dedicatedHosts,omitempty"`
}
// BootVolume stores the configuration for an individual machine's boot volume.
type BootVolume struct {
// EncryptionKey is the CRN referencing a Key Protect or Hyper Protect
// Crypto Services key to use for volume encryption. If not specified, a
// provider managed encryption key will be used.
// +optional
EncryptionKey string `json:"encryptionKey,omitempty"`
}
// DedicatedHost stores the configuration for the machine's dedicated host platform.
type DedicatedHost struct {
// Name is the name of the dedicated host to provision the machine on. If
// specified, machines will be created on the pre-existing dedicated host.
// +optional
Name string `json:"name,omitempty"`
// Profile is the profile ID for the dedicated host. If specified, a new
// dedicated host will be created for machines.
// +optional
Profile string `json:"profile,omitempty"`
}
// Set sets the values from `required` to `a`.
func (a *MachinePool) Set(required *MachinePool) {
if required == nil || a == nil {
return
}
if required.InstanceType != "" {
a.InstanceType = required.InstanceType
}
if len(required.Zones) > 0 {
a.Zones = required.Zones
}
if required.BootVolume != nil {
if a.BootVolume == nil {
a.BootVolume = &BootVolume{}
}
if required.BootVolume.EncryptionKey != "" {
a.BootVolume.EncryptionKey = required.BootVolume.EncryptionKey
}
}
if len(required.DedicatedHosts) > 0 {
a.DedicatedHosts = required.DedicatedHosts
}
}
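// Usage sketch for Set: only non-empty fields of required overwrite the
// receiver (the instance profile and zone names below are illustrative).
func exampleSet() {
	pool := &MachinePool{InstanceType: "bx2-4x16"}
	pool.Set(&MachinePool{Zones: []string{"us-south-1"}})
	// pool.InstanceType is still "bx2-4x16"; pool.Zones is now ["us-south-1"].
}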

25
vendor/github.com/openshift/hive/apis/hive/v1/ibmcloud/platform.go generated vendored Normal file
View File

@ -0,0 +1,25 @@
package ibmcloud
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global configuration that all machinesets use.
type Platform struct {
// CredentialsSecretRef refers to a secret that contains IBM Cloud account access
// credentials.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// AccountID is the IBM Cloud Account ID.
// AccountID is DEPRECATED and is gathered via the IBM Cloud API for the provided
// credentials. This field will be ignored.
// +optional
AccountID string `json:"accountID,omitempty"`
// CISInstanceCRN is the IBM Cloud Internet Services Instance CRN
// CISInstanceCRN is DEPRECATED and gathered via the IBM Cloud API for the provided
// credentials and cluster deployment base domain. This field will be ignored.
// +optional
CISInstanceCRN string `json:"cisInstanceCRN,omitempty"`
// Region specifies the IBM Cloud region where the cluster will be
// created.
Region string `json:"region"`
}

86
vendor/github.com/openshift/hive/apis/hive/v1/ibmcloud/zz_generated.deepcopy.go generated vendored Normal file
View File

@ -0,0 +1,86 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package ibmcloud
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *BootVolume) DeepCopyInto(out *BootVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BootVolume.
func (in *BootVolume) DeepCopy() *BootVolume {
if in == nil {
return nil
}
out := new(BootVolume)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DedicatedHost) DeepCopyInto(out *DedicatedHost) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHost.
func (in *DedicatedHost) DeepCopy() *DedicatedHost {
if in == nil {
return nil
}
out := new(DedicatedHost)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.Zones != nil {
in, out := &in.Zones, &out.Zones
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.BootVolume != nil {
in, out := &in.BootVolume, &out.BootVolume
*out = new(BootVolume)
**out = **in
}
if in.DedicatedHosts != nil {
in, out := &in.DedicatedHosts, &out.DedicatedHosts
*out = make([]DedicatedHost, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

206
vendor/github.com/openshift/hive/apis/hive/v1/machinepool_types.go generated vendored Normal file
View File

@ -0,0 +1,206 @@
package v1
import (
"github.com/openshift/hive/apis/hive/v1/alibabacloud"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/openshift/hive/apis/hive/v1/aws"
"github.com/openshift/hive/apis/hive/v1/azure"
"github.com/openshift/hive/apis/hive/v1/gcp"
"github.com/openshift/hive/apis/hive/v1/ibmcloud"
"github.com/openshift/hive/apis/hive/v1/openstack"
"github.com/openshift/hive/apis/hive/v1/ovirt"
"github.com/openshift/hive/apis/hive/v1/vsphere"
)
const (
// MachinePoolImageIDOverrideAnnotation can be applied to MachinePools to control the precise image ID to be used
// for the MachineSets we reconcile for this pool. This feature is presently only implemented for AWS, and
// is intended for very limited use cases we do not recommend pursuing regularly. As such it is not currently
// part of our official API.
MachinePoolImageIDOverrideAnnotation = "hive.openshift.io/image-id-override"
)
// MachinePoolSpec defines the desired state of MachinePool
type MachinePoolSpec struct {
// ClusterDeploymentRef references the cluster deployment to which this
// machine pool belongs.
ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"`
// Name is the name of the machine pool.
Name string `json:"name"`
// Replicas is the count of machines for this machine pool.
// Replicas and autoscaling cannot be used together.
// Default is 1, if autoscaling is not used.
// +optional
Replicas *int64 `json:"replicas,omitempty"`
// Autoscaling is the details for auto-scaling the machine pool.
// Replicas and autoscaling cannot be used together.
// +optional
Autoscaling *MachinePoolAutoscaling `json:"autoscaling,omitempty"`
// Platform is configuration for machine pool specific to the platform.
Platform MachinePoolPlatform `json:"platform"`
// Map of label string keys and values that will be applied to the created MachineSet's
// MachineSpec. This list will overwrite any modifications made to Node labels on an
// ongoing basis.
// +optional
Labels map[string]string `json:"labels,omitempty"`
// List of taints that will be applied to the created MachineSet's MachineSpec.
// This list will overwrite any modifications made to Node taints on an ongoing basis.
// +optional
Taints []corev1.Taint `json:"taints,omitempty"`
}
// MachinePoolAutoscaling details how the machine pool is to be auto-scaled.
type MachinePoolAutoscaling struct {
// MinReplicas is the minimum number of replicas for the machine pool.
MinReplicas int32 `json:"minReplicas"`
// MaxReplicas is the maximum number of replicas for the machine pool.
MaxReplicas int32 `json:"maxReplicas"`
}
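// Illustrative sketch: a pool sets either Replicas or Autoscaling, never
// both (the pool name and replica counts are assumptions).
func exampleMachinePoolSpecs() (fixed, scaled MachinePoolSpec) {
	replicas := int64(3)
	fixed = MachinePoolSpec{Name: "worker", Replicas: &replicas}
	scaled = MachinePoolSpec{
		Name:        "worker",
		Autoscaling: &MachinePoolAutoscaling{MinReplicas: 2, MaxReplicas: 6},
	}
	return fixed, scaled
}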
// MachinePoolPlatform is the platform-specific configuration for a machine
// pool. Only one of the platforms should be set.
type MachinePoolPlatform struct {
// AlibabaCloud is the configuration used when installing on Alibaba Cloud.
AlibabaCloud *alibabacloud.MachinePool `json:"alibabacloud,omitempty"`
// AWS is the configuration used when installing on AWS.
AWS *aws.MachinePoolPlatform `json:"aws,omitempty"`
// Azure is the configuration used when installing on Azure.
Azure *azure.MachinePool `json:"azure,omitempty"`
// GCP is the configuration used when installing on GCP.
GCP *gcp.MachinePool `json:"gcp,omitempty"`
// OpenStack is the configuration used when installing on OpenStack.
OpenStack *openstack.MachinePool `json:"openstack,omitempty"`
// VSphere is the configuration used when installing on vSphere
VSphere *vsphere.MachinePool `json:"vsphere,omitempty"`
// Ovirt is the configuration used when installing on oVirt.
Ovirt *ovirt.MachinePool `json:"ovirt,omitempty"`
// IBMCloud is the configuration used when installing on IBM Cloud.
IBMCloud *ibmcloud.MachinePool `json:"ibmcloud,omitempty"`
}
// MachinePoolStatus defines the observed state of MachinePool
type MachinePoolStatus struct {
// Replicas is the current number of replicas for the machine pool.
// +optional
Replicas int32 `json:"replicas,omitempty"`
// MachineSets is the status of the machine sets for the machine pool on the remote cluster.
MachineSets []MachineSetStatus `json:"machineSets,omitempty"`
// Conditions includes more detailed status for the cluster deployment
// +optional
Conditions []MachinePoolCondition `json:"conditions,omitempty"`
}
// MachineSetStatus is the status of a machineset in the remote cluster.
type MachineSetStatus struct {
// Name is the name of the machine set.
Name string `json:"name"`
// Replicas is the current number of replicas for the machine set.
Replicas int32 `json:"replicas"`
// The number of ready replicas for this MachineSet. A machine is considered ready
// when the node has been created and is "Ready". It is transferred as-is from the
// MachineSet on the remote cluster.
// +optional
ReadyReplicas int32 `json:"readyReplicas,omitempty"`
// MinReplicas is the minimum number of replicas for the machine set.
MinReplicas int32 `json:"minReplicas"`
// MaxReplicas is the maximum number of replicas for the machine set.
MaxReplicas int32 `json:"maxReplicas"`
// In the event that there is a terminal problem reconciling the
// replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
// will be populated with a succinct value suitable for machine
// interpretation, while ErrorMessage will contain a more verbose
// string suitable for logging and human consumption.
// +optional
ErrorReason *string `json:"errorReason,omitempty"`
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// MachinePoolCondition contains details for the current condition of a machine pool
type MachinePoolCondition struct {
// Type is the type of the condition.
Type MachinePoolConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// MachinePoolConditionType is a valid value for MachinePoolCondition.Type
type MachinePoolConditionType string
const (
// NotEnoughReplicasMachinePoolCondition is true when the minReplicas field
// is set too low for the number of machinesets for the machine pool.
NotEnoughReplicasMachinePoolCondition MachinePoolConditionType = "NotEnoughReplicas"
// NoMachinePoolNameLeasesAvailable is true when the cloud provider requires a name lease for the in-cluster MachineSet, but no
// leases are available.
NoMachinePoolNameLeasesAvailable MachinePoolConditionType = "NoMachinePoolNameLeasesAvailable"
// InvalidSubnetsMachinePoolCondition is true when there are missing or invalid entries in the subnet field
InvalidSubnetsMachinePoolCondition MachinePoolConditionType = "InvalidSubnets"
// UnsupportedConfigurationMachinePoolCondition is true when the configuration of the MachinePool is unsupported
// by the cluster.
UnsupportedConfigurationMachinePoolCondition MachinePoolConditionType = "UnsupportedConfiguration"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MachinePool is the Schema for the machinepools API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
// +kubebuilder:printcolumn:name="PoolName",type="string",JSONPath=".spec.name"
// +kubebuilder:printcolumn:name="ClusterDeployment",type="string",JSONPath=".spec.clusterDeploymentRef.name"
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas"
// +kubebuilder:resource:path=machinepools,scope=Namespaced
type MachinePool struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec MachinePoolSpec `json:"spec,omitempty"`
Status MachinePoolStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MachinePoolList contains a list of MachinePool
type MachinePoolList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []MachinePool `json:"items"`
}
func init() {
SchemeBuilder.Register(&MachinePool{}, &MachinePoolList{})
}

46
vendor/github.com/openshift/hive/apis/hive/v1/machinepoolnamelease_types.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// MachinePoolNameLeaseSpec is a minimal resource for obtaining unique machine pool names of a limited length.
type MachinePoolNameLeaseSpec struct {
}
// MachinePoolNameLeaseStatus defines the observed state of MachinePoolNameLease.
type MachinePoolNameLeaseStatus struct {
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MachinePoolNameLease is the Schema for the MachinePoolNameLeases API. This resource is mostly empty
// as we're primarily relying on the name to determine if a lease is available.
// Note that not all cloud providers require the use of a lease for naming; at present this
// is only required for GCP, where we're extremely restricted on name lengths.
// +k8s:openapi-gen=true
// +kubebuilder:printcolumn:name="MachinePool",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/machine-pool-name"
// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.hive\\.openshift\\.io/cluster-deployment-name"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
// +kubebuilder:resource:scope=Namespaced
type MachinePoolNameLease struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec MachinePoolNameLeaseSpec `json:"spec,omitempty"`
Status MachinePoolNameLeaseStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MachinePoolNameLeaseList contains a list of MachinePoolNameLeases.
type MachinePoolNameLeaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []MachinePoolNameLease `json:"items"`
}
func init() {
SchemeBuilder.Register(&MachinePoolNameLease{}, &MachinePoolNameLeaseList{})
}

12
vendor/github.com/openshift/hive/apis/hive/v1/metaruntimeobject.go generated vendored Normal file
View File

@ -0,0 +1,12 @@
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// MetaRuntimeObject allows for the generic specification of hive objects since all hive objects implement both the meta and runtime object interfaces.
type MetaRuntimeObject interface {
metav1.Object
runtime.Object
}

4
vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package metricsconfig contains API Schema definitions for configurations specific to metrics controller.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package metricsconfig

41
vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/durationMetrics.go generated vendored Normal file
View File

@ -0,0 +1,41 @@
package metricsconfig
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// MetricsWithDuration represents metrics that report time as values, like transition seconds.
// These metrics are meant to track outliers, so ensure their duration is not set too low.
type MetricsWithDuration struct {
// Name of the metric. It will correspond to an optional relevant metric in hive
// +kubebuilder:validation:Enum=currentStopping;currentResuming;currentWaitingForCO;cumulativeHibernated;cumulativeResumed
Name DurationMetricType `json:"name"`
// Duration is the minimum time taken - the relevant metric will be logged only if the value reported by that metric
// is more than the time mentioned here. For example, if a user opts in for current clusters stopping and sets
// 1 hour here, only clusters that have been stopping for more than an hour will be reported.
// This is a Duration value; see https://pkg.go.dev/time#ParseDuration for accepted formats.
// +kubebuilder:validation:Type=string
// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
Duration *metav1.Duration `json:"duration"`
}
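// Sketch: opt in to the currentStopping metric with a one-hour floor
// (values illustrative; assumes the standard "time" package is imported
// alongside metav1).
func exampleMetricsWithDuration() MetricsWithDuration {
	oneHour := metav1.Duration{Duration: time.Hour}
	return MetricsWithDuration{Name: CurrentStopping, Duration: &oneHour}
}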
// DurationMetricType is a valid value for MetricsWithDuration.Name
type DurationMetricType string
const (
// Metrics logged per cluster
// CurrentStopping corresponds to hive_cluster_deployments_stopping_seconds
CurrentStopping DurationMetricType = "currentStopping"
// CurrentResuming corresponds to hive_cluster_deployments_resuming_seconds
CurrentResuming DurationMetricType = "currentResuming"
// CurrentWaitingForCO corresponds to hive_cluster_deployments_waiting_for_cluster_operators_seconds
CurrentWaitingForCO DurationMetricType = "currentWaitingForCO"
// CurrentClusterSyncFailing corresponds to hive_clustersync_failing_seconds
CurrentClusterSyncFailing DurationMetricType = "currentClusterSyncFailing"
// These metrics will not be cleared and can potentially blow up the cardinality
// CumulativeHibernated corresponds to hive_cluster_deployment_hibernation_transition_seconds
CumulativeHibernated DurationMetricType = "cumulativeHibernated"
// CumulativeResumed corresponds to hive_cluster_deployment_running_transition_seconds
CumulativeResumed DurationMetricType = "cumulativeResumed"
)

7
vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/metricsConfig.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
package metricsconfig
type MetricsConfig struct {
// Optional metrics and their configurations
// +optional
MetricsWithDuration []MetricsWithDuration `json:"metricsWithDuration"`
}

54
vendor/github.com/openshift/hive/apis/hive/v1/metricsconfig/zz_generated.deepcopy.go generated vendored Normal file
View File

@ -0,0 +1,54 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package metricsconfig
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsConfig) DeepCopyInto(out *MetricsConfig) {
*out = *in
if in.MetricsWithDuration != nil {
in, out := &in.MetricsWithDuration, &out.MetricsWithDuration
*out = make([]MetricsWithDuration, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsConfig.
func (in *MetricsConfig) DeepCopy() *MetricsConfig {
if in == nil {
return nil
}
out := new(MetricsConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsWithDuration) DeepCopyInto(out *MetricsWithDuration) {
*out = *in
if in.Duration != nil {
in, out := &in.Duration, &out.Duration
*out = new(v1.Duration)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsWithDuration.
func (in *MetricsWithDuration) DeepCopy() *MetricsWithDuration {
if in == nil {
return nil
}
out := new(MetricsWithDuration)
in.DeepCopyInto(out)
return out
}

4
vendor/github.com/openshift/hive/apis/hive/v1/none/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package none contains API Schema definitions for platform-agnostic installations.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package none

6
vendor/github.com/openshift/hive/apis/hive/v1/none/platform.go generated vendored Normal file
View File

@ -0,0 +1,6 @@
package none
// Platform defines agent based install configuration for platform-agnostic clusters.
// Can only be used with spec.installStrategy.agent.
type Platform struct {
}

22
vendor/github.com/openshift/hive/apis/hive/v1/none/zz_generated.deepcopy.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package none
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

4
vendor/github.com/openshift/hive/apis/hive/v1/openstack/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package openstack contains API Schema definitions for OpenStack clusters.
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
package openstack

46
vendor/github.com/openshift/hive/apis/hive/v1/openstack/machinepools.go generated vendored Normal file
View File

@ -0,0 +1,46 @@
package openstack
// MachinePool stores the configuration for a machine pool installed
// on OpenStack.
type MachinePool struct {
// Flavor defines the OpenStack Nova flavor.
// eg. m1.large
// The json key here differs from the installer, which uses both "computeFlavor" and "type" depending on which
// struct you're looking at, and the resulting field on the MachineSet is "flavor". We are opting to stay consistent
// with the end result.
Flavor string `json:"flavor"`
// RootVolume defines the root volume for instances in the machine pool.
// The instances use ephemeral disks if not set.
// +optional
RootVolume *RootVolume `json:"rootVolume,omitempty"`
}
// Set sets the values from `required` to `a`.
func (o *MachinePool) Set(required *MachinePool) {
if required == nil || o == nil {
return
}
if required.Flavor != "" {
o.Flavor = required.Flavor
}
if required.RootVolume != nil {
if o.RootVolume == nil {
o.RootVolume = new(RootVolume)
}
o.RootVolume.Size = required.RootVolume.Size
o.RootVolume.Type = required.RootVolume.Type
}
}
// RootVolume defines the storage for an instance.
type RootVolume struct {
// Size defines the size of the volume in gibibytes (GiB).
// Required
Size int `json:"size"`
// Type defines the type of the volume.
// Required
Type string `json:"type"`
}

42
vendor/github.com/openshift/hive/apis/hive/v1/openstack/platform.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
package openstack
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global OpenStack configuration
type Platform struct {
// CredentialsSecretRef refers to a secret that contains the OpenStack account access
// credentials.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// CertificatesSecretRef refers to a secret that contains CA certificates
// necessary for communicating with OpenStack.
// There is additional configuration required for the OpenShift cluster to trust
// the certificates provided in this secret.
// The "clouds.yaml" file included in the credentialsSecretRef Secret must also include
// a reference to the certificate bundle file for the OpenShift cluster being created to
// trust the OpenStack endpoints.
// The "clouds.yaml" file must set the "cacert" field to
// either "/etc/openstack-ca/<key name containing the trust bundle in credentialsSecretRef Secret>" or
// "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem".
//
// For example,
// """clouds.yaml
// clouds:
// shiftstack:
// auth: ...
// cacert: "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem"
// """
//
// +optional
CertificatesSecretRef *corev1.LocalObjectReference `json:"certificatesSecretRef,omitempty"`
// Cloud indicates the OS_CLOUD value used to select the right section
// from the clouds.yaml in the CredentialsSecretRef.
Cloud string `json:"cloud"`
// TrunkSupport indicates whether or not to use trunk ports in your OpenShift cluster.
// +optional
TrunkSupport bool `json:"trunkSupport,omitempty"`
}

69
vendor/github.com/openshift/hive/apis/hive/v1/openstack/zz_generated.deepcopy.go generated vendored Normal file
View File

@ -0,0 +1,69 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package openstack
import (
v1 "k8s.io/api/core/v1"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.RootVolume != nil {
in, out := &in.RootVolume, &out.RootVolume
*out = new(RootVolume)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
if in.CertificatesSecretRef != nil {
in, out := &in.CertificatesSecretRef, &out.CertificatesSecretRef
*out = new(v1.LocalObjectReference)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RootVolume) DeepCopyInto(out *RootVolume) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume.
func (in *RootVolume) DeepCopy() *RootVolume {
if in == nil {
return nil
}
out := new(RootVolume)
in.DeepCopyInto(out)
return out
}

4
vendor/github.com/openshift/hive/apis/hive/v1/ovirt/doc.go generated vendored Normal file
View File

@ -0,0 +1,4 @@
// Package ovirt contains ovirt-specific structures for
// installer configuration and management.
// +k8s:deepcopy-gen=package,register
package ovirt

61
vendor/github.com/openshift/hive/apis/hive/v1/ovirt/machinepool.go generated vendored Normal file
View File

@ -0,0 +1,61 @@
package ovirt
// MachinePool stores the configuration for a machine pool installed
// on ovirt.
type MachinePool struct {
// CPU defines the VM CPU.
// +optional
CPU *CPU `json:"cpu,omitempty"`
// MemoryMB is the size of a VM's memory in MiBs.
// +optional
MemoryMB int32 `json:"memoryMB,omitempty"`
// OSDisk is the root disk of the node.
// +optional
OSDisk *Disk `json:"osDisk,omitempty"`
// VMType defines the workload type of the VM.
// +kubebuilder:validation:Enum="";desktop;server;high_performance
// +optional
VMType VMType `json:"vmType,omitempty"`
}
// CPU defines the VM cpu, made of (Sockets * Cores).
type CPU struct {
// Sockets is the number of sockets for a VM.
// Total CPUs is (Sockets * Cores)
Sockets int32 `json:"sockets"`
// Cores is the number of cores per socket.
// Total CPUs is (Sockets * Cores)
Cores int32 `json:"cores"`
}
// Disk defines a VM disk
type Disk struct {
// SizeGB size of the bootable disk in GiB.
SizeGB int64 `json:"sizeGB"`
}
// VMType defines the type of the VM, which will change the VM configuration,
// like including or excluding devices (like excluding sound-card),
// device configuration (like using multi-queues for vNic), and several other
// configuration tweaks. This doesn't affect properties like CPU count and amount of memory.
type VMType string
const (
// VMTypeDesktop set the VM type to desktop. Virtual machines optimized to act
// as desktop machines do have a sound card, use an image (thin allocation),
// and are stateless.
VMTypeDesktop VMType = "desktop"
// VMTypeServer sets the VM type to server. Virtual machines optimized to act
// as servers have no sound card, use a cloned disk image, and are not stateless.
VMTypeServer VMType = "server"
// VMTypeHighPerformance sets a VM type to high_performance which sets various
// properties of a VM to optimize for performance, like enabling headless mode,
// disabling usb, smart-card, and sound devices, enabling host cpu pass-through,
// multi-queues for vNics and several more items.
// See https://www.ovirt.org/develop/release-management/features/virt/high-performance-vm.html.
VMTypeHighPerformance VMType = "high_performance"
)

22
vendor/github.com/openshift/hive/apis/hive/v1/ovirt/platform.go generated vendored Normal file
View File

@ -0,0 +1,22 @@
package ovirt
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores all the global oVirt configuration
type Platform struct {
// The target cluster under which all VMs will run
ClusterID string `json:"ovirt_cluster_id"`
// CredentialsSecretRef refers to a secret that contains the oVirt account access
// credentials with fields: ovirt_url, ovirt_username, ovirt_password, ovirt_ca_bundle
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// CertificatesSecretRef refers to a secret that contains the oVirt CA certificates
// necessary for communicating with oVirt.
CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
// The target storage domain under which all VM disks will be created.
StorageDomainID string `json:"storage_domain_id"`
// The target network for all the network interfaces of the nodes. If omitted, defaults to the
// ovirtmgmt network, which is the default network for every oVirt cluster.
NetworkName string `json:"ovirt_network_name,omitempty"`
}

82
vendor/github.com/openshift/hive/apis/hive/v1/ovirt/zz_generated.deepcopy.go generated vendored Normal file
View File

@ -0,0 +1,82 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package ovirt
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CPU) DeepCopyInto(out *CPU) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CPU.
func (in *CPU) DeepCopy() *CPU {
if in == nil {
return nil
}
out := new(CPU)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Disk) DeepCopyInto(out *Disk) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Disk.
func (in *Disk) DeepCopy() *Disk {
if in == nil {
return nil
}
out := new(Disk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
if in.CPU != nil {
in, out := &in.CPU, &out.CPU
*out = new(CPU)
**out = **in
}
if in.OSDisk != nil {
in, out := &in.OSDisk, &out.OSDisk
*out = new(Disk)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
out.CertificatesSecretRef = in.CertificatesSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

36
vendor/github.com/openshift/hive/apis/hive/v1/register.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// NOTE: Boilerplate only. Ignore this file.
// Package v1 contains API Schema definitions for the hive v1 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hive
// +k8s:defaulter-gen=TypeMeta
// +groupName=hive.openshift.io
package v1
import (
"github.com/openshift/hive/apis/scheme"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
// HiveAPIGroup is the group that all hive objects belong to in the API server.
HiveAPIGroup = "hive.openshift.io"
// HiveAPIVersion is the api version that all hive objects are currently at.
HiveAPIVersion = "v1"
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: HiveAPIGroup, Version: HiveAPIVersion}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme is a shortcut for SchemeBuilder.AddToScheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
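// Consumer-side sketch: register the hive v1 types with a runtime scheme
// before building a client (assumes k8s.io/apimachinery/pkg/runtime is
// imported as runtime).
func exampleAddToScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := AddToScheme(s); err != nil {
		return nil, err
	}
	return s, nil
}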

98
vendor/github.com/openshift/hive/apis/hive/v1/syncidentityprovider_types.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
package v1
import (
openshiftapiv1 "github.com/openshift/api/config/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SyncIdentityProviderCommonSpec defines the identity providers to sync
type SyncIdentityProviderCommonSpec struct {
// IdentityProviders is an ordered list of ways for a user to identify themselves
// +required
IdentityProviders []openshiftapiv1.IdentityProvider `json:"identityProviders"`
}
// SelectorSyncIdentityProviderSpec defines the SyncIdentityProviderCommonSpec to sync to
// ClusterDeploymentSelector indicating which clusters the SelectorSyncIdentityProvider applies
// to in any namespace.
type SelectorSyncIdentityProviderSpec struct {
SyncIdentityProviderCommonSpec `json:",inline"`
// ClusterDeploymentSelector is a LabelSelector indicating which clusters the SelectorIdentityProvider
// applies to in any namespace.
// +optional
ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector,omitempty"`
}
// SyncIdentityProviderSpec defines the SyncIdentityProviderCommonSpec identity providers to sync along with
// ClusterDeploymentRefs indicating which clusters the SyncIdentityProvider applies to in the
// SyncIdentityProvider's namespace.
type SyncIdentityProviderSpec struct {
SyncIdentityProviderCommonSpec `json:",inline"`
// ClusterDeploymentRefs is the list of LocalObjectReference indicating which clusters the
// SyncSet applies to in the SyncSet's namespace.
// +required
ClusterDeploymentRefs []corev1.LocalObjectReference `json:"clusterDeploymentRefs"`
}
// IdentityProviderStatus defines the observed state of SyncSet
type IdentityProviderStatus struct {
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncIdentityProvider is the Schema for the SelectorSyncSet API
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Cluster
type SelectorSyncIdentityProvider struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SelectorSyncIdentityProviderSpec `json:"spec,omitempty"`
Status IdentityProviderStatus `json:"status,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncIdentityProvider is the Schema for the SyncIdentityProvider API
// +k8s:openapi-gen=true
// +kubebuilder:resource:scope=Namespaced
type SyncIdentityProvider struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SyncIdentityProviderSpec `json:"spec,omitempty"`
Status IdentityProviderStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncIdentityProviderList contains a list of SelectorSyncIdentityProviders
type SelectorSyncIdentityProviderList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SelectorSyncIdentityProvider `json:"items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncIdentityProviderList contains a list of SyncIdentityProviders
type SyncIdentityProviderList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SyncIdentityProvider `json:"items"`
}
func init() {
SchemeBuilder.Register(
&SyncIdentityProvider{},
&SyncIdentityProviderList{},
&SelectorSyncIdentityProvider{},
&SelectorSyncIdentityProviderList{},
)
}

326
vendor/github.com/openshift/hive/apis/hive/v1/syncset_types.go generated vendored Normal file
View File

@ -0,0 +1,326 @@
package v1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
)
// SyncSetResourceApplyMode is a string representing the mode with which to
// apply SyncSet Resources.
type SyncSetResourceApplyMode string
const (
// UpsertResourceApplyMode indicates that objects will be updated
// or inserted (created).
UpsertResourceApplyMode SyncSetResourceApplyMode = "Upsert"
// SyncResourceApplyMode inherits the create or update functionality
// of Upsert but also indicates that objects will be deleted if created
// previously and detected missing from defined Resources in the SyncSet.
SyncResourceApplyMode SyncSetResourceApplyMode = "Sync"
)
// SyncSetApplyBehavior is a string representing the behavior to use when
// applying a syncset to a target cluster.
// +kubebuilder:validation:Enum="";Apply;CreateOnly;CreateOrUpdate
type SyncSetApplyBehavior string
const (
// ApplySyncSetApplyBehavior is the default apply behavior. It will result
// in resources getting applied using the 'oc apply' command to the target
// cluster.
ApplySyncSetApplyBehavior SyncSetApplyBehavior = "Apply"
// CreateOnlySyncSetApplyBehavior results in resources only getting created
// if they do not exist, otherwise they are left alone.
CreateOnlySyncSetApplyBehavior SyncSetApplyBehavior = "CreateOnly"
// CreateOrUpdateSyncSetApplyBehavior results in resources getting created if
// they do not exist, otherwise they are updated with the contents of the
// syncset resource. This is different from Apply behavior in that an annotation
// is not added to the target resource with the "lastApplied" value. It allows
// for syncing larger resources, but loses the ability to sync map entry deletes.
CreateOrUpdateSyncSetApplyBehavior SyncSetApplyBehavior = "CreateOrUpdate"
)
// SyncSetPatchApplyMode is a string representing the mode with which to apply
// SyncSet Patches.
type SyncSetPatchApplyMode string
const (
// ApplyOncePatchApplyMode indicates that the patch should be applied
// only once.
ApplyOncePatchApplyMode SyncSetPatchApplyMode = "ApplyOnce"
// AlwaysApplyPatchApplyMode indicates that the patch should be
// continuously applied.
AlwaysApplyPatchApplyMode SyncSetPatchApplyMode = "AlwaysApply"
)
// SyncObjectPatch represents a patch to be applied to a specific object
type SyncObjectPatch struct {
// APIVersion is the Group and Version of the object to be patched.
APIVersion string `json:"apiVersion"`
// Kind is the Kind of the object to be patched.
Kind string `json:"kind"`
// Name is the name of the object to be patched.
Name string `json:"name"`
// Namespace is the Namespace in which the object to patch exists.
// Defaults to the SyncSet's Namespace.
// +optional
Namespace string `json:"namespace,omitempty"`
// Patch is the patch to apply.
Patch string `json:"patch"`
// PatchType indicates the PatchType as "strategic" (default), "json", or "merge".
// +optional
PatchType string `json:"patchType,omitempty"`
}
// SecretReference is a reference to a secret by name and namespace
type SecretReference struct {
// Name is the name of the secret
Name string `json:"name"`
// Namespace is the namespace where the secret lives. If not present for the source
// secret reference, it is assumed to be the same namespace as the syncset with the
// reference.
// +optional
Namespace string `json:"namespace,omitempty"`
}
// SecretMapping defines a source and destination for a secret to be synced by a SyncSet
type SecretMapping struct {
// SourceRef specifies the name and namespace of a secret on the management cluster
SourceRef SecretReference `json:"sourceRef"`
// TargetRef specifies the target name and namespace of the secret on the target cluster
TargetRef SecretReference `json:"targetRef"`
}
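// Sketch: sync a management-cluster secret into a different namespace on
// the target cluster (all names are illustrative).
func exampleSecretMapping() SecretMapping {
	return SecretMapping{
		// Source namespace omitted: defaults to the syncset's namespace.
		SourceRef: SecretReference{Name: "extra-pull-secret"},
		TargetRef: SecretReference{Name: "pull-secret", Namespace: "openshift-config"},
	}
}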
// SyncConditionType is a valid value for SyncCondition.Type
type SyncConditionType string
const (
// ApplySuccessSyncCondition indicates whether the resource or patch has been applied.
ApplySuccessSyncCondition SyncConditionType = "ApplySuccess"
// ApplyFailureSyncCondition indicates that a resource or patch has failed to apply.
// It should include a reason and message for the failure.
ApplyFailureSyncCondition SyncConditionType = "ApplyFailure"
// DeletionFailedSyncCondition indicates that resource deletion has failed.
// It should include a reason and message for the failure.
DeletionFailedSyncCondition SyncConditionType = "DeletionFailed"
// UnknownObjectSyncCondition indicates that the resource type cannot be determined.
// It should include a reason and message for the failure.
UnknownObjectSyncCondition SyncConditionType = "UnknownObject"
)
// SyncCondition is a condition in a SyncStatus
type SyncCondition struct {
// Type is the type of the condition.
Type SyncConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
// SyncSetObjectStatus describes the status of resources created or patches that have
// been applied from a SyncSet or SelectorSyncSet.
type SyncSetObjectStatus struct {
// Name is the name of the SyncSet.
Name string `json:"name"`
// Resources is the list of SyncStatus for objects that have been synced.
// +optional
Resources []SyncStatus `json:"resources,omitempty"`
// ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync".
// ApplyMode "Upsert" indicates create and update.
// ApplyMode "Sync" indicates create, update and delete.
// +optional
ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"`
// Patches is the list of SyncStatus for patches that have been applied.
// +optional
Patches []SyncStatus `json:"patches,omitempty"`
// Secrets is the list of SyncStatus for secrets that have been synced.
// +optional
Secrets []SyncStatus `json:"secrets,omitempty"`
// Conditions is the list of SyncConditions used to indicate UnknownObject
// when a resource type cannot be determined from a SyncSet resource.
// +optional
Conditions []SyncCondition `json:"conditions,omitempty"`
}
// SyncStatus describes objects that have been created or patches that
// have been applied using the unique md5 sum of the object or patch.
type SyncStatus struct {
// APIVersion is the Group and Version of the object that was synced or
// patched.
APIVersion string `json:"apiVersion"`
// Kind is the Kind of the object that was synced or patched.
Kind string `json:"kind"`
// Resource is the resource name for the object that was synced.
// This will be populated for resources, but not patches
// +optional
Resource string `json:"resource,omitempty"`
// Name is the name of the object that was synced or patched.
Name string `json:"name"`
// Namespace is the Namespace of the object that was synced or patched.
Namespace string `json:"namespace"`
// Hash is the unique md5 hash of the resource or patch.
Hash string `json:"hash"`
// Conditions is the list of conditions indicating success or failure of object
// create, update and delete as well as patch application.
Conditions []SyncCondition `json:"conditions"`
}
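// The exact serialization Hive hashes is internal; a plausible sketch of
// producing the md5 sum recorded in Hash (assumes crypto/md5 and fmt are
// imported; resourceBytes is a hypothetical serialized resource or patch).
func exampleHash(resourceBytes []byte) string {
	return fmt.Sprintf("%x", md5.Sum(resourceBytes))
}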
// SyncSetCommonSpec defines the resources and patches to sync
type SyncSetCommonSpec struct {
// Resources is the list of objects to sync from RawExtension definitions.
// +optional
Resources []runtime.RawExtension `json:"resources,omitempty"`
// ResourceApplyMode indicates if the Resource apply mode is "Upsert" (default) or "Sync".
// ApplyMode "Upsert" indicates create and update.
// ApplyMode "Sync" indicates create, update and delete.
// +optional
ResourceApplyMode SyncSetResourceApplyMode `json:"resourceApplyMode,omitempty"`
// Patches is the list of patches to apply.
// +optional
Patches []SyncObjectPatch `json:"patches,omitempty"`
// Secrets is the list of secrets to sync along with their respective destinations.
// +optional
Secrets []SecretMapping `json:"secretMappings,omitempty"`
// ApplyBehavior indicates how resources in this syncset will be applied to the target
// cluster. The default value of "Apply" indicates that resources should be applied
// using the 'oc apply' command. If no value is set, "Apply" is assumed.
// A value of "CreateOnly" indicates that the resource will only be created if it does
// not already exist in the target cluster. Otherwise, it will be left alone.
// A value of "CreateOrUpdate" indicates that the resource will be created/updated without
// the use of the 'oc apply' command, allowing larger resources to be synced, but losing
// some functionality of the 'oc apply' command such as the ability to remove annotations,
// labels, and other map entries in general.
// +optional
ApplyBehavior SyncSetApplyBehavior `json:"applyBehavior,omitempty"`
}
// SelectorSyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along
// with a ClusterDeploymentSelector indicating which clusters the SelectorSyncSet applies
// to in any namespace.
type SelectorSyncSetSpec struct {
SyncSetCommonSpec `json:",inline"`
// ClusterDeploymentSelector is a LabelSelector indicating which clusters the SelectorSyncSet
// applies to in any namespace.
// +optional
ClusterDeploymentSelector metav1.LabelSelector `json:"clusterDeploymentSelector,omitempty"`
}
// SyncSetSpec defines the SyncSetCommonSpec resources and patches to sync along with
// ClusterDeploymentRefs indicating which clusters the SyncSet applies to in the
// SyncSet's namespace.
type SyncSetSpec struct {
SyncSetCommonSpec `json:",inline"`
// ClusterDeploymentRefs is the list of LocalObjectReference indicating which clusters the
// SyncSet applies to in the SyncSet's namespace.
// +required
ClusterDeploymentRefs []corev1.LocalObjectReference `json:"clusterDeploymentRefs"`
}
// SyncSetStatus defines the observed state of a SyncSet
type SyncSetStatus struct {
}
// SelectorSyncSetStatus defines the observed state of a SelectorSyncSet
type SelectorSyncSetStatus struct {
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncSet is the Schema for the SelectorSyncSet API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=selectorsyncsets,shortName=sss,scope=Cluster
type SelectorSyncSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SelectorSyncSetSpec `json:"spec,omitempty"`
Status SelectorSyncSetStatus `json:"status,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncSet is the Schema for the SyncSet API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=syncsets,shortName=ss,scope=Namespaced
type SyncSet struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec SyncSetSpec `json:"spec,omitempty"`
Status SyncSetStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SelectorSyncSetList contains a list of SelectorSyncSets
type SelectorSyncSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SelectorSyncSet `json:"items"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// SyncSetList contains a list of SyncSets
type SyncSetList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []SyncSet `json:"items"`
}
func init() {
SchemeBuilder.Register(
&SyncSet{},
&SyncSetList{},
&SelectorSyncSet{},
&SelectorSyncSetList{},
)
}
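
As a usage illustration (not part of this commit): a minimal sketch of building a SyncSet from the types above, assuming the package is imported as hivev1 from github.com/openshift/hive/apis/hive/v1. The ConfigMap payload and the names "example", "hive", and "my-cluster" are placeholders for the example.

package main

import (
	"encoding/json"
	"fmt"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// SyncSets carry resources as raw JSON (runtime.RawExtension), so the
	// object to sync is marshalled rather than passed as a typed struct.
	raw, err := json.Marshal(map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "ConfigMap",
		"metadata":   map[string]string{"name": "example", "namespace": "default"},
		"data":       map[string]string{"key": "value"},
	})
	if err != nil {
		panic(err)
	}
	ss := hivev1.SyncSet{
		ObjectMeta: metav1.ObjectMeta{Name: "example", Namespace: "hive"},
		Spec: hivev1.SyncSetSpec{
			SyncSetCommonSpec: hivev1.SyncSetCommonSpec{
				Resources: []runtime.RawExtension{{Raw: raw}},
				// "Sync" also deletes resources removed from the SyncSet;
				// the default "Upsert" only creates and updates.
				ResourceApplyMode: "Sync",
			},
			// Namespaced SyncSets target clusters by LocalObjectReference.
			ClusterDeploymentRefs: []corev1.LocalObjectReference{{Name: "my-cluster"}},
		},
	}
	fmt.Println(ss.Name, ss.Spec.ResourceApplyMode)
}

Because resources travel as runtime.RawExtension, any object that serializes to JSON can be synced without this package depending on its typed API.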

3
vendor/github.com/openshift/hive/apis/hive/v1/vsphere/doc.go generated vendored Normal file

@ -0,0 +1,3 @@
// Package vsphere contains API Schema definitions for vSphere clusters.
// +k8s:deepcopy-gen=package,register
package vsphere

24
vendor/github.com/openshift/hive/apis/hive/v1/vsphere/machinepools.go generated vendored Normal file

@ -0,0 +1,24 @@
package vsphere
// MachinePool stores the configuration for a machine pool installed
// on vSphere.
type MachinePool struct {
// NumCPUs is the total number of virtual processor cores to assign to a VM.
NumCPUs int32 `json:"cpus"`
// NumCoresPerSocket is the number of cores per socket in a VM. The number
// of sockets on the VM will be NumCPUs/NumCoresPerSocket.
NumCoresPerSocket int32 `json:"coresPerSocket"`
// MemoryMiB is the size of a VM's memory in MiB.
MemoryMiB int64 `json:"memoryMB"`
// OSDisk defines the storage for the instance.
OSDisk `json:"osDisk"`
}
// OSDisk defines the disk for a virtual machine.
type OSDisk struct {
// DiskSizeGB defines the size of the disk in GB.
DiskSizeGB int32 `json:"diskSizeGB"`
}
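
A small hedged sketch of filling in this MachinePool; the values are illustrative only. It shows the relationship between the two CPU fields: with 4 vCPUs at 2 cores per socket, the VM is presented with 2 sockets.

package main

import (
	"fmt"

	"github.com/openshift/hive/apis/hive/v1/vsphere"
)

func main() {
	pool := vsphere.MachinePool{
		NumCPUs:           4,     // 4 vCPUs in total
		NumCoresPerSocket: 2,     // so the VM gets 4/2 = 2 sockets
		MemoryMiB:         16384, // 16 GiB of RAM
		OSDisk:            vsphere.OSDisk{DiskSizeGB: 120},
	}
	fmt.Printf("%+v\n", pool)
}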

35
vendor/github.com/openshift/hive/apis/hive/v1/vsphere/platform.go generated vendored Normal file

@ -0,0 +1,35 @@
package vsphere
import (
corev1 "k8s.io/api/core/v1"
)
// Platform stores any global configuration used for vSphere platforms.
type Platform struct {
// VCenter is the domain name or IP address of the vCenter.
VCenter string `json:"vCenter"`
// CredentialsSecretRef refers to a secret that contains the vSphere account access
// credentials: GOVC_USERNAME, GOVC_PASSWORD fields.
CredentialsSecretRef corev1.LocalObjectReference `json:"credentialsSecretRef"`
// CertificatesSecretRef refers to a secret that contains the vSphere CA certificates
// necessary for communicating with the vCenter.
CertificatesSecretRef corev1.LocalObjectReference `json:"certificatesSecretRef"`
// Datacenter is the name of the datacenter to use in the vCenter.
Datacenter string `json:"datacenter"`
// DefaultDatastore is the default datastore to use for provisioning volumes.
DefaultDatastore string `json:"defaultDatastore"`
// Folder is the name of the folder that will be used and/or created for
// virtual machines.
Folder string `json:"folder,omitempty"`
// Cluster is the name of the cluster virtual machines will be cloned into.
Cluster string `json:"cluster,omitempty"`
// Network specifies the name of the network to be used by the cluster.
Network string `json:"network,omitempty"`
}

57
vendor/github.com/openshift/hive/apis/hive/v1/vsphere/zz_generated.deepcopy.go generated vendored Normal file

@ -0,0 +1,57 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package vsphere
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MachinePool) DeepCopyInto(out *MachinePool) {
*out = *in
out.OSDisk = in.OSDisk
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePool.
func (in *MachinePool) DeepCopy() *MachinePool {
if in == nil {
return nil
}
out := new(MachinePool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSDisk) DeepCopyInto(out *OSDisk) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
func (in *OSDisk) DeepCopy() *OSDisk {
if in == nil {
return nil
}
out := new(OSDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Platform) DeepCopyInto(out *Platform) {
*out = *in
out.CredentialsSecretRef = in.CredentialsSecretRef
out.CertificatesSecretRef = in.CertificatesSecretRef
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Platform.
func (in *Platform) DeepCopy() *Platform {
if in == nil {
return nil
}
out := new(Platform)
in.DeepCopyInto(out)
return out
}

3765
vendor/github.com/openshift/hive/apis/hive/v1/zz_generated.deepcopy.go generated vendored Normal file

Diff not rendered because of its large size.

152
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersync_types.go generated vendored Normal file

@ -0,0 +1,152 @@
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterSync is the status of all of the SelectorSyncSets and SyncSets that apply to a ClusterDeployment.
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
// +kubebuilder:resource:path=clustersyncs,shortName=csync,scope=Namespaced
// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=`.status.conditions[0].reason`
// +kubebuilder:printcolumn:name="Message",type=string,priority=1,JSONPath=`.status.conditions[?(@.type=="Failed")].message`
type ClusterSync struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSyncSpec `json:"spec,omitempty"`
Status ClusterSyncStatus `json:"status,omitempty"`
}
// ClusterSyncSpec defines the desired state of ClusterSync
type ClusterSyncSpec struct{}
// ClusterSyncStatus defines the observed state of ClusterSync
type ClusterSyncStatus struct {
// SyncSets is the sync status of all of the SyncSets for the cluster.
// +optional
SyncSets []SyncStatus `json:"syncSets,omitempty"`
// SelectorSyncSets is the sync status of all of the SelectorSyncSets for the cluster.
// +optional
SelectorSyncSets []SyncStatus `json:"selectorSyncSets,omitempty"`
// Conditions is a list of conditions associated with syncing to the cluster.
// +optional
Conditions []ClusterSyncCondition `json:"conditions,omitempty"`
// FirstSuccessTime is the time we first successfully applied all (selector)syncsets to a cluster.
// +optional
FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"`
// ControlledByReplica indicates which replica of the hive-clustersync StatefulSet is responsible
// for (the CD related to) this clustersync. Note that this value indicates the replica that most
// recently handled the ClusterSync. If the hive-clustersync statefulset is scaled up or down, the
// controlling replica can change, potentially causing logs to be spread across multiple pods.
ControlledByReplica *int64 `json:"controlledByReplica,omitempty"`
}
// SyncStatus is the status of applying a specific SyncSet or SelectorSyncSet to the cluster.
type SyncStatus struct {
// Name is the name of the SyncSet or SelectorSyncSet.
Name string `json:"name"`
// ObservedGeneration is the generation of the SyncSet or SelectorSyncSet that was last observed.
ObservedGeneration int64 `json:"observedGeneration"`
// ResourcesToDelete is the list of resources in the cluster that should be deleted when the SyncSet or SelectorSyncSet
// is deleted or is no longer matched to the cluster.
// +optional
ResourcesToDelete []SyncResourceReference `json:"resourcesToDelete,omitempty"`
// Result is the result of the last attempt to apply the SyncSet or SelectorSyncSet to the cluster.
Result SyncSetResult `json:"result"`
// FailureMessage is a message describing why the SyncSet or SelectorSyncSet could not be applied. This is only
// set when Result is Failure.
// +optional
FailureMessage string `json:"failureMessage,omitempty"`
// LastTransitionTime is the time when this status last changed.
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// FirstSuccessTime is the time when the SyncSet or SelectorSyncSet was first successfully applied to the cluster.
// +optional
FirstSuccessTime *metav1.Time `json:"firstSuccessTime,omitempty"`
}
// SyncResourceReference is a reference to a resource that is synced to a cluster via a SyncSet or SelectorSyncSet.
type SyncResourceReference struct {
// APIVersion is the Group and Version of the resource.
APIVersion string `json:"apiVersion"`
// Kind is the Kind of the resource.
// +optional
Kind string `json:"kind"`
// Name is the name of the resource.
Name string `json:"name"`
// Namespace is the namespace of the resource.
// +optional
Namespace string `json:"namespace,omitempty"`
}
// SyncSetResult is the result of a sync attempt.
// +kubebuilder:validation:Enum=Success;Failure
type SyncSetResult string
const (
// SuccessSyncSetResult is the result when the SyncSet or SelectorSyncSet was applied successfully to the cluster.
SuccessSyncSetResult SyncSetResult = "Success"
// FailureSyncSetResult is the result when there was an error while attempting to apply the SyncSet or SelectorSyncSet
// to the cluster.
FailureSyncSetResult SyncSetResult = "Failure"
)
// ClusterSyncCondition contains details for the current condition of a ClusterSync
type ClusterSyncCondition struct {
// Type is the type of the condition.
Type ClusterSyncConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime,omitempty"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Message is a human-readable message indicating details about the last transition.
// +optional
Message string `json:"message,omitempty"`
}
// ClusterSyncConditionType is a valid value for ClusterSyncCondition.Type
type ClusterSyncConditionType string
const (
// ClusterSyncFailed is the type of condition used to indicate whether there are SyncSets or SelectorSyncSets which
// have not been applied due to an error.
ClusterSyncFailed ClusterSyncConditionType = "Failed"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterSyncList contains a list of ClusterSync
type ClusterSyncList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterSync `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterSync{}, &ClusterSyncList{})
}
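
A hedged sketch of consuming ClusterSync status with the types above; the package alias hiveintv1alpha1 and the helper name failedSyncSets are assumptions, and the ClusterSync is presumed already fetched by some client.

package example

import (
	"fmt"

	hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
)

// failedSyncSets collects "name: message" for every SyncSet whose most recent
// apply attempt failed, using the Result and FailureMessage fields above.
func failedSyncSets(cs *hiveintv1alpha1.ClusterSync) []string {
	var failed []string
	for _, s := range cs.Status.SyncSets {
		if s.Result == hiveintv1alpha1.FailureSyncSetResult {
			failed = append(failed, fmt.Sprintf("%s: %s", s.Name, s.FailureMessage))
		}
	}
	return failed
}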

37
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/clustersynclease_types.go generated vendored Normal file

@ -0,0 +1,37 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterSyncLease is a record of the last time that SyncSets and SelectorSyncSets were applied to a cluster.
// +k8s:openapi-gen=true
// +kubebuilder:resource:path=clustersyncleases,shortName=csl,scope=Namespaced
type ClusterSyncLease struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ClusterSyncLeaseSpec `json:"spec,omitempty"`
}
// ClusterSyncLeaseSpec is the specification of a ClusterSyncLease.
type ClusterSyncLeaseSpec struct {
// RenewTime is the time when SyncSets and SelectorSyncSets were last applied to the cluster.
RenewTime metav1.MicroTime `json:"renewTime"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ClusterSyncLeaseList contains a list of ClusterSyncLeases.
type ClusterSyncLeaseList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []ClusterSyncLease `json:"items"`
}
func init() {
SchemeBuilder.Register(&ClusterSyncLease{}, &ClusterSyncLeaseList{})
}
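
A minimal sketch of how a consumer might use RenewTime to detect stale syncing; the helper name syncIsStale and the import alias are assumed for illustration.

package example

import (
	"time"

	hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
)

// syncIsStale reports whether the last (Selector)SyncSet apply recorded in
// the lease happened longer ago than maxAge.
func syncIsStale(lease *hiveintv1alpha1.ClusterSyncLease, maxAge time.Duration) bool {
	return time.Since(lease.Spec.RenewTime.Time) > maxAge
}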

7
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/doc.go generated vendored Normal file

@ -0,0 +1,7 @@
// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hiveinternal
// +k8s:defaulter-gen=TypeMeta
// +groupName=hiveinternal.openshift.io
package v1alpha1

56
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/fakeclusterinstall_types.go generated vendored Normal file

@ -0,0 +1,56 @@
package v1alpha1
import (
hivev1 "github.com/openshift/hive/apis/hive/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// FakeClusterInstallSpec defines the desired state of the FakeClusterInstall.
type FakeClusterInstallSpec struct {
// ImageSetRef is a reference to a ClusterImageSet. The release image specified in the ClusterImageSet will be used
// to install the cluster.
ImageSetRef hivev1.ClusterImageSetReference `json:"imageSetRef"`
// ClusterDeploymentRef is a reference to the ClusterDeployment associated with this FakeClusterInstall.
ClusterDeploymentRef corev1.LocalObjectReference `json:"clusterDeploymentRef"`
// ClusterMetadata contains metadata information about the installed cluster. It should be populated once the cluster install is completed. (It can be populated sooner if desired, but Hive will not copy it back to the ClusterDeployment until the Installed condition goes True.)
ClusterMetadata *hivev1.ClusterMetadata `json:"clusterMetadata,omitempty"`
}
// FakeClusterInstallStatus defines the observed state of the FakeClusterInstall.
type FakeClusterInstallStatus struct {
// Conditions includes more detailed status for the cluster install.
// +optional
Conditions []hivev1.ClusterInstallCondition `json:"conditions,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// FakeClusterInstall represents a fake request to provision an agent-based cluster.
//
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
type FakeClusterInstall struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec FakeClusterInstallSpec `json:"spec"`
Status FakeClusterInstallStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// FakeClusterInstallList contains a list of FakeClusterInstall
type FakeClusterInstallList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []FakeClusterInstall `json:"items"`
}
func init() {
SchemeBuilder.Register(&FakeClusterInstall{}, &FakeClusterInstallList{})
}

36
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/register.go generated vendored Normal file

@ -0,0 +1,36 @@
// NOTE: Boilerplate only. Ignore this file.
// Package v1alpha1 contains API Schema definitions for the hiveinternal v1alpha1 API group
// +k8s:openapi-gen=true
// +k8s:deepcopy-gen=package,register
// +k8s:conversion-gen=github.com/openshift/hive/apis/hiveinternal
// +k8s:defaulter-gen=TypeMeta
// +groupName=hiveinternal.openshift.io
package v1alpha1
import (
"github.com/openshift/hive/apis/scheme"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
// HiveInternalAPIGroup is the group that all hiveinternal objects belong to in the API server.
HiveInternalAPIGroup = "hiveinternal.openshift.io"
// HiveInternalAPIVersion is the api version that all hiveinternal objects are currently at.
HiveInternalAPIVersion = "v1alpha1"
// SchemeGroupVersion is group version used to register these objects
SchemeGroupVersion = schema.GroupVersion{Group: HiveInternalAPIGroup, Version: HiveInternalAPIVersion}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme is a shortcut for SchemeBuilder.AddToScheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}

378
vendor/github.com/openshift/hive/apis/hiveinternal/v1alpha1/zz_generated.deepcopy.go generated vendored Normal file

@ -0,0 +1,378 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
import (
v1 "github.com/openshift/hive/apis/hive/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSync) DeepCopyInto(out *ClusterSync) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSync.
func (in *ClusterSync) DeepCopy() *ClusterSync {
if in == nil {
return nil
}
out := new(ClusterSync)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSync) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncCondition) DeepCopyInto(out *ClusterSyncCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncCondition.
func (in *ClusterSyncCondition) DeepCopy() *ClusterSyncCondition {
if in == nil {
return nil
}
out := new(ClusterSyncCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncLease) DeepCopyInto(out *ClusterSyncLease) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLease.
func (in *ClusterSyncLease) DeepCopy() *ClusterSyncLease {
if in == nil {
return nil
}
out := new(ClusterSyncLease)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSyncLease) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncLeaseList) DeepCopyInto(out *ClusterSyncLeaseList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterSyncLease, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseList.
func (in *ClusterSyncLeaseList) DeepCopy() *ClusterSyncLeaseList {
if in == nil {
return nil
}
out := new(ClusterSyncLeaseList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSyncLeaseList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncLeaseSpec) DeepCopyInto(out *ClusterSyncLeaseSpec) {
*out = *in
in.RenewTime.DeepCopyInto(&out.RenewTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncLeaseSpec.
func (in *ClusterSyncLeaseSpec) DeepCopy() *ClusterSyncLeaseSpec {
if in == nil {
return nil
}
out := new(ClusterSyncLeaseSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncList) DeepCopyInto(out *ClusterSyncList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ClusterSync, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncList.
func (in *ClusterSyncList) DeepCopy() *ClusterSyncList {
if in == nil {
return nil
}
out := new(ClusterSyncList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterSyncList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncSpec) DeepCopyInto(out *ClusterSyncSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncSpec.
func (in *ClusterSyncSpec) DeepCopy() *ClusterSyncSpec {
if in == nil {
return nil
}
out := new(ClusterSyncSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterSyncStatus) DeepCopyInto(out *ClusterSyncStatus) {
*out = *in
if in.SyncSets != nil {
in, out := &in.SyncSets, &out.SyncSets
*out = make([]SyncStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.SelectorSyncSets != nil {
in, out := &in.SelectorSyncSets, &out.SelectorSyncSets
*out = make([]SyncStatus, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]ClusterSyncCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.FirstSuccessTime != nil {
in, out := &in.FirstSuccessTime, &out.FirstSuccessTime
*out = (*in).DeepCopy()
}
if in.ControlledByReplica != nil {
in, out := &in.ControlledByReplica, &out.ControlledByReplica
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSyncStatus.
func (in *ClusterSyncStatus) DeepCopy() *ClusterSyncStatus {
if in == nil {
return nil
}
out := new(ClusterSyncStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FakeClusterInstall) DeepCopyInto(out *FakeClusterInstall) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstall.
func (in *FakeClusterInstall) DeepCopy() *FakeClusterInstall {
if in == nil {
return nil
}
out := new(FakeClusterInstall)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FakeClusterInstall) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FakeClusterInstallList) DeepCopyInto(out *FakeClusterInstallList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]FakeClusterInstall, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallList.
func (in *FakeClusterInstallList) DeepCopy() *FakeClusterInstallList {
if in == nil {
return nil
}
out := new(FakeClusterInstallList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *FakeClusterInstallList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FakeClusterInstallSpec) DeepCopyInto(out *FakeClusterInstallSpec) {
*out = *in
out.ImageSetRef = in.ImageSetRef
out.ClusterDeploymentRef = in.ClusterDeploymentRef
if in.ClusterMetadata != nil {
in, out := &in.ClusterMetadata, &out.ClusterMetadata
*out = new(v1.ClusterMetadata)
(*in).DeepCopyInto(*out)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallSpec.
func (in *FakeClusterInstallSpec) DeepCopy() *FakeClusterInstallSpec {
if in == nil {
return nil
}
out := new(FakeClusterInstallSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FakeClusterInstallStatus) DeepCopyInto(out *FakeClusterInstallStatus) {
*out = *in
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]v1.ClusterInstallCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FakeClusterInstallStatus.
func (in *FakeClusterInstallStatus) DeepCopy() *FakeClusterInstallStatus {
if in == nil {
return nil
}
out := new(FakeClusterInstallStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncResourceReference) DeepCopyInto(out *SyncResourceReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncResourceReference.
func (in *SyncResourceReference) DeepCopy() *SyncResourceReference {
if in == nil {
return nil
}
out := new(SyncResourceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncStatus) DeepCopyInto(out *SyncStatus) {
*out = *in
if in.ResourcesToDelete != nil {
in, out := &in.ResourcesToDelete, &out.ResourcesToDelete
*out = make([]SyncResourceReference, len(*in))
copy(*out, *in)
}
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
if in.FirstSuccessTime != nil {
in, out := &in.FirstSuccessTime, &out.FirstSuccessTime
*out = (*in).DeepCopy()
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SyncStatus.
func (in *SyncStatus) DeepCopy() *SyncStatus {
if in == nil {
return nil
}
out := new(SyncStatus)
in.DeepCopyInto(out)
return out
}

97
vendor/github.com/openshift/hive/apis/scheme/scheme.go generated vendored Normal file

@ -0,0 +1,97 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Copied from controller-runtime to avoid a dependency on specific versions
// causing problems with anyone importing hive's api package.
// Package scheme contains utilities for gradually building Schemes,
// which contain information associating Go types with Kubernetes
// groups, versions, and kinds.
//
// Each API group should define a utility function
// called AddToScheme for adding its types to a Scheme:
//
// // in package myapigroupv1...
// var (
// SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
// SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
// AddToScheme = SchemeBuilder.AddToScheme
// )
//
// func init() {
// SchemeBuilder.Register(&MyType{}, &MyTypeList{})
// }
//
// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
// your manager, assemble the scheme containing exactly the types you need,
// panicking if scheme registration failed. For instance, if our controller needs
// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
//
// var (
// scheme *runtime.Scheme = runtime.NewScheme()
// )
//
// func init() {
// utilruntime.Must(myapigroupv1.AddToScheme(scheme))
// utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
// }
//
// func main() {
// mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
// Scheme: scheme,
// })
// // ...
// }
//
package scheme
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
type Builder struct {
GroupVersion schema.GroupVersion
runtime.SchemeBuilder
}
// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
func (bld *Builder) Register(object ...runtime.Object) *Builder {
bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(bld.GroupVersion, object...)
metav1.AddToGroupVersion(scheme, bld.GroupVersion)
return nil
})
return bld
}
// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
func (bld *Builder) RegisterAll(b *Builder) *Builder {
bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
return bld
}
// AddToScheme adds all registered types to s.
func (bld *Builder) AddToScheme(s *runtime.Scheme) error {
return bld.SchemeBuilder.AddToScheme(s)
}
// Build returns a new Scheme containing the registered types.
func (bld *Builder) Build() (*runtime.Scheme, error) {
s := runtime.NewScheme()
return s, bld.AddToScheme(s)
}
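
A short sketch of the pattern this Builder enables, using the hiveinternal v1alpha1 SchemeBuilder defined earlier in this commit; the function name newScheme is an assumption.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

	hiveintv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
)

// newScheme assembles a Scheme containing exactly the hiveinternal types,
// panicking on registration failure as the package doc above recommends.
func newScheme() *runtime.Scheme {
	s := runtime.NewScheme()
	utilruntime.Must(hiveintv1alpha1.AddToScheme(s))
	return s
}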

95
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/clientset.go generated vendored Normal file

@ -0,0 +1,95 @@
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
"fmt"
hivev1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
hiveinternalv1alpha1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hiveinternal/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
HiveV1() hivev1.HiveV1Interface
HiveinternalV1alpha1() hiveinternalv1alpha1.HiveinternalV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
hiveV1 *hivev1.HiveV1Client
hiveinternalV1alpha1 *hiveinternalv1alpha1.HiveinternalV1alpha1Client
}
// HiveV1 retrieves the HiveV1Client
func (c *Clientset) HiveV1() hivev1.HiveV1Interface {
return c.hiveV1
}
// HiveinternalV1alpha1 retrieves the HiveinternalV1alpha1Client
func (c *Clientset) HiveinternalV1alpha1() hiveinternalv1alpha1.HiveinternalV1alpha1Interface {
return c.hiveinternalV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.hiveV1, err = hivev1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.hiveinternalV1alpha1, err = hiveinternalv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.hiveV1 = hivev1.NewForConfigOrDie(c)
cs.hiveinternalV1alpha1 = hiveinternalv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.hiveV1 = hivev1.New(c)
cs.hiveinternalV1alpha1 = hiveinternalv1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
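
A hedged end-to-end sketch of using this clientset: load a rest.Config from a kubeconfig (the path is a placeholder) and list ClusterDeployments through the typed HiveV1 client generated below.

package main

import (
	"context"
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	versioned "github.com/openshift/hive/pkg/client/clientset/versioned"
)

func main() {
	// Build a rest.Config from a kubeconfig file; the path is illustrative.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// List ClusterDeployments in the assumed "hive" namespace.
	cds, err := cs.HiveV1().ClusterDeployments("hive").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, cd := range cds.Items {
		fmt.Println(cd.Name)
	}
}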

4
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/doc.go generated vendored Normal file

@ -0,0 +1,4 @@
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned

4
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/scheme/doc.go generated vendored Normal file

@ -0,0 +1,4 @@
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme

42
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/scheme/register.go generated vendored Normal file

@ -0,0 +1,42 @@
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
hivev1 "github.com/openshift/hive/apis/hive/v1"
hiveinternalv1alpha1 "github.com/openshift/hive/apis/hiveinternal/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
hivev1.AddToScheme,
hiveinternalv1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/checkpoint.go generated vendored Normal file

@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// CheckpointsGetter has a method to return a CheckpointInterface.
// A group's client should implement this interface.
type CheckpointsGetter interface {
Checkpoints(namespace string) CheckpointInterface
}
// CheckpointInterface has methods to work with Checkpoint resources.
type CheckpointInterface interface {
Create(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.CreateOptions) (*v1.Checkpoint, error)
Update(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.UpdateOptions) (*v1.Checkpoint, error)
UpdateStatus(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.UpdateOptions) (*v1.Checkpoint, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Checkpoint, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.CheckpointList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Checkpoint, err error)
CheckpointExpansion
}
// checkpoints implements CheckpointInterface
type checkpoints struct {
client rest.Interface
ns string
}
// newCheckpoints returns a Checkpoints
func newCheckpoints(c *HiveV1Client, namespace string) *checkpoints {
return &checkpoints{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the checkpoint, and returns the corresponding checkpoint object, and an error if there is any.
func (c *checkpoints) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Checkpoint, err error) {
result = &v1.Checkpoint{}
err = c.client.Get().
Namespace(c.ns).
Resource("checkpoints").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of Checkpoints that match those selectors.
func (c *checkpoints) List(ctx context.Context, opts metav1.ListOptions) (result *v1.CheckpointList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.CheckpointList{}
err = c.client.Get().
Namespace(c.ns).
Resource("checkpoints").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested checkpoints.
func (c *checkpoints) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("checkpoints").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a checkpoint and creates it. Returns the server's representation of the checkpoint, and an error, if there is any.
func (c *checkpoints) Create(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.CreateOptions) (result *v1.Checkpoint, err error) {
result = &v1.Checkpoint{}
err = c.client.Post().
Namespace(c.ns).
Resource("checkpoints").
VersionedParams(&opts, scheme.ParameterCodec).
Body(checkpoint).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a checkpoint and updates it. Returns the server's representation of the checkpoint, and an error, if there is any.
func (c *checkpoints) Update(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.UpdateOptions) (result *v1.Checkpoint, err error) {
result = &v1.Checkpoint{}
err = c.client.Put().
Namespace(c.ns).
Resource("checkpoints").
Name(checkpoint.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(checkpoint).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *checkpoints) UpdateStatus(ctx context.Context, checkpoint *v1.Checkpoint, opts metav1.UpdateOptions) (result *v1.Checkpoint, err error) {
result = &v1.Checkpoint{}
err = c.client.Put().
Namespace(c.ns).
Resource("checkpoints").
Name(checkpoint.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(checkpoint).
Do(ctx).
Into(result)
return
}
// Delete takes name of the checkpoint and deletes it. Returns an error if one occurs.
func (c *checkpoints) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("checkpoints").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *checkpoints) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("checkpoints").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched checkpoint.
func (c *checkpoints) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Checkpoint, err error) {
result = &v1.Checkpoint{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("checkpoints").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
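
A minimal sketch of driving the generated Watch method above; cs is assumed to be a *versioned.Clientset as constructed earlier, and the 300-second timeout is illustrative.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	versioned "github.com/openshift/hive/pkg/client/clientset/versioned"
)

// watchCheckpoints streams checkpoint events until the server-side timeout
// elapses or the context is cancelled.
func watchCheckpoints(ctx context.Context, cs *versioned.Clientset, namespace string) error {
	timeout := int64(300) // the server closes the watch after 5 minutes
	w, err := cs.HiveV1().Checkpoints(namespace).Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s %T\n", ev.Type, ev.Object)
	}
	return nil
}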

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterclaim.go generated vendored Normal file

@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterClaimsGetter has a method to return a ClusterClaimInterface.
// A group's client should implement this interface.
type ClusterClaimsGetter interface {
ClusterClaims(namespace string) ClusterClaimInterface
}
// ClusterClaimInterface has methods to work with ClusterClaim resources.
type ClusterClaimInterface interface {
Create(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.CreateOptions) (*v1.ClusterClaim, error)
Update(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.UpdateOptions) (*v1.ClusterClaim, error)
UpdateStatus(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.UpdateOptions) (*v1.ClusterClaim, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterClaim, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterClaimList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterClaim, err error)
ClusterClaimExpansion
}
// clusterClaims implements ClusterClaimInterface
type clusterClaims struct {
client rest.Interface
ns string
}
// newClusterClaims returns a ClusterClaims
func newClusterClaims(c *HiveV1Client, namespace string) *clusterClaims {
return &clusterClaims{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterClaim, and returns the corresponding clusterClaim object, and an error if there is any.
func (c *clusterClaims) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterClaim, err error) {
result = &v1.ClusterClaim{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterclaims").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterClaims that match those selectors.
func (c *clusterClaims) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterClaimList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterClaimList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterclaims").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterClaims.
func (c *clusterClaims) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterclaims").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterClaim and creates it. Returns the server's representation of the clusterClaim, and an error, if there is any.
func (c *clusterClaims) Create(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.CreateOptions) (result *v1.ClusterClaim, err error) {
result = &v1.ClusterClaim{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterclaims").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterClaim).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterClaim and updates it. Returns the server's representation of the clusterClaim, and an error, if there is any.
func (c *clusterClaims) Update(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.UpdateOptions) (result *v1.ClusterClaim, err error) {
result = &v1.ClusterClaim{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterclaims").
Name(clusterClaim.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterClaim).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterClaims) UpdateStatus(ctx context.Context, clusterClaim *v1.ClusterClaim, opts metav1.UpdateOptions) (result *v1.ClusterClaim, err error) {
result = &v1.ClusterClaim{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterclaims").
Name(clusterClaim.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterClaim).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterClaim and deletes it. Returns an error if one occurs.
func (c *clusterClaims) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterclaims").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterClaims) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterclaims").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterClaim.
func (c *clusterClaims) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterClaim, err error) {
result = &v1.ClusterClaim{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterclaims").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
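
A hedged sketch of the generated Patch method above, applying a JSON merge patch that adds a label; the claim name "my-claim" and the label key/value are assumptions.

package example

import (
	"context"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	types "k8s.io/apimachinery/pkg/types"

	versioned "github.com/openshift/hive/pkg/client/clientset/versioned"
)

// labelClaim sets a label on an existing ClusterClaim via a JSON merge patch,
// avoiding a full read-modify-write Update.
func labelClaim(ctx context.Context, cs *versioned.Clientset, ns string) (*hivev1.ClusterClaim, error) {
	patch := []byte(`{"metadata":{"labels":{"team":"platform"}}}`)
	return cs.HiveV1().ClusterClaims(ns).Patch(ctx, "my-claim", types.MergePatchType, patch, metav1.PatchOptions{})
}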

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterdeployment.go generated vendored Normal file

@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterDeploymentsGetter has a method to return a ClusterDeploymentInterface.
// A group's client should implement this interface.
type ClusterDeploymentsGetter interface {
ClusterDeployments(namespace string) ClusterDeploymentInterface
}
// ClusterDeploymentInterface has methods to work with ClusterDeployment resources.
type ClusterDeploymentInterface interface {
Create(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.CreateOptions) (*v1.ClusterDeployment, error)
Update(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.UpdateOptions) (*v1.ClusterDeployment, error)
UpdateStatus(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.UpdateOptions) (*v1.ClusterDeployment, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeployment, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeploymentList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeployment, err error)
ClusterDeploymentExpansion
}
// clusterDeployments implements ClusterDeploymentInterface
type clusterDeployments struct {
client rest.Interface
ns string
}
// newClusterDeployments returns a ClusterDeployments
func newClusterDeployments(c *HiveV1Client, namespace string) *clusterDeployments {
return &clusterDeployments{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterDeployment, and returns the corresponding clusterDeployment object, and an error if there is any.
func (c *clusterDeployments) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeployment, err error) {
result = &v1.ClusterDeployment{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterdeployments").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterDeployments that match those selectors.
func (c *clusterDeployments) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeploymentList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterDeploymentList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterdeployments").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterDeployments.
func (c *clusterDeployments) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterdeployments").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterDeployment and creates it. Returns the server's representation of the clusterDeployment, and an error, if there is any.
func (c *clusterDeployments) Create(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.CreateOptions) (result *v1.ClusterDeployment, err error) {
result = &v1.ClusterDeployment{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterdeployments").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeployment).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterDeployment and updates it. Returns the server's representation of the clusterDeployment, and an error, if there is any.
func (c *clusterDeployments) Update(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.UpdateOptions) (result *v1.ClusterDeployment, err error) {
result = &v1.ClusterDeployment{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterdeployments").
Name(clusterDeployment.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeployment).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterDeployments) UpdateStatus(ctx context.Context, clusterDeployment *v1.ClusterDeployment, opts metav1.UpdateOptions) (result *v1.ClusterDeployment, err error) {
result = &v1.ClusterDeployment{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterdeployments").
Name(clusterDeployment.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeployment).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterDeployment and deletes it. Returns an error if one occurs.
func (c *clusterDeployments) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterdeployments").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterDeployments) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterdeployments").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterDeployment.
func (c *clusterDeployments) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeployment, err error) {
result = &v1.ClusterDeployment{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterdeployments").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterdeprovision.go generated vendored Normal file

@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterDeprovisionsGetter has a method to return a ClusterDeprovisionInterface.
// A group's client should implement this interface.
type ClusterDeprovisionsGetter interface {
ClusterDeprovisions(namespace string) ClusterDeprovisionInterface
}
// ClusterDeprovisionInterface has methods to work with ClusterDeprovision resources.
type ClusterDeprovisionInterface interface {
Create(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.CreateOptions) (*v1.ClusterDeprovision, error)
Update(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.UpdateOptions) (*v1.ClusterDeprovision, error)
UpdateStatus(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.UpdateOptions) (*v1.ClusterDeprovision, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterDeprovision, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterDeprovisionList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeprovision, err error)
ClusterDeprovisionExpansion
}
// clusterDeprovisions implements ClusterDeprovisionInterface
type clusterDeprovisions struct {
client rest.Interface
ns string
}
// newClusterDeprovisions returns a ClusterDeprovisions
func newClusterDeprovisions(c *HiveV1Client, namespace string) *clusterDeprovisions {
return &clusterDeprovisions{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterDeprovision, and returns the corresponding clusterDeprovision object, and an error if there is any.
func (c *clusterDeprovisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterDeprovision, err error) {
result = &v1.ClusterDeprovision{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterdeprovisions").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterDeprovisions that match those selectors.
func (c *clusterDeprovisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterDeprovisionList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterDeprovisionList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterdeprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterDeprovisions.
func (c *clusterDeprovisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterdeprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterDeprovision and creates it. Returns the server's representation of the clusterDeprovision, and an error, if there is any.
func (c *clusterDeprovisions) Create(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.CreateOptions) (result *v1.ClusterDeprovision, err error) {
result = &v1.ClusterDeprovision{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterdeprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeprovision).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterDeprovision and updates it. Returns the server's representation of the clusterDeprovision, and an error, if there is any.
func (c *clusterDeprovisions) Update(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.UpdateOptions) (result *v1.ClusterDeprovision, err error) {
result = &v1.ClusterDeprovision{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterdeprovisions").
Name(clusterDeprovision.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeprovision).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterDeprovisions) UpdateStatus(ctx context.Context, clusterDeprovision *v1.ClusterDeprovision, opts metav1.UpdateOptions) (result *v1.ClusterDeprovision, err error) {
result = &v1.ClusterDeprovision{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterdeprovisions").
Name(clusterDeprovision.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterDeprovision).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterDeprovision and deletes it. Returns an error if one occurs.
func (c *clusterDeprovisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterdeprovisions").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterDeprovisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterdeprovisions").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterDeprovision.
func (c *clusterDeprovisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterDeprovision, err error) {
result = &v1.ClusterDeprovision{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterdeprovisions").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
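
A sketch of driving the Watch method generated above (not part of the vendored file); the namespace and the five-minute server-side timeout are illustrative.

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// printDeprovisionEvents watches ClusterDeprovisions in one namespace until
// the server closes the watch (after ~300s here) or the channel is drained.
func printDeprovisionEvents(ctx context.Context, c hiveclientv1.ClusterDeprovisionsGetter, ns string) error {
	timeout := int64(300) // feeds both opts.TimeoutSeconds and the request timeout
	w, err := c.ClusterDeprovisions(ns).Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s: %T\n", ev.Type, ev.Object)
	}
	return nil
}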

168
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterimageset.go generated vendored Normal file

@@ -0,0 +1,168 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterImageSetsGetter has a method to return a ClusterImageSetInterface.
// A group's client should implement this interface.
type ClusterImageSetsGetter interface {
ClusterImageSets() ClusterImageSetInterface
}
// ClusterImageSetInterface has methods to work with ClusterImageSet resources.
type ClusterImageSetInterface interface {
Create(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.CreateOptions) (*v1.ClusterImageSet, error)
Update(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.UpdateOptions) (*v1.ClusterImageSet, error)
UpdateStatus(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.UpdateOptions) (*v1.ClusterImageSet, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterImageSet, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterImageSetList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterImageSet, err error)
ClusterImageSetExpansion
}
// clusterImageSets implements ClusterImageSetInterface
type clusterImageSets struct {
client rest.Interface
}
// newClusterImageSets returns a ClusterImageSets
func newClusterImageSets(c *HiveV1Client) *clusterImageSets {
return &clusterImageSets{
client: c.RESTClient(),
}
}
// Get takes name of the clusterImageSet, and returns the corresponding clusterImageSet object, and an error if there is any.
func (c *clusterImageSets) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterImageSet, err error) {
result = &v1.ClusterImageSet{}
err = c.client.Get().
Resource("clusterimagesets").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterImageSets that match those selectors.
func (c *clusterImageSets) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterImageSetList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterImageSetList{}
err = c.client.Get().
Resource("clusterimagesets").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterImageSets.
func (c *clusterImageSets) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("clusterimagesets").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterImageSet and creates it. Returns the server's representation of the clusterImageSet, and an error, if there is any.
func (c *clusterImageSets) Create(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.CreateOptions) (result *v1.ClusterImageSet, err error) {
result = &v1.ClusterImageSet{}
err = c.client.Post().
Resource("clusterimagesets").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterImageSet).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterImageSet and updates it. Returns the server's representation of the clusterImageSet, and an error, if there is any.
func (c *clusterImageSets) Update(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.UpdateOptions) (result *v1.ClusterImageSet, err error) {
result = &v1.ClusterImageSet{}
err = c.client.Put().
Resource("clusterimagesets").
Name(clusterImageSet.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterImageSet).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterImageSets) UpdateStatus(ctx context.Context, clusterImageSet *v1.ClusterImageSet, opts metav1.UpdateOptions) (result *v1.ClusterImageSet, err error) {
result = &v1.ClusterImageSet{}
err = c.client.Put().
Resource("clusterimagesets").
Name(clusterImageSet.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterImageSet).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterImageSet and deletes it. Returns an error if one occurs.
func (c *clusterImageSets) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Resource("clusterimagesets").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterImageSets) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("clusterimagesets").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterImageSet.
func (c *clusterImageSets) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterImageSet, err error) {
result = &v1.ClusterImageSet{}
err = c.client.Patch(pt).
Resource("clusterimagesets").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
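
ClusterImageSets are cluster-scoped, so the getter above takes no namespace. A small listing sketch (not part of the vendored file; the label selector is hypothetical):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// listImageSets prints the names of ClusterImageSets matching a
// (hypothetical) label selector.
func listImageSets(ctx context.Context, c hiveclientv1.ClusterImageSetsGetter) error {
	sets, err := c.ClusterImageSets().List(ctx, metav1.ListOptions{LabelSelector: "hive.openshift.io/managed=true"})
	if err != nil {
		return err
	}
	for i := range sets.Items {
		fmt.Println(sets.Items[i].Name)
	}
	return nil
}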

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterpool.go generated vendored Normal file

@@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterPoolsGetter has a method to return a ClusterPoolInterface.
// A group's client should implement this interface.
type ClusterPoolsGetter interface {
ClusterPools(namespace string) ClusterPoolInterface
}
// ClusterPoolInterface has methods to work with ClusterPool resources.
type ClusterPoolInterface interface {
Create(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.CreateOptions) (*v1.ClusterPool, error)
Update(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.UpdateOptions) (*v1.ClusterPool, error)
UpdateStatus(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.UpdateOptions) (*v1.ClusterPool, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterPool, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterPoolList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterPool, err error)
ClusterPoolExpansion
}
// clusterPools implements ClusterPoolInterface
type clusterPools struct {
client rest.Interface
ns string
}
// newClusterPools returns a ClusterPools
func newClusterPools(c *HiveV1Client, namespace string) *clusterPools {
return &clusterPools{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterPool, and returns the corresponding clusterPool object, and an error if there is any.
func (c *clusterPools) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterPool, err error) {
result = &v1.ClusterPool{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterpools").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterPools that match those selectors.
func (c *clusterPools) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterPoolList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterPoolList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterpools").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterPools.
func (c *clusterPools) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterpools").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterPool and creates it. Returns the server's representation of the clusterPool, and an error, if there is any.
func (c *clusterPools) Create(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.CreateOptions) (result *v1.ClusterPool, err error) {
result = &v1.ClusterPool{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterpools").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterPool).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterPool and updates it. Returns the server's representation of the clusterPool, and an error, if there is any.
func (c *clusterPools) Update(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.UpdateOptions) (result *v1.ClusterPool, err error) {
result = &v1.ClusterPool{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterpools").
Name(clusterPool.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterPool).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterPools) UpdateStatus(ctx context.Context, clusterPool *v1.ClusterPool, opts metav1.UpdateOptions) (result *v1.ClusterPool, err error) {
result = &v1.ClusterPool{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterpools").
Name(clusterPool.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterPool).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterPool and deletes it. Returns an error if one occurs.
func (c *clusterPools) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterpools").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterPools) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterpools").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterPool.
func (c *clusterPools) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterPool, err error) {
result = &v1.ClusterPool{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterpools").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
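
Because Update sends the full object including its resourceVersion, writers usually wrap it in client-go's conflict-retry helper. A sketch (not part of the vendored file; it assumes the hive v1 ClusterPoolSpec exposes a Size field, and all names are hypothetical):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// resizePool re-reads and updates a ClusterPool until the write is no longer
// rejected with a resourceVersion conflict.
func resizePool(ctx context.Context, c hiveclientv1.ClusterPoolsGetter, ns, name string, size int32) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		pool, err := c.ClusterPools(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		pool.Spec.Size = size // assumes hive v1 ClusterPoolSpec.Size
		_, err = c.ClusterPools(ns).Update(ctx, pool, metav1.UpdateOptions{})
		return err
	})
}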

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterprovision.go generated vendored Normal file

@@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterProvisionsGetter has a method to return a ClusterProvisionInterface.
// A group's client should implement this interface.
type ClusterProvisionsGetter interface {
ClusterProvisions(namespace string) ClusterProvisionInterface
}
// ClusterProvisionInterface has methods to work with ClusterProvision resources.
type ClusterProvisionInterface interface {
Create(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.CreateOptions) (*v1.ClusterProvision, error)
Update(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.UpdateOptions) (*v1.ClusterProvision, error)
UpdateStatus(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.UpdateOptions) (*v1.ClusterProvision, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterProvision, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterProvisionList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterProvision, err error)
ClusterProvisionExpansion
}
// clusterProvisions implements ClusterProvisionInterface
type clusterProvisions struct {
client rest.Interface
ns string
}
// newClusterProvisions returns a ClusterProvisions
func newClusterProvisions(c *HiveV1Client, namespace string) *clusterProvisions {
return &clusterProvisions{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterProvision, and returns the corresponding clusterProvision object, and an error if there is any.
func (c *clusterProvisions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterProvision, err error) {
result = &v1.ClusterProvision{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterprovisions").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterProvisions that match those selectors.
func (c *clusterProvisions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterProvisionList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterProvisionList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterProvisions.
func (c *clusterProvisions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterProvision and creates it. Returns the server's representation of the clusterProvision, and an error, if there is any.
func (c *clusterProvisions) Create(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.CreateOptions) (result *v1.ClusterProvision, err error) {
result = &v1.ClusterProvision{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterprovisions").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterProvision).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterProvision and updates it. Returns the server's representation of the clusterProvision, and an error, if there is any.
func (c *clusterProvisions) Update(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.UpdateOptions) (result *v1.ClusterProvision, err error) {
result = &v1.ClusterProvision{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterprovisions").
Name(clusterProvision.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterProvision).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterProvisions) UpdateStatus(ctx context.Context, clusterProvision *v1.ClusterProvision, opts metav1.UpdateOptions) (result *v1.ClusterProvision, err error) {
result = &v1.ClusterProvision{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterprovisions").
Name(clusterProvision.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterProvision).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterProvision and deletes it. Returns an error if one occurs.
func (c *clusterProvisions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterprovisions").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterProvisions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterprovisions").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterProvision.
func (c *clusterProvisions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterProvision, err error) {
result = &v1.ClusterProvision{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterprovisions").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
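
Note how DeleteCollection above routes the list selector through listOpts while the delete options travel in the request body. A sketch deleting a labeled subset in one request (not part of the vendored file; the label is hypothetical):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// deleteFailedProvisions removes every ClusterProvision in a namespace that
// carries a (hypothetical) failure label, in a single API call.
func deleteFailedProvisions(ctx context.Context, c hiveclientv1.ClusterProvisionsGetter, ns string) error {
	return c.ClusterProvisions(ns).DeleteCollection(ctx,
		metav1.DeleteOptions{},
		metav1.ListOptions{LabelSelector: "hive.openshift.io/provision-result=failed"},
	)
}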

168
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterrelocate.go generated vendored Normal file

@@ -0,0 +1,168 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterRelocatesGetter has a method to return a ClusterRelocateInterface.
// A group's client should implement this interface.
type ClusterRelocatesGetter interface {
ClusterRelocates() ClusterRelocateInterface
}
// ClusterRelocateInterface has methods to work with ClusterRelocate resources.
type ClusterRelocateInterface interface {
Create(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.CreateOptions) (*v1.ClusterRelocate, error)
Update(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.UpdateOptions) (*v1.ClusterRelocate, error)
UpdateStatus(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.UpdateOptions) (*v1.ClusterRelocate, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterRelocate, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterRelocateList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRelocate, err error)
ClusterRelocateExpansion
}
// clusterRelocates implements ClusterRelocateInterface
type clusterRelocates struct {
client rest.Interface
}
// newClusterRelocates returns a ClusterRelocates
func newClusterRelocates(c *HiveV1Client) *clusterRelocates {
return &clusterRelocates{
client: c.RESTClient(),
}
}
// Get takes name of the clusterRelocate, and returns the corresponding clusterRelocate object, and an error if there is any.
func (c *clusterRelocates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterRelocate, err error) {
result = &v1.ClusterRelocate{}
err = c.client.Get().
Resource("clusterrelocates").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterRelocates that match those selectors.
func (c *clusterRelocates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterRelocateList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterRelocateList{}
err = c.client.Get().
Resource("clusterrelocates").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterRelocates.
func (c *clusterRelocates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Resource("clusterrelocates").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterRelocate and creates it. Returns the server's representation of the clusterRelocate, and an error, if there is any.
func (c *clusterRelocates) Create(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.CreateOptions) (result *v1.ClusterRelocate, err error) {
result = &v1.ClusterRelocate{}
err = c.client.Post().
Resource("clusterrelocates").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterRelocate).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterRelocate and updates it. Returns the server's representation of the clusterRelocate, and an error, if there is any.
func (c *clusterRelocates) Update(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.UpdateOptions) (result *v1.ClusterRelocate, err error) {
result = &v1.ClusterRelocate{}
err = c.client.Put().
Resource("clusterrelocates").
Name(clusterRelocate.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterRelocate).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterRelocates) UpdateStatus(ctx context.Context, clusterRelocate *v1.ClusterRelocate, opts metav1.UpdateOptions) (result *v1.ClusterRelocate, err error) {
result = &v1.ClusterRelocate{}
err = c.client.Put().
Resource("clusterrelocates").
Name(clusterRelocate.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterRelocate).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterRelocate and deletes it. Returns an error if one occurs.
func (c *clusterRelocates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Resource("clusterrelocates").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterRelocates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Resource("clusterrelocates").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterRelocate.
func (c *clusterRelocates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterRelocate, err error) {
result = &v1.ClusterRelocate{}
err = c.client.Patch(pt).
Resource("clusterrelocates").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
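
UpdateStatus above writes only through the status subresource, so spec changes carried on the same object are ignored by the server. A sketch of the usual read-modify-write (not part of the vendored file; concrete status fields are omitted because they depend on the hive v1 type):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// touchRelocateStatus fetches a ClusterRelocate and writes its status back
// through the status subresource.
func touchRelocateStatus(ctx context.Context, c hiveclientv1.ClusterRelocatesGetter, name string) (*hivev1.ClusterRelocate, error) {
	cr, err := c.ClusterRelocates().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	// Mutate cr.Status here as needed; the fields live in hivev1.
	return c.ClusterRelocates().UpdateStatus(ctx, cr, metav1.UpdateOptions{})
}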

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/clusterstate.go generated vendored Normal file

@@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ClusterStatesGetter has a method to return a ClusterStateInterface.
// A group's client should implement this interface.
type ClusterStatesGetter interface {
ClusterStates(namespace string) ClusterStateInterface
}
// ClusterStateInterface has methods to work with ClusterState resources.
type ClusterStateInterface interface {
Create(ctx context.Context, clusterState *v1.ClusterState, opts metav1.CreateOptions) (*v1.ClusterState, error)
Update(ctx context.Context, clusterState *v1.ClusterState, opts metav1.UpdateOptions) (*v1.ClusterState, error)
UpdateStatus(ctx context.Context, clusterState *v1.ClusterState, opts metav1.UpdateOptions) (*v1.ClusterState, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ClusterState, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.ClusterStateList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterState, err error)
ClusterStateExpansion
}
// clusterStates implements ClusterStateInterface
type clusterStates struct {
client rest.Interface
ns string
}
// newClusterStates returns a ClusterStates
func newClusterStates(c *HiveV1Client, namespace string) *clusterStates {
return &clusterStates{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the clusterState, and returns the corresponding clusterState object, and an error if there is any.
func (c *clusterStates) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ClusterState, err error) {
result = &v1.ClusterState{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterstates").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of ClusterStates that match those selectors.
func (c *clusterStates) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ClusterStateList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.ClusterStateList{}
err = c.client.Get().
Namespace(c.ns).
Resource("clusterstates").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested clusterStates.
func (c *clusterStates) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("clusterstates").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a clusterState and creates it. Returns the server's representation of the clusterState, and an error, if there is any.
func (c *clusterStates) Create(ctx context.Context, clusterState *v1.ClusterState, opts metav1.CreateOptions) (result *v1.ClusterState, err error) {
result = &v1.ClusterState{}
err = c.client.Post().
Namespace(c.ns).
Resource("clusterstates").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterState).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a clusterState and updates it. Returns the server's representation of the clusterState, and an error, if there is any.
func (c *clusterStates) Update(ctx context.Context, clusterState *v1.ClusterState, opts metav1.UpdateOptions) (result *v1.ClusterState, err error) {
result = &v1.ClusterState{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterstates").
Name(clusterState.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterState).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *clusterStates) UpdateStatus(ctx context.Context, clusterState *v1.ClusterState, opts metav1.UpdateOptions) (result *v1.ClusterState, err error) {
result = &v1.ClusterState{}
err = c.client.Put().
Namespace(c.ns).
Resource("clusterstates").
Name(clusterState.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(clusterState).
Do(ctx).
Into(result)
return
}
// Delete takes name of the clusterState and deletes it. Returns an error if one occurs.
func (c *clusterStates) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("clusterstates").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *clusterStates) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("clusterstates").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched clusterState.
func (c *clusterStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ClusterState, err error) {
result = &v1.ClusterState{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("clusterstates").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
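
As the List method above shows, opts.TimeoutSeconds is also converted into a client-side request timeout. A sketch bounding a ClusterState list to ten seconds (not part of the vendored file):

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// countStates lists ClusterStates with a 10s bound applied both server-side
// (TimeoutSeconds) and to the underlying HTTP request (Timeout).
func countStates(ctx context.Context, c hiveclientv1.ClusterStatesGetter, ns string) error {
	timeout := int64(10)
	list, err := c.ClusterStates(ns).List(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		return err
	}
	fmt.Printf("%d ClusterStates in %s\n", len(list.Items), ns)
	return nil
}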

179
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/dnszone.go generated vendored Normal file

@@ -0,0 +1,179 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"context"
"time"
v1 "github.com/openshift/hive/apis/hive/v1"
scheme "github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// DNSZonesGetter has a method to return a DNSZoneInterface.
// A group's client should implement this interface.
type DNSZonesGetter interface {
DNSZones(namespace string) DNSZoneInterface
}
// DNSZoneInterface has methods to work with DNSZone resources.
type DNSZoneInterface interface {
Create(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.CreateOptions) (*v1.DNSZone, error)
Update(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.UpdateOptions) (*v1.DNSZone, error)
UpdateStatus(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.UpdateOptions) (*v1.DNSZone, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.DNSZone, error)
List(ctx context.Context, opts metav1.ListOptions) (*v1.DNSZoneList, error)
Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNSZone, err error)
DNSZoneExpansion
}
// dNSZones implements DNSZoneInterface
type dNSZones struct {
client rest.Interface
ns string
}
// newDNSZones returns a DNSZones
func newDNSZones(c *HiveV1Client, namespace string) *dNSZones {
return &dNSZones{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the dNSZone, and returns the corresponding dNSZone object, and an error if there is any.
func (c *dNSZones) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.DNSZone, err error) {
result = &v1.DNSZone{}
err = c.client.Get().
Namespace(c.ns).
Resource("dnszones").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of DNSZones that match those selectors.
func (c *dNSZones) List(ctx context.Context, opts metav1.ListOptions) (result *v1.DNSZoneList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.DNSZoneList{}
err = c.client.Get().
Namespace(c.ns).
Resource("dnszones").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested dNSZones.
func (c *dNSZones) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("dnszones").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a dNSZone and creates it. Returns the server's representation of the dNSZone, and an error, if there is any.
func (c *dNSZones) Create(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.CreateOptions) (result *v1.DNSZone, err error) {
result = &v1.DNSZone{}
err = c.client.Post().
Namespace(c.ns).
Resource("dnszones").
VersionedParams(&opts, scheme.ParameterCodec).
Body(dNSZone).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a dNSZone and updates it. Returns the server's representation of the dNSZone, and an error, if there is any.
func (c *dNSZones) Update(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.UpdateOptions) (result *v1.DNSZone, err error) {
result = &v1.DNSZone{}
err = c.client.Put().
Namespace(c.ns).
Resource("dnszones").
Name(dNSZone.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(dNSZone).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *dNSZones) UpdateStatus(ctx context.Context, dNSZone *v1.DNSZone, opts metav1.UpdateOptions) (result *v1.DNSZone, err error) {
result = &v1.DNSZone{}
err = c.client.Put().
Namespace(c.ns).
Resource("dnszones").
Name(dNSZone.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(dNSZone).
Do(ctx).
Into(result)
return
}
// Delete takes name of the dNSZone and deletes it. Returns an error if one occurs.
func (c *dNSZones) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("dnszones").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *dNSZones) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("dnszones").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched dNSZone.
func (c *dNSZones) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.DNSZone, err error) {
result = &v1.DNSZone{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("dnszones").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
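
A sketch of Create with the generated DNSZone client (not part of the vendored file): only metadata is populated here, the object name is hypothetical, and real spec fields come from the hive v1 API.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	hivev1 "github.com/openshift/hive/apis/hive/v1"
	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

// createZone posts a new DNSZone and returns the server's representation,
// including defaulted fields and the assigned UID.
func createZone(ctx context.Context, c hiveclientv1.DNSZonesGetter, ns string) (*hivev1.DNSZone, error) {
	zone := &hivev1.DNSZone{
		ObjectMeta: metav1.ObjectMeta{Name: "example-zone"},
		// Spec intentionally left empty in this sketch; populate
		// hivev1.DNSZoneSpec per the API before real use.
	}
	return c.DNSZones(ns).Create(ctx, zone, metav1.CreateOptions{})
}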

4
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/doc.go generated vendored Normal file

@@ -0,0 +1,4 @@
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1

37
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/generated_expansion.go generated vendored Normal file

@@ -0,0 +1,37 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
type CheckpointExpansion interface{}
type ClusterClaimExpansion interface{}
type ClusterDeploymentExpansion interface{}
type ClusterDeprovisionExpansion interface{}
type ClusterImageSetExpansion interface{}
type ClusterPoolExpansion interface{}
type ClusterProvisionExpansion interface{}
type ClusterRelocateExpansion interface{}
type ClusterStateExpansion interface{}
type DNSZoneExpansion interface{}
type HiveConfigExpansion interface{}
type MachinePoolExpansion interface{}
type MachinePoolNameLeaseExpansion interface{}
type SelectorSyncIdentityProviderExpansion interface{}
type SelectorSyncSetExpansion interface{}
type SyncIdentityProviderExpansion interface{}
type SyncSetExpansion interface{}
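
Each empty interface above is client-gen's hook for hand-written helpers: when extra methods are wanted, the empty declaration moves out of this generated file into a hand-written one that declares the methods, and the unexported typed struct implements them (client-go's PodExpansion is the canonical instance). A hypothetical sketch of that pattern — no such method exists in hive, and it assumes the hive v1 ClusterImageSetSpec has a ReleaseImage field:

// clusterimageset_expansion.go -- hand-written; would replace the empty
// ClusterImageSetExpansion declaration in the generated file above.
package v1

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	v1 "github.com/openshift/hive/apis/hive/v1"
)

// ClusterImageSetExpansion adds a hypothetical convenience lookup.
type ClusterImageSetExpansion interface {
	GetByReleaseImage(ctx context.Context, image string) (*v1.ClusterImageSet, error)
}

// GetByReleaseImage scans all ClusterImageSets for one whose spec references
// the given release image; it returns nil if none matches.
func (c *clusterImageSets) GetByReleaseImage(ctx context.Context, image string) (*v1.ClusterImageSet, error) {
	list, err := c.List(ctx, metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	for i := range list.Items {
		if list.Items[i].Spec.ReleaseImage == image {
			return &list.Items[i], nil
		}
	}
	return nil, nil
}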

153
vendor/github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1/hive_client.go generated vendored Normal file

@@ -0,0 +1,153 @@
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/openshift/hive/apis/hive/v1"
"github.com/openshift/hive/pkg/client/clientset/versioned/scheme"
rest "k8s.io/client-go/rest"
)
type HiveV1Interface interface {
RESTClient() rest.Interface
CheckpointsGetter
ClusterClaimsGetter
ClusterDeploymentsGetter
ClusterDeprovisionsGetter
ClusterImageSetsGetter
ClusterPoolsGetter
ClusterProvisionsGetter
ClusterRelocatesGetter
ClusterStatesGetter
DNSZonesGetter
HiveConfigsGetter
MachinePoolsGetter
MachinePoolNameLeasesGetter
SelectorSyncIdentityProvidersGetter
SelectorSyncSetsGetter
SyncIdentityProvidersGetter
SyncSetsGetter
}
// HiveV1Client is used to interact with features provided by the hive.openshift.io group.
type HiveV1Client struct {
restClient rest.Interface
}
func (c *HiveV1Client) Checkpoints(namespace string) CheckpointInterface {
return newCheckpoints(c, namespace)
}
func (c *HiveV1Client) ClusterClaims(namespace string) ClusterClaimInterface {
return newClusterClaims(c, namespace)
}
func (c *HiveV1Client) ClusterDeployments(namespace string) ClusterDeploymentInterface {
return newClusterDeployments(c, namespace)
}
func (c *HiveV1Client) ClusterDeprovisions(namespace string) ClusterDeprovisionInterface {
return newClusterDeprovisions(c, namespace)
}
func (c *HiveV1Client) ClusterImageSets() ClusterImageSetInterface {
return newClusterImageSets(c)
}
func (c *HiveV1Client) ClusterPools(namespace string) ClusterPoolInterface {
return newClusterPools(c, namespace)
}
func (c *HiveV1Client) ClusterProvisions(namespace string) ClusterProvisionInterface {
return newClusterProvisions(c, namespace)
}
func (c *HiveV1Client) ClusterRelocates() ClusterRelocateInterface {
return newClusterRelocates(c)
}
func (c *HiveV1Client) ClusterStates(namespace string) ClusterStateInterface {
return newClusterStates(c, namespace)
}
func (c *HiveV1Client) DNSZones(namespace string) DNSZoneInterface {
return newDNSZones(c, namespace)
}
func (c *HiveV1Client) HiveConfigs() HiveConfigInterface {
return newHiveConfigs(c)
}
func (c *HiveV1Client) MachinePools(namespace string) MachinePoolInterface {
return newMachinePools(c, namespace)
}
func (c *HiveV1Client) MachinePoolNameLeases(namespace string) MachinePoolNameLeaseInterface {
return newMachinePoolNameLeases(c, namespace)
}
func (c *HiveV1Client) SelectorSyncIdentityProviders() SelectorSyncIdentityProviderInterface {
return newSelectorSyncIdentityProviders(c)
}
func (c *HiveV1Client) SelectorSyncSets() SelectorSyncSetInterface {
return newSelectorSyncSets(c)
}
func (c *HiveV1Client) SyncIdentityProviders(namespace string) SyncIdentityProviderInterface {
return newSyncIdentityProviders(c, namespace)
}
func (c *HiveV1Client) SyncSets(namespace string) SyncSetInterface {
return newSyncSets(c, namespace)
}
// NewForConfig creates a new HiveV1Client for the given config.
func NewForConfig(c *rest.Config) (*HiveV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &HiveV1Client{client}, nil
}
// NewForConfigOrDie creates a new HiveV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *HiveV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new HiveV1Client for the given RESTClient.
func New(c rest.Interface) *HiveV1Client {
return &HiveV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *HiveV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}
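
End to end, the client above is wired like any client-gen product. A minimal sketch (not part of the vendored file; the kubeconfig path, namespace, and object name are hypothetical):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	hiveclientv1 "github.com/openshift/hive/pkg/client/clientset/versioned/typed/hive/v1"
)

func main() {
	// Build a *rest.Config from a kubeconfig on disk (path is hypothetical).
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig fills in group/version, codecs, and user agent via
	// setConfigDefaults before constructing the REST client.
	client, err := hiveclientv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	cd, err := client.ClusterDeployments("hive").Get(context.TODO(), "my-cluster", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found ClusterDeployment:", cd.Name)
}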

Some files were not shown because too many files have changed in this diff.