Mirror of https://github.com/Azure/ARO-RP.git
Commit 1bf2c148e8

@@ -18,10 +18,11 @@
  revision = "3a14fb1c4737b3995174c5f4d6d08a348b9b4180"

[[projects]]
  digest = "1:46ac9b4cbd8bcada2ccf565a928f97dd4d4bff8e3f31580262d87c277b799e0a"
  digest = "1:19dc0c29c96999771f65e935f640bee2d58445d7b024242f802621866c88812a"
  name = "github.com/Azure/azure-sdk-for-go"
  packages = [
    "profiles/latest/dns/mgmt/dns",
    "profiles/latest/resources/mgmt/resources",
    "profiles/latest/resources/mgmt/subscriptions",
    "services/compute/mgmt/2018-10-01/compute",
    "services/compute/mgmt/2019-03-01/compute",
@@ -37,6 +38,7 @@
    "services/preview/authorization/mgmt/2018-09-01-preview/authorization",
    "services/preview/monitor/mgmt/2018-03-01/insights",
    "services/privatedns/mgmt/2018-09-01/privatedns",
    "services/resources/mgmt/2019-05-01/resources",
    "services/resources/mgmt/2019-06-01/subscriptions",
    "services/resources/mgmt/2019-07-01/features",
    "services/storage/mgmt/2019-04-01/storage",
@@ -940,8 +942,8 @@
  revision = "f6563a70e19a12b2f240eaf4e716ef75baf7003e"

[[projects]]
  branch = "master"
  digest = "1:fc1dc3a3bf4428fd72498feefbbd27b72f5e36e7aeec209534abeb1d59958b1a"
  branch = "release-4.3"
  digest = "1:454f3ebed85be06f8ed32549474df4fb75624319314f222983d4bae6d4daa1cd"
  name = "github.com/openshift/cloud-credential-operator"
  packages = [
    "pkg/apis/cloudcredential/v1",
@@ -949,7 +951,7 @@
    "version",
  ]
  pruneopts = "UT"
  revision = "fa06f17ab6de64c341a4d7bbe58f0cb8265e6f9c"
  revision = "02be5758f755d9da56cb4b37ae3ea52a9a0c436e"

[[projects]]
  branch = "release-4.3"
@@ -1023,7 +1025,7 @@

[[projects]]
  branch = "release-4.3-azure"
  digest = "1:31739f170a9c7f52aa432017931ed12153797216981423cbf92f60169f24bc21"
  digest = "1:5a21bb3a2985d4893a39eab4a7c8c19c0484fe49c4426320964be5ca96384f2c"
  name = "github.com/openshift/installer"
  packages = [
    "data",
@@ -1059,6 +1061,7 @@
    "pkg/asset/manifests/gcp",
    "pkg/asset/manifests/openstack",
    "pkg/asset/manifests/vsphere",
    "pkg/asset/openshiftinstall",
    "pkg/asset/password",
    "pkg/asset/releaseimage",
    "pkg/asset/rhcos",
@@ -1108,7 +1111,7 @@
    "pkg/version",
  ]
  pruneopts = "T"
  revision = "453c4e73d49e10dae4472a155f1b6e38a2abf1c9"
  revision = "693fa39b89731b6f3f7a383a480151ccde57eb8a"
  source = "https://github.com/jim-minter/installer"

[[projects]]
@@ -1562,7 +1565,7 @@
  version = "v1.26.0"

[[projects]]
  digest = "1:d95ba5ef55d8b2cdb896d1196efd1859b62c64829b1098c8b8cc395dd34fd25c"
  digest = "1:e1d4c7eb8bf9417cf31234deae63ffaceb08603e82f46ac109047315f23228ff"
  name = "gopkg.in/AlecAivazis/survey.v1"
  packages = [
    ".",
@@ -1570,8 +1573,7 @@
    "terminal",
  ]
  pruneopts = "UT"
  revision = "e4af3b345125b0903edb492a33a99a23e9eb3487"
  version = "v1.8.7"
  revision = "6773bdf39b7fa13e6a40ece7ac2d01e0d469c205"

[[projects]]
  digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
@@ -1717,7 +1719,7 @@

[[projects]]
  branch = "origin-4.3-kubernetes-1.16.2"
  digest = "1:eeecff9055f0018aa0ee3c8b162cae14436bbfdaf7a3922fa171900d88cd5073"
  digest = "1:121980454af2ecc796d3d5c7bf3a4e52773c5aa6b1d7e7528140f76ab70c2f5f"
  name = "k8s.io/client-go"
  packages = [
    "discovery",
@@ -1809,7 +1811,6 @@
    "plugin/pkg/client/auth/exec",
    "rest",
    "rest/watch",
    "restmapper",
    "testing",
    "tools/auth",
    "tools/clientcmd",
@@ -1988,6 +1989,7 @@
    "github.com/openshift/installer/pkg/asset/kubeconfig",
    "github.com/openshift/installer/pkg/asset/machines",
    "github.com/openshift/installer/pkg/asset/manifests",
    "github.com/openshift/installer/pkg/asset/openshiftinstall",
    "github.com/openshift/installer/pkg/asset/password",
    "github.com/openshift/installer/pkg/asset/releaseimage",
    "github.com/openshift/installer/pkg/asset/rhcos",
@@ -2031,7 +2033,6 @@
    "k8s.io/client-go/kubernetes/fake",
    "k8s.io/client-go/kubernetes/typed/core/v1",
    "k8s.io/client-go/rest",
    "k8s.io/client-go/restmapper",
    "k8s.io/client-go/testing",
    "k8s.io/client-go/tools/clientcmd",
    "k8s.io/client-go/tools/clientcmd/api/v1",
@@ -733,7 +733,7 @@
"component": {
  "type": "git",
  "git": {
    "commitHash": "fa06f17ab6de64c341a4d7bbe58f0cb8265e6f9c",
    "commitHash": "02be5758f755d9da56cb4b37ae3ea52a9a0c436e",
    "repositoryUrl": "https://github.com/openshift/cloud-credential-operator/"
  }
}
@@ -796,7 +796,7 @@
"component": {
  "type": "git",
  "git": {
    "commitHash": "453c4e73d49e10dae4472a155f1b6e38a2abf1c9",
    "commitHash": "693fa39b89731b6f3f7a383a480151ccde57eb8a",
    "repositoryUrl": "https://github.com/openshift/installer/"
  }
}
@@ -1093,7 +1093,7 @@
"component": {
  "type": "git",
  "git": {
    "commitHash": "e4af3b345125b0903edb492a33a99a23e9eb3487",
    "commitHash": "6773bdf39b7fa13e6a40ece7ac2d01e0d469c205",
    "repositoryUrl": "https://gopkg.in/AlecAivazis/survey.v1/"
  }
}
@@ -64,6 +64,8 @@ upstream OCP.

* No managed identity (for now).

* No IPv6 support (for now).

* Upstream installer closely binds the installConfig (cluster) name, cluster
  domain name, infra ID and Azure resource name prefix. ARO separates these out
  a little. The installConfig (cluster) name and the domain name remain bound;
@@ -20,7 +20,7 @@ var allowedPaths = []*regexp.Regexp{
	regexp.MustCompile(`^/manifests$`),
	regexp.MustCompile(`^/manifests/bootkube($|/)`),
	regexp.MustCompile(`^/manifests/openshift($|/)`),
	regexp.MustCompile(`^/rhcos.json$`),
	regexp.MustCompile(`^/rhcos-amd64.json$`),
}

type fileSystem struct {
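The hunk above swaps the whitelisted RHCOS metadata path from /rhcos.json to /rhcos-amd64.json. As a minimal sketch of how such a whitelist is typically consulted (assuming the allowedPaths slice declared above; the helper name is illustrative and not part of the repository):

```go
// isAllowed reports whether a requested path matches any whitelisted pattern.
func isAllowed(path string) bool {
	for _, rx := range allowedPaths {
		if rx.MatchString(path) {
			return true
		}
	}
	return false
}
```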
@@ -13,6 +13,7 @@ import (
	azstorage "github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/go-autorest/autorest/azure/auth"
	"github.com/openshift/installer/pkg/rhcos"
	"github.com/openshift/installer/pkg/types"

	_ "github.com/Azure/ARO-RP/pkg/install"
	"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/storage"
@@ -55,7 +56,7 @@ func run(ctx context.Context) error {
	if len(os.Args) == 2 {
		vhd = os.Args[1]
	} else {
		vhd, err = rhcos.VHD(ctx)
		vhd, err = rhcos.VHD(ctx, types.ArchitectureAMD64)
		if err != nil {
			return err
		}
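In the release-4.3 installer vendored by this change, rhcos.VHD takes the target architecture as a second argument. A minimal sketch of the updated call pattern (assuming the vendored installer packages shown in the import hunk above):

```go
package example

import (
	"context"

	"github.com/openshift/installer/pkg/rhcos"
	"github.com/openshift/installer/pkg/types"
)

// resolveVHD returns the published RHCOS VHD URL for amd64 clusters.
func resolveVHD(ctx context.Context) (string, error) {
	return rhcos.VHD(ctx, types.ArchitectureAMD64)
}
```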
@@ -199,7 +199,11 @@ func (m *Manager) Create(ctx context.Context) error {
	SSHKey: sshkey.Type() + " " + base64.StdEncoding.EncodeToString(sshkey.Marshal()),
	BaseDomain: domain[strings.IndexByte(domain, '.')+1:],
	Networking: &types.Networking{
		MachineCIDR: ipnet.MustParseCIDR("127.0.0.0/8"), // dummy
		MachineNetwork: []types.MachineNetworkEntry{
			{
				CIDR: *ipnet.MustParseCIDR("127.0.0.0/8"), // dummy
			},
		},
		NetworkType: "OpenShiftSDN",
		ClusterNetwork: []types.ClusterNetworkEntry{
			{
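The installConfig change above tracks an installer API change: the single MachineCIDR field is replaced by a list of MachineNetwork entries. A small sketch of the new shape (assuming the vendored installer types and ipnet packages; the CIDR value is illustrative):

```go
package example

import (
	"github.com/openshift/installer/pkg/ipnet"
	"github.com/openshift/installer/pkg/types"
)

// newNetworking builds the Networking stanza in the post-4.3 shape: a slice
// of MachineNetworkEntry values replaces the old single MachineCIDR field.
func newNetworking(machineCIDR string) *types.Networking {
	return &types.Networking{
		NetworkType: "OpenShiftSDN",
		MachineNetwork: []types.MachineNetworkEntry{
			{CIDR: *ipnet.MustParseCIDR(machineCIDR)},
		},
	}
}
```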
@@ -221,6 +225,7 @@ func (m *Manager) Create(ctx context.Context) error {
			},
		},
		Hyperthreading: "Enabled",
		Architecture: types.ArchitectureAMD64,
	},
	Compute: []types.MachinePool{
		{
@@ -236,6 +241,7 @@ func (m *Manager) Create(ctx context.Context) error {
			},
		},
		Hyperthreading: "Enabled",
		Architecture: types.ArchitectureAMD64,
	},
},
Platform: types.Platform{
@@ -307,7 +313,7 @@ var rxRHCOS = regexp.MustCompile(`rhcos-((\d+)\.\d+\.\d{8})\d{4}\.\d+-azure\.x86

func getRHCOSImage(ctx context.Context) (*azuretypes.Image, error) {
	// https://rhcos.blob.core.windows.net/imagebucket/rhcos-43.81.201911221453.0-azure.x86_64.vhd
	osImage, err := rhcos.VHD(ctx)
	osImage, err := rhcos.VHD(ctx, types.ArchitectureAMD64)
	if err != nil {
		return nil, err
	}
@@ -174,7 +174,7 @@ func (i *Installer) installResources(ctx context.Context) error {
PrivateLinkServiceProperties: &mgmtnetwork.PrivateLinkServiceProperties{
	LoadBalancerFrontendIPConfigurations: &[]mgmtnetwork.FrontendIPConfiguration{
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip-v4')]"),
		},
	},
	IPConfigurations: &[]mgmtnetwork.PrivateLinkServiceIPConfiguration{
@@ -215,7 +215,7 @@ func (i *Installer) installResources(ctx context.Context) error {
PublicIPAddressPropertiesFormat: &mgmtnetwork.PublicIPAddressPropertiesFormat{
	PublicIPAllocationMethod: mgmtnetwork.Static,
},
Name: to.StringPtr(infraID + "-pip"),
Name: to.StringPtr(infraID + "-pip-v4"),
Type: to.StringPtr("Microsoft.Network/publicIPAddresses"),
Location: &installConfig.Config.Azure.Region,
},
@@ -236,22 +236,22 @@ func (i *Installer) installResources(ctx context.Context) error {
		ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
	},
},
Name: to.StringPtr("internal-lb-ip"),
Name: to.StringPtr("internal-lb-ip-v4"),
},
},
BackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
	{
		Name: to.StringPtr(infraID + "-internal-controlplane"),
		Name: to.StringPtr(infraID + "-internal-controlplane-v4"),
	},
},
LoadBalancingRules: &[]mgmtnetwork.LoadBalancingRule{
	{
		LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{
			FrontendIPConfiguration: &mgmtnetwork.SubResource{
				ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip')]"),
				ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip-v4')]"),
			},
			BackendAddressPool: &mgmtnetwork.SubResource{
				ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane')]"),
				ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane-v4')]"),
			},
			Probe: &mgmtnetwork.SubResource{
				ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/probes', '" + infraID + "-internal-lb', 'api-internal-probe')]"),
@@ -263,15 +263,15 @@ func (i *Installer) installResources(ctx context.Context) error {
		IdleTimeoutInMinutes: to.Int32Ptr(30),
		DisableOutboundSnat: to.BoolPtr(true),
	},
	Name: to.StringPtr("api-internal"),
	Name: to.StringPtr("api-internal-v4"),
},
{
	LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{
		FrontendIPConfiguration: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-internal-lb', 'internal-lb-ip-v4')]"),
		},
		BackendAddressPool: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane-v4')]"),
		},
		Probe: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/probes', '" + infraID + "-internal-lb', 'sint-probe')]"),
@@ -282,7 +282,7 @@ func (i *Installer) installResources(ctx context.Context) error {
		BackendPort: to.Int32Ptr(22623),
		IdleTimeoutInMinutes: to.Int32Ptr(30),
	},
	Name: to.StringPtr("sint"),
	Name: to.StringPtr("sint-v4"),
},
},
Probes: &[]mgmtnetwork.Probe{
@@ -320,17 +320,17 @@ func (i *Installer) installResources(ctx context.Context) error {
InterfaceIPConfigurationPropertiesFormat: &mgmtnetwork.InterfaceIPConfigurationPropertiesFormat{
	LoadBalancerBackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane-v4')]"),
		},
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane-v4')]"),
		},
	},
	Subnet: &mgmtnetwork.Subnet{
		ID: to.StringPtr(i.doc.OpenShiftCluster.Properties.MasterProfile.SubnetID),
	},
},
Name: to.StringPtr("bootstrap-nic-ip"),
Name: to.StringPtr("bootstrap-nic-ip-v4"),
},
},
},
@@ -352,10 +352,10 @@ func (i *Installer) installResources(ctx context.Context) error {
InterfaceIPConfigurationPropertiesFormat: &mgmtnetwork.InterfaceIPConfigurationPropertiesFormat{
	LoadBalancerBackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane-v4')]"),
		},
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-internal-lb', '" + infraID + "-internal-controlplane-v4')]"),
		},
	},
	Subnet: &mgmtnetwork.Subnet{
File diff suppressed because one or more lines are too long
@@ -14,6 +14,7 @@ import (
	"github.com/openshift/installer/pkg/asset/kubeconfig"
	"github.com/openshift/installer/pkg/asset/machines"
	"github.com/openshift/installer/pkg/asset/manifests"
	"github.com/openshift/installer/pkg/asset/openshiftinstall"
	"github.com/openshift/installer/pkg/asset/password"
	"github.com/openshift/installer/pkg/asset/releaseimage"
	"github.com/openshift/installer/pkg/asset/rhcos"
@@ -71,6 +72,7 @@ var registeredTypes = map[string]asset.Asset{
	"*openshift.NetworkCRDs": &openshift.NetworkCRDs{},
	"*openshift.PrivateClusterOutbound": &openshift.PrivateClusterOutbound{},
	"*openshift.RoleCloudCredsSecretReader": &openshift.RoleCloudCredsSecretReader{},
	"*openshiftinstall.Config": &openshiftinstall.Config{},
	"*password.KubeadminPassword": &password.KubeadminPassword{},
	"*releaseimage.Image": &releaseimage.Image{},
	"*rhcos.BootstrapImage": new(rhcos.BootstrapImage),
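registeredTypes maps the serialized asset type names stored in the install graph to registered asset values, so a persisted graph can be decoded back into typed assets. A hypothetical illustration of such a lookup (the helper below is not taken from the repository, and it assumes the registeredTypes map and the asset package imported in the surrounding file):

```go
// lookupAsset resolves a stored type name, e.g. "*openshiftinstall.Config",
// to the registered asset value that the graph entry can be unmarshalled into.
func lookupAsset(typeName string) (asset.Asset, error) {
	a, ok := registeredTypes[typeName]
	if !ok {
		return nil, fmt.Errorf("unknown asset type %q", typeName)
	}
	return a, nil
}
```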
@@ -58,7 +58,11 @@ func TestGraphRoundTrip(t *testing.T) {
	SSHKey: sshkey.Type() + " " + base64.StdEncoding.EncodeToString(sshkey.Marshal()),
	BaseDomain: "dummy",
	Networking: &types.Networking{
		MachineCIDR: ipnet.MustParseCIDR("10.0.0.0/16"),
		MachineNetwork: []types.MachineNetworkEntry{
			{
				CIDR: *ipnet.MustParseCIDR("10.0.0.0/16"),
			},
		},
		NetworkType: "OpenShiftSDN",
		ClusterNetwork: []types.ClusterNetworkEntry{
			{
@@ -74,12 +78,14 @@ func TestGraphRoundTrip(t *testing.T) {
	Name: "master",
	Replicas: to.Int64Ptr(3),
	Hyperthreading: "Enabled",
	Architecture: types.ArchitectureAMD64,
},
Compute: []types.MachinePool{
	{
		Name: "worker",
		Replicas: to.Int64Ptr(3),
		Hyperthreading: "Enabled",
		Architecture: types.ArchitectureAMD64,
	},
},
Platform: types.Platform{
@@ -60,7 +60,7 @@ func (i *Installer) updateAPIIP(ctx context.Context) error {
	resourceGroup := stringutils.LastTokenByte(i.doc.OpenShiftCluster.Properties.ClusterProfile.ResourceGroupID, '/')
	var ipAddress string
	if i.doc.OpenShiftCluster.Properties.APIServerProfile.Visibility == api.VisibilityPublic {
		ip, err := i.publicipaddresses.Get(ctx, resourceGroup, infraID+"-pip", "")
		ip, err := i.publicipaddresses.Get(ctx, resourceGroup, infraID+"-pip-v4", "")
		if err != nil {
			return err
		}
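Across these hunks the IPv4-specific load balancer sub-resources gain a -v4 suffix (public IP, frontend IP configurations, backend pools, rules), so any code that resolves them by name has to agree on the new names. A hypothetical helper, not present in the repository, that centralizes the suffixed names could look like this:

```go
// v4Names derives the IPv4-suffixed resource names used by the cluster's
// load balancers from the infrastructure ID.
func v4Names(infraID string) (publicIP, internalFrontend, internalBackendPool string) {
	return infraID + "-pip-v4", "internal-lb-ip-v4", infraID + "-internal-controlplane-v4"
}
```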
@@ -27,15 +27,15 @@ func (i *Installer) apiServerPublicLoadBalancer(location string, visibility api.
{
	FrontendIPConfigurationPropertiesFormat: &mgmtnetwork.FrontendIPConfigurationPropertiesFormat{
		PublicIPAddress: &mgmtnetwork.PublicIPAddress{
			ID: to.StringPtr("[resourceId('Microsoft.Network/publicIPAddresses', '" + infraID + "-pip')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/publicIPAddresses', '" + infraID + "-pip-v4')]"),
		},
	},
	Name: to.StringPtr("public-lb-ip"),
	Name: to.StringPtr("public-lb-ip-v4"),
},
},
BackendAddressPools: &[]mgmtnetwork.BackendAddressPool{
	{
		Name: to.StringPtr(infraID + "-public-lb-control-plane"),
		Name: to.StringPtr(infraID + "-public-lb-control-plane-v4"),
	},
},
OutboundRules: &[]mgmtnetwork.OutboundRule{
@@ -43,11 +43,11 @@ func (i *Installer) apiServerPublicLoadBalancer(location string, visibility api.
OutboundRulePropertiesFormat: &mgmtnetwork.OutboundRulePropertiesFormat{
	FrontendIPConfigurations: &[]mgmtnetwork.SubResource{
		{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-public-lb', 'public-lb-ip')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-public-lb', 'public-lb-ip-v4')]"),
		},
	},
	BackendAddressPool: &mgmtnetwork.SubResource{
		ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane')]"),
		ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane-v4')]"),
	},
	Protocol: mgmtnetwork.LoadBalancerOutboundRuleProtocolAll,
	IdleTimeoutInMinutes: to.Int32Ptr(30),
@@ -66,10 +66,10 @@ func (i *Installer) apiServerPublicLoadBalancer(location string, visibility api.
{
	LoadBalancingRulePropertiesFormat: &mgmtnetwork.LoadBalancingRulePropertiesFormat{
		FrontendIPConfiguration: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-public-lb', 'public-lb-ip')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/frontendIPConfigurations', '" + infraID + "-public-lb', 'public-lb-ip-v4')]"),
		},
		BackendAddressPool: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane')]"),
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/backendAddressPools', '" + infraID + "-public-lb', '" + infraID + "-public-lb-control-plane-v4')]"),
		},
		Probe: &mgmtnetwork.SubResource{
			ID: to.StringPtr("[resourceId('Microsoft.Network/loadBalancers/probes', '" + infraID + "-public-lb', 'api-internal-probe')]"),
@@ -81,7 +81,7 @@ func (i *Installer) apiServerPublicLoadBalancer(location string, visibility api.
		IdleTimeoutInMinutes: to.Int32Ptr(30),
		DisableOutboundSnat: to.BoolPtr(true),
	},
	Name: to.StringPtr("api-internal"),
	Name: to.StringPtr("api-internal-v4"),
},
}
lb.Probes = &[]mgmtnetwork.Probe{
@@ -102,7 +102,7 @@ func (i *Installer) apiServerPublicLoadBalancer(location string, visibility api.
	Resource: lb,
	APIVersion: azureclient.APIVersions["Microsoft.Network"],
	DependsOn: []string{
		"Microsoft.Network/publicIPAddresses/" + infraID + "-pip",
		"Microsoft.Network/publicIPAddresses/" + infraID + "-pip-v4",
	},
}
}
253 vendor/github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/resources/models.go (generated, vendored, new file)

@@ -0,0 +1,253 @@
// +build go1.9
|
||||
|
||||
// Copyright 2020 Microsoft Corporation
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This code was auto-generated by:
|
||||
// github.com/Azure/azure-sdk-for-go/tools/profileBuilder
|
||||
|
||||
package resources
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
original "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultBaseURI = original.DefaultBaseURI
|
||||
)
|
||||
|
||||
type DeploymentMode = original.DeploymentMode
|
||||
|
||||
const (
|
||||
Complete DeploymentMode = original.Complete
|
||||
Incremental DeploymentMode = original.Incremental
|
||||
)
|
||||
|
||||
type OnErrorDeploymentType = original.OnErrorDeploymentType
|
||||
|
||||
const (
|
||||
LastSuccessful OnErrorDeploymentType = original.LastSuccessful
|
||||
SpecificDeployment OnErrorDeploymentType = original.SpecificDeployment
|
||||
)
|
||||
|
||||
type ResourceIdentityType = original.ResourceIdentityType
|
||||
|
||||
const (
|
||||
None ResourceIdentityType = original.None
|
||||
SystemAssigned ResourceIdentityType = original.SystemAssigned
|
||||
SystemAssignedUserAssigned ResourceIdentityType = original.SystemAssignedUserAssigned
|
||||
UserAssigned ResourceIdentityType = original.UserAssigned
|
||||
)
|
||||
|
||||
type AliasPathType = original.AliasPathType
|
||||
type AliasType = original.AliasType
|
||||
type BaseClient = original.BaseClient
|
||||
type BasicDependency = original.BasicDependency
|
||||
type Client = original.Client
|
||||
type CloudError = original.CloudError
|
||||
type CreateOrUpdateByIDFuture = original.CreateOrUpdateByIDFuture
|
||||
type CreateOrUpdateFuture = original.CreateOrUpdateFuture
|
||||
type DebugSetting = original.DebugSetting
|
||||
type DeleteByIDFuture = original.DeleteByIDFuture
|
||||
type DeleteFuture = original.DeleteFuture
|
||||
type Dependency = original.Dependency
|
||||
type Deployment = original.Deployment
|
||||
type DeploymentExportResult = original.DeploymentExportResult
|
||||
type DeploymentExtended = original.DeploymentExtended
|
||||
type DeploymentExtendedFilter = original.DeploymentExtendedFilter
|
||||
type DeploymentListResult = original.DeploymentListResult
|
||||
type DeploymentListResultIterator = original.DeploymentListResultIterator
|
||||
type DeploymentListResultPage = original.DeploymentListResultPage
|
||||
type DeploymentOperation = original.DeploymentOperation
|
||||
type DeploymentOperationProperties = original.DeploymentOperationProperties
|
||||
type DeploymentOperationsClient = original.DeploymentOperationsClient
|
||||
type DeploymentOperationsListResult = original.DeploymentOperationsListResult
|
||||
type DeploymentOperationsListResultIterator = original.DeploymentOperationsListResultIterator
|
||||
type DeploymentOperationsListResultPage = original.DeploymentOperationsListResultPage
|
||||
type DeploymentProperties = original.DeploymentProperties
|
||||
type DeploymentPropertiesExtended = original.DeploymentPropertiesExtended
|
||||
type DeploymentValidateResult = original.DeploymentValidateResult
|
||||
type DeploymentsClient = original.DeploymentsClient
|
||||
type DeploymentsCreateOrUpdateAtManagementGroupScopeFuture = original.DeploymentsCreateOrUpdateAtManagementGroupScopeFuture
|
||||
type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture = original.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture
|
||||
type DeploymentsCreateOrUpdateFuture = original.DeploymentsCreateOrUpdateFuture
|
||||
type DeploymentsDeleteAtManagementGroupScopeFuture = original.DeploymentsDeleteAtManagementGroupScopeFuture
|
||||
type DeploymentsDeleteAtSubscriptionScopeFuture = original.DeploymentsDeleteAtSubscriptionScopeFuture
|
||||
type DeploymentsDeleteFuture = original.DeploymentsDeleteFuture
|
||||
type ErrorAdditionalInfo = original.ErrorAdditionalInfo
|
||||
type ErrorResponse = original.ErrorResponse
|
||||
type ExportTemplateRequest = original.ExportTemplateRequest
|
||||
type GenericResource = original.GenericResource
|
||||
type GenericResourceExpanded = original.GenericResourceExpanded
|
||||
type GenericResourceFilter = original.GenericResourceFilter
|
||||
type Group = original.Group
|
||||
type GroupExportResult = original.GroupExportResult
|
||||
type GroupFilter = original.GroupFilter
|
||||
type GroupListResult = original.GroupListResult
|
||||
type GroupListResultIterator = original.GroupListResultIterator
|
||||
type GroupListResultPage = original.GroupListResultPage
|
||||
type GroupPatchable = original.GroupPatchable
|
||||
type GroupProperties = original.GroupProperties
|
||||
type GroupsClient = original.GroupsClient
|
||||
type GroupsDeleteFuture = original.GroupsDeleteFuture
|
||||
type HTTPMessage = original.HTTPMessage
|
||||
type Identity = original.Identity
|
||||
type IdentityUserAssignedIdentitiesValue = original.IdentityUserAssignedIdentitiesValue
|
||||
type ListResult = original.ListResult
|
||||
type ListResultIterator = original.ListResultIterator
|
||||
type ListResultPage = original.ListResultPage
|
||||
type ManagementErrorWithDetails = original.ManagementErrorWithDetails
|
||||
type MoveInfo = original.MoveInfo
|
||||
type MoveResourcesFuture = original.MoveResourcesFuture
|
||||
type OnErrorDeployment = original.OnErrorDeployment
|
||||
type OnErrorDeploymentExtended = original.OnErrorDeploymentExtended
|
||||
type Operation = original.Operation
|
||||
type OperationDisplay = original.OperationDisplay
|
||||
type OperationListResult = original.OperationListResult
|
||||
type OperationListResultIterator = original.OperationListResultIterator
|
||||
type OperationListResultPage = original.OperationListResultPage
|
||||
type OperationsClient = original.OperationsClient
|
||||
type ParametersLink = original.ParametersLink
|
||||
type Plan = original.Plan
|
||||
type Provider = original.Provider
|
||||
type ProviderListResult = original.ProviderListResult
|
||||
type ProviderListResultIterator = original.ProviderListResultIterator
|
||||
type ProviderListResultPage = original.ProviderListResultPage
|
||||
type ProviderOperationDisplayProperties = original.ProviderOperationDisplayProperties
|
||||
type ProviderResourceType = original.ProviderResourceType
|
||||
type ProvidersClient = original.ProvidersClient
|
||||
type Resource = original.Resource
|
||||
type Sku = original.Sku
|
||||
type SubResource = original.SubResource
|
||||
type TagCount = original.TagCount
|
||||
type TagDetails = original.TagDetails
|
||||
type TagValue = original.TagValue
|
||||
type TagsClient = original.TagsClient
|
||||
type TagsListResult = original.TagsListResult
|
||||
type TagsListResultIterator = original.TagsListResultIterator
|
||||
type TagsListResultPage = original.TagsListResultPage
|
||||
type TargetResource = original.TargetResource
|
||||
type TemplateHashResult = original.TemplateHashResult
|
||||
type TemplateLink = original.TemplateLink
|
||||
type UpdateByIDFuture = original.UpdateByIDFuture
|
||||
type UpdateFuture = original.UpdateFuture
|
||||
type ValidateMoveResourcesFuture = original.ValidateMoveResourcesFuture
|
||||
|
||||
func New(subscriptionID string) BaseClient {
|
||||
return original.New(subscriptionID)
|
||||
}
|
||||
func NewClient(subscriptionID string) Client {
|
||||
return original.NewClient(subscriptionID)
|
||||
}
|
||||
func NewClientWithBaseURI(baseURI string, subscriptionID string) Client {
|
||||
return original.NewClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewDeploymentListResultIterator(page DeploymentListResultPage) DeploymentListResultIterator {
|
||||
return original.NewDeploymentListResultIterator(page)
|
||||
}
|
||||
func NewDeploymentListResultPage(getNextPage func(context.Context, DeploymentListResult) (DeploymentListResult, error)) DeploymentListResultPage {
|
||||
return original.NewDeploymentListResultPage(getNextPage)
|
||||
}
|
||||
func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient {
|
||||
return original.NewDeploymentOperationsClient(subscriptionID)
|
||||
}
|
||||
func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient {
|
||||
return original.NewDeploymentOperationsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewDeploymentOperationsListResultIterator(page DeploymentOperationsListResultPage) DeploymentOperationsListResultIterator {
|
||||
return original.NewDeploymentOperationsListResultIterator(page)
|
||||
}
|
||||
func NewDeploymentOperationsListResultPage(getNextPage func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error)) DeploymentOperationsListResultPage {
|
||||
return original.NewDeploymentOperationsListResultPage(getNextPage)
|
||||
}
|
||||
func NewDeploymentsClient(subscriptionID string) DeploymentsClient {
|
||||
return original.NewDeploymentsClient(subscriptionID)
|
||||
}
|
||||
func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient {
|
||||
return original.NewDeploymentsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator {
|
||||
return original.NewGroupListResultIterator(page)
|
||||
}
|
||||
func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage {
|
||||
return original.NewGroupListResultPage(getNextPage)
|
||||
}
|
||||
func NewGroupsClient(subscriptionID string) GroupsClient {
|
||||
return original.NewGroupsClient(subscriptionID)
|
||||
}
|
||||
func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
|
||||
return original.NewGroupsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewListResultIterator(page ListResultPage) ListResultIterator {
|
||||
return original.NewListResultIterator(page)
|
||||
}
|
||||
func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage {
|
||||
return original.NewListResultPage(getNextPage)
|
||||
}
|
||||
func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
|
||||
return original.NewOperationListResultIterator(page)
|
||||
}
|
||||
func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
|
||||
return original.NewOperationListResultPage(getNextPage)
|
||||
}
|
||||
func NewOperationsClient(subscriptionID string) OperationsClient {
|
||||
return original.NewOperationsClient(subscriptionID)
|
||||
}
|
||||
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
|
||||
return original.NewOperationsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewProviderListResultIterator(page ProviderListResultPage) ProviderListResultIterator {
|
||||
return original.NewProviderListResultIterator(page)
|
||||
}
|
||||
func NewProviderListResultPage(getNextPage func(context.Context, ProviderListResult) (ProviderListResult, error)) ProviderListResultPage {
|
||||
return original.NewProviderListResultPage(getNextPage)
|
||||
}
|
||||
func NewProvidersClient(subscriptionID string) ProvidersClient {
|
||||
return original.NewProvidersClient(subscriptionID)
|
||||
}
|
||||
func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient {
|
||||
return original.NewProvidersClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewTagsClient(subscriptionID string) TagsClient {
|
||||
return original.NewTagsClient(subscriptionID)
|
||||
}
|
||||
func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient {
|
||||
return original.NewTagsClientWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func NewTagsListResultIterator(page TagsListResultPage) TagsListResultIterator {
|
||||
return original.NewTagsListResultIterator(page)
|
||||
}
|
||||
func NewTagsListResultPage(getNextPage func(context.Context, TagsListResult) (TagsListResult, error)) TagsListResultPage {
|
||||
return original.NewTagsListResultPage(getNextPage)
|
||||
}
|
||||
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
|
||||
return original.NewWithBaseURI(baseURI, subscriptionID)
|
||||
}
|
||||
func PossibleDeploymentModeValues() []DeploymentMode {
|
||||
return original.PossibleDeploymentModeValues()
|
||||
}
|
||||
func PossibleOnErrorDeploymentTypeValues() []OnErrorDeploymentType {
|
||||
return original.PossibleOnErrorDeploymentTypeValues()
|
||||
}
|
||||
func PossibleResourceIdentityTypeValues() []ResourceIdentityType {
|
||||
return original.PossibleResourceIdentityTypeValues()
|
||||
}
|
||||
func UserAgent() string {
|
||||
return original.UserAgent() + " profiles/latest"
|
||||
}
|
||||
func Version() string {
|
||||
return original.Version()
|
||||
}
|
52 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/client.go (generated, vendored, new file)

@@ -0,0 +1,52 @@
// Package resources implements the Azure ARM Resources service API version 2019-05-01.
|
||||
//
|
||||
// Provides operations for working with resources and resource groups.
|
||||
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultBaseURI is the default URI used for the service Resources
|
||||
DefaultBaseURI = "https://management.azure.com"
|
||||
)
|
||||
|
||||
// BaseClient is the base client for Resources.
|
||||
type BaseClient struct {
|
||||
autorest.Client
|
||||
BaseURI string
|
||||
SubscriptionID string
|
||||
}
|
||||
|
||||
// New creates an instance of the BaseClient client.
|
||||
func New(subscriptionID string) BaseClient {
|
||||
return NewWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewWithBaseURI creates an instance of the BaseClient client using a custom endpoint. Use this when interacting with
|
||||
// an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
|
||||
func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
|
||||
return BaseClient{
|
||||
Client: autorest.NewClientWithUserAgent(UserAgent()),
|
||||
BaseURI: baseURI,
|
||||
SubscriptionID: subscriptionID,
|
||||
}
|
||||
}
|
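The vendored client.go above defines the standard autorest base client for the 2019-05-01 resources API, including NewWithBaseURI for non-public clouds. As a minimal usage sketch (assuming the vendored package path and the constructors listed in the profile file earlier; the helper name and the US Government endpoint are illustrative):

```go
package example

import (
	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
	"github.com/Azure/go-autorest/autorest"
)

// newDeploymentsClient targets a sovereign cloud by passing an explicit base
// URI instead of relying on DefaultBaseURI (https://management.azure.com).
func newDeploymentsClient(subscriptionID string, authorizer autorest.Authorizer) resources.DeploymentsClient {
	c := resources.NewDeploymentsClientWithBaseURI("https://management.usgovcloudapi.net", subscriptionID)
	c.Authorizer = authorizer
	return c
}
```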
688 vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deploymentoperations.go (generated, vendored, new file)

@@ -0,0 +1,688 @@
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// DeploymentOperationsClient is the provides operations for working with resources and resource groups.
|
||||
type DeploymentOperationsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewDeploymentOperationsClient creates an instance of the DeploymentOperationsClient client.
|
||||
func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient {
|
||||
return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewDeploymentOperationsClientWithBaseURI creates an instance of the DeploymentOperationsClient client using a custom
|
||||
// endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure
|
||||
// stack).
|
||||
func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient {
|
||||
return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// Get gets a deployments operation.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// deploymentName - the name of the deployment.
|
||||
// operationID - the ID of the operation to get.
|
||||
func (client DeploymentOperationsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}},
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName, deploymentName, operationID)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client DeploymentOperationsClient) GetPreparer(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"operationId": autorest.Encode("path", operationID),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// GetAtManagementGroupScope gets a deployments operation.
|
||||
// Parameters:
|
||||
// groupID - the management group ID.
|
||||
// deploymentName - the name of the deployment.
|
||||
// operationID - the ID of the operation to get.
|
||||
func (client DeploymentOperationsClient) GetAtManagementGroupScope(ctx context.Context, groupID string, deploymentName string, operationID string) (result DeploymentOperation, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.GetAtManagementGroupScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: groupID,
|
||||
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "GetAtManagementGroupScope", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetAtManagementGroupScopePreparer(ctx, groupID, deploymentName, operationID)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtManagementGroupScope", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetAtManagementGroupScopeSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtManagementGroupScope", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetAtManagementGroupScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtManagementGroupScope", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetAtManagementGroupScopePreparer prepares the GetAtManagementGroupScope request.
|
||||
func (client DeploymentOperationsClient) GetAtManagementGroupScopePreparer(ctx context.Context, groupID string, deploymentName string, operationID string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"groupId": autorest.Encode("path", groupID),
|
||||
"operationId": autorest.Encode("path", operationID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetAtManagementGroupScopeSender sends the GetAtManagementGroupScope request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) GetAtManagementGroupScopeSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
}
|
||||
|
||||
// GetAtManagementGroupScopeResponder handles the response to the GetAtManagementGroupScope request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) GetAtManagementGroupScopeResponder(resp *http.Response) (result DeploymentOperation, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// GetAtSubscriptionScope gets a deployments operation.
|
||||
// Parameters:
|
||||
// deploymentName - the name of the deployment.
|
||||
// operationID - the ID of the operation to get.
|
||||
func (client DeploymentOperationsClient) GetAtSubscriptionScope(ctx context.Context, deploymentName string, operationID string) (result DeploymentOperation, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.GetAtSubscriptionScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "GetAtSubscriptionScope", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetAtSubscriptionScopePreparer(ctx, deploymentName, operationID)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetAtSubscriptionScopeSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetAtSubscriptionScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetAtSubscriptionScopePreparer prepares the GetAtSubscriptionScope request.
|
||||
func (client DeploymentOperationsClient) GetAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, operationID string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"operationId": autorest.Encode("path", operationID),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetAtSubscriptionScopeSender sends the GetAtSubscriptionScope request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) GetAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// GetAtSubscriptionScopeResponder handles the response to the GetAtSubscriptionScope request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) GetAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentOperation, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List gets all deployments operations for a deployment.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group. The name is case insensitive.
|
||||
// deploymentName - the name of the deployment.
|
||||
// top - the number of results to return.
|
||||
func (client DeploymentOperationsClient) List(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.dolr.Response.Response != nil {
|
||||
sc = result.dolr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}},
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "List", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx, resourceGroupName, deploymentName, top)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.dolr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.dolr, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client DeploymentOperationsClient) ListPreparer(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if top != nil {
|
||||
queryParameters["$top"] = autorest.Encode("query", *top)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client DeploymentOperationsClient) listNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) {
|
||||
req, err := lastResults.deploymentOperationsListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client DeploymentOperationsClient) ListComplete(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx, resourceGroupName, deploymentName, top)
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtManagementGroupScope gets all deployment operations for a deployment at management group scope.
|
||||
// Parameters:
|
||||
// groupID - the management group ID.
|
||||
// deploymentName - the name of the deployment.
|
||||
// top - the number of results to return.
|
||||
func (client DeploymentOperationsClient) ListAtManagementGroupScope(ctx context.Context, groupID string, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtManagementGroupScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.dolr.Response.Response != nil {
|
||||
sc = result.dolr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: groupID,
|
||||
Constraints: []validation.Constraint{{Target: "groupID", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "groupID", Name: validation.MinLength, Rule: 1, Chain: nil}}},
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "ListAtManagementGroupScope", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listAtManagementGroupScopeNextResults
|
||||
req, err := client.ListAtManagementGroupScopePreparer(ctx, groupID, deploymentName, top)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtManagementGroupScope", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListAtManagementGroupScopeSender(req)
|
||||
if err != nil {
|
||||
result.dolr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtManagementGroupScope", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.dolr, err = client.ListAtManagementGroupScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtManagementGroupScope", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtManagementGroupScopePreparer prepares the ListAtManagementGroupScope request.
|
||||
func (client DeploymentOperationsClient) ListAtManagementGroupScopePreparer(ctx context.Context, groupID string, deploymentName string, top *int32) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"groupId": autorest.Encode("path", groupID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if top != nil {
|
||||
queryParameters["$top"] = autorest.Encode("query", *top)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/providers/Microsoft.Management/managementGroups/{groupId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListAtManagementGroupScopeSender sends the ListAtManagementGroupScope request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) ListAtManagementGroupScopeSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
}
|
||||
|
||||
// ListAtManagementGroupScopeResponder handles the response to the ListAtManagementGroupScope request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) ListAtManagementGroupScopeResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listAtManagementGroupScopeNextResults retrieves the next set of results, if any.
|
||||
func (client DeploymentOperationsClient) listAtManagementGroupScopeNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) {
|
||||
req, err := lastResults.deploymentOperationsListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtManagementGroupScopeNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListAtManagementGroupScopeSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtManagementGroupScopeNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListAtManagementGroupScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtManagementGroupScopeNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtManagementGroupScopeComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client DeploymentOperationsClient) ListAtManagementGroupScopeComplete(ctx context.Context, groupID string, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtManagementGroupScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.ListAtManagementGroupScope(ctx, groupID, deploymentName, top)
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtSubscriptionScope gets all deployment operations for a deployment at subscription scope.
|
||||
// Parameters:
|
||||
// deploymentName - the name of the deployment.
|
||||
// top - the number of results to return.
|
||||
func (client DeploymentOperationsClient) ListAtSubscriptionScope(ctx context.Context, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtSubscriptionScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.dolr.Response.Response != nil {
|
||||
sc = result.dolr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: deploymentName,
|
||||
Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.DeploymentOperationsClient", "ListAtSubscriptionScope", err.Error())
|
||||
}
|
||||
|
||||
result.fn = client.listAtSubscriptionScopeNextResults
|
||||
req, err := client.ListAtSubscriptionScopePreparer(ctx, deploymentName, top)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListAtSubscriptionScopeSender(req)
|
||||
if err != nil {
|
||||
result.dolr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.dolr, err = client.ListAtSubscriptionScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtSubscriptionScopePreparer prepares the ListAtSubscriptionScope request.
|
||||
func (client DeploymentOperationsClient) ListAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, top *int32) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"deploymentName": autorest.Encode("path", deploymentName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if top != nil {
|
||||
queryParameters["$top"] = autorest.Encode("query", *top)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListAtSubscriptionScopeSender sends the ListAtSubscriptionScope request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client DeploymentOperationsClient) ListAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListAtSubscriptionScopeResponder handles the response to the ListAtSubscriptionScope request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client DeploymentOperationsClient) ListAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listAtSubscriptionScopeNextResults retrieves the next set of results, if any.
|
||||
func (client DeploymentOperationsClient) listAtSubscriptionScopeNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) {
|
||||
req, err := lastResults.deploymentOperationsListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListAtSubscriptionScopeSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListAtSubscriptionScopeResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListAtSubscriptionScopeComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client DeploymentOperationsClient) ListAtSubscriptionScopeComplete(ctx context.Context, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtSubscriptionScope")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.ListAtSubscriptionScope(ctx, deploymentName, top)
|
||||
return
|
||||
}
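// Illustrative usage sketch, not part of the vendored file: paging through a
// deployment's operations with ListComplete. The subscription ID, resource group
// and deployment name are placeholders, and the authorizer is assumed to come
// from github.com/Azure/go-autorest/autorest/azure/auth; context, fmt and
// autorest are assumed to be imported.
func exampleListDeploymentOperations(ctx context.Context, authorizer autorest.Authorizer) error {
	client := NewDeploymentOperationsClient("<subscription-id>")
	client.Authorizer = authorizer

	// ListComplete wraps List and follows nextLink paging transparently.
	it, err := client.ListComplete(ctx, "<resource-group>", "<deployment-name>", nil)
	if err != nil {
		return err
	}
	for ; it.NotDone(); err = it.NextWithContext(ctx) {
		if err != nil {
			return err
		}
		op := it.Value()
		if op.OperationID != nil && op.Properties != nil && op.Properties.ProvisioningState != nil {
			fmt.Printf("operation %s: %s\n", *op.OperationID, *op.Properties.ProvisioningState)
		}
	}
	return nil
}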
|
2364  vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/deployments.go  (generated, vendored, new file; diff not shown because of its size)
670  vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/groups.go  (generated, vendored, new file)
@@ -0,0 +1,670 @@
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/autorest/validation"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// GroupsClient provides operations for working with resources and resource groups.
|
||||
type GroupsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewGroupsClient creates an instance of the GroupsClient client.
|
||||
func NewGroupsClient(subscriptionID string) GroupsClient {
|
||||
return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client using a custom endpoint. Use this when
|
||||
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
|
||||
func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient {
|
||||
return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// CheckExistence checks whether a resource group exists.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to check. The name is case insensitive.
|
||||
func (client GroupsClient) CheckExistence(ctx context.Context, resourceGroupName string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CheckExistence")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "CheckExistence", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.CheckExistencePreparer(ctx, resourceGroupName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CheckExistenceSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CheckExistenceResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CheckExistencePreparer prepares the CheckExistence request.
|
||||
func (client GroupsClient) CheckExistencePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsHead(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CheckExistenceSender sends the CheckExistence request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// CheckExistenceResponder handles the response to the CheckExistence request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
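// Illustrative usage sketch, not part of the vendored file: CheckExistence issues a
// HEAD request, so the answer is carried by the status code rather than a body
// (204 No Content when the group exists, 404 Not Found when it does not; both are
// accepted by the responder above without an error). The group name is a placeholder.
func exampleResourceGroupExists(ctx context.Context, client GroupsClient) (bool, error) {
	resp, err := client.CheckExistence(ctx, "<resource-group>")
	if err != nil {
		return false, err
	}
	return resp.StatusCode == http.StatusNoContent, nil
}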
|
||||
|
||||
// CreateOrUpdate creates or updates a resource group.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to create or update. Can include alphanumeric,
|
||||
// underscore, parentheses, hyphen, period (except at end), and Unicode characters that match the allowed
|
||||
// characters.
|
||||
// parameters - parameters supplied to the create or update a resource group.
|
||||
func (client GroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters Group) (result Group, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CreateOrUpdate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}},
|
||||
{TargetValue: parameters,
|
||||
Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "CreateOrUpdate", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateOrUpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateOrUpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
|
||||
func (client GroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, parameters Group) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
parameters.ID = nil
|
||||
parameters.Name = nil
|
||||
parameters.Type = nil
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result Group, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
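// Illustrative usage sketch, not part of the vendored file: creating or updating a
// resource group. Per the validation above only Location is required; the name,
// location and tag values are placeholders, and to.StringPtr is assumed to come
// from github.com/Azure/go-autorest/autorest/to.
func exampleCreateResourceGroup(ctx context.Context, client GroupsClient) (Group, error) {
	return client.CreateOrUpdate(ctx, "<resource-group>", Group{
		Location: to.StringPtr("eastus"),
		Tags:     map[string]*string{"purpose": to.StringPtr("example")},
	})
}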
|
||||
|
||||
// Delete deletes a resource group. When you delete a resource group, all of its resources are also deleted. Deleting a resource group deletes
|
||||
// all of its template deployments and currently stored operations.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to delete. The name is case insensitive.
|
||||
func (client GroupsClient) Delete(ctx context.Context, resourceGroupName string) (result GroupsDeleteFuture, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response() != nil {
|
||||
sc = result.Response().StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "Delete", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.DeletePreparer(ctx, resourceGroupName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteSender(req)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", result.Response(), "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client GroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteFuture, err error) {
|
||||
var resp *http.Response
|
||||
resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
future.Future, err = azure.NewFutureFromResponse(resp)
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
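// Illustrative usage sketch, not part of the vendored file: Delete is a long-running
// operation, so the returned GroupsDeleteFuture is polled to completion before the
// final response is read. The resource group name is a placeholder.
func exampleDeleteResourceGroup(ctx context.Context, client GroupsClient) error {
	future, err := client.Delete(ctx, "<resource-group>")
	if err != nil {
		return err
	}
	// WaitForCompletionRef blocks until the operation finishes, honoring the
	// client's polling duration and delay settings.
	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
		return err
	}
	_, err = future.Result(client)
	return err
}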
|
||||
|
||||
// ExportTemplate captures the specified resource group as a template.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to export as a template.
|
||||
// parameters - parameters for exporting the template.
|
||||
func (client GroupsClient) ExportTemplate(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (result GroupExportResult, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ExportTemplate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "ExportTemplate", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.ExportTemplatePreparer(ctx, resourceGroupName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ExportTemplateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.ExportTemplateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ExportTemplatePreparer prepares the ExportTemplate request.
|
||||
func (client GroupsClient) ExportTemplatePreparer(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ExportTemplateSender sends the ExportTemplate request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ExportTemplateResponder handles the response to the ExportTemplate request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) ExportTemplateResponder(resp *http.Response) (result GroupExportResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Get gets a resource group.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to get. The name is case insensitive.
|
||||
func (client GroupsClient) Get(ctx context.Context, resourceGroupName string) (result Group, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "Get", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.GetPreparer(ctx, resourceGroupName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client GroupsClient) GetPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) GetResponder(resp *http.Response) (result Group, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List gets all the resource groups for a subscription.
|
||||
// Parameters:
|
||||
// filter - the filter to apply on the operation.<br><br>You can filter by tag names and values. For example,
|
||||
// to filter for a tag name and value, use $filter=tagName eq 'tag1' and tagValue eq 'Value1'
|
||||
// top - the number of results to return. If null is passed, returns all resource groups.
|
||||
func (client GroupsClient) List(ctx context.Context, filter string, top *int32) (result GroupListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.glr.Response.Response != nil {
|
||||
sc = result.glr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx, filter, top)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.glr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.glr, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client GroupsClient) ListPreparer(ctx context.Context, filter string, top *int32) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if len(filter) > 0 {
|
||||
queryParameters["$filter"] = autorest.Encode("query", filter)
|
||||
}
|
||||
if top != nil {
|
||||
queryParameters["$top"] = autorest.Encode("query", *top)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) ListResponder(resp *http.Response) (result GroupListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client GroupsClient) listNextResults(ctx context.Context, lastResults GroupListResult) (result GroupListResult, err error) {
|
||||
req, err := lastResults.groupListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client GroupsClient) ListComplete(ctx context.Context, filter string, top *int32) (result GroupListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx, filter, top)
|
||||
return
|
||||
}
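// Illustrative usage sketch, not part of the vendored file: listing the resource
// groups that carry a given tag, using the $filter syntax described above. The tag
// name and value are placeholders; a nil top returns all matching groups.
func exampleListTaggedGroups(ctx context.Context, client GroupsClient) ([]string, error) {
	var names []string
	it, err := client.ListComplete(ctx, "tagName eq 'env' and tagValue eq 'dev'", nil)
	if err != nil {
		return nil, err
	}
	for ; it.NotDone(); err = it.NextWithContext(ctx) {
		if err != nil {
			return nil, err
		}
		if g := it.Value(); g.Name != nil {
			names = append(names, *g.Name)
		}
	}
	return names, nil
}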
|
||||
|
||||
// Update updates a resource group. Resource groups can be updated through a simple PATCH operation to a group address. The format of the request
|
||||
// is the same as that for creating a resource group. If a field is unspecified, the current value is retained.
|
||||
// Parameters:
|
||||
// resourceGroupName - the name of the resource group to update. The name is case insensitive.
|
||||
// parameters - parameters supplied to update a resource group.
|
||||
func (client GroupsClient) Update(ctx context.Context, resourceGroupName string, parameters GroupPatchable) (result Group, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Update")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
if err := validation.Validate([]validation.Validation{
|
||||
{TargetValue: resourceGroupName,
|
||||
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
|
||||
{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil {
|
||||
return result, validation.NewError("resources.GroupsClient", "Update", err.Error())
|
||||
}
|
||||
|
||||
req, err := client.UpdatePreparer(ctx, resourceGroupName, parameters)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.UpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.UpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UpdatePreparer prepares the Update request.
|
||||
func (client GroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, parameters GroupPatchable) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceGroupName": autorest.Encode("path", resourceGroupName),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsContentType("application/json; charset=utf-8"),
|
||||
autorest.AsPatch(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters),
|
||||
autorest.WithJSON(parameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// UpdateSender sends the Update request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client GroupsClient) UpdateSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// UpdateResponder handles the response to the Update request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client GroupsClient) UpdateResponder(resp *http.Response) (result Group, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
2208  vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/models.go  (generated, vendored, new file; diff not shown because of its size)
147  vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/operations.go  (generated, vendored, new file)
@@ -0,0 +1,147 @@
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// OperationsClient provides operations for working with resources and resource groups.
|
||||
type OperationsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewOperationsClient creates an instance of the OperationsClient client.
|
||||
func NewOperationsClient(subscriptionID string) OperationsClient {
|
||||
return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client using a custom endpoint. Use this
|
||||
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
|
||||
func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient {
|
||||
return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// List lists all of the available Microsoft.Resources REST API operations.
|
||||
func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.olr.Response.Response != nil {
|
||||
sc = result.olr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.olr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.olr, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPath("/providers/Microsoft.Resources/operations"),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
|
||||
req, err := lastResults.operationListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx)
|
||||
return
|
||||
}
|
389  vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/providers.go  (generated, vendored, new file)
@@ -0,0 +1,389 @@
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// ProvidersClient provides operations for working with resources and resource groups.
|
||||
type ProvidersClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewProvidersClient creates an instance of the ProvidersClient client.
|
||||
func NewProvidersClient(subscriptionID string) ProvidersClient {
|
||||
return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client using a custom endpoint. Use this
|
||||
// when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
|
||||
func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient {
|
||||
return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// Get gets the specified resource provider.
|
||||
// Parameters:
|
||||
// resourceProviderNamespace - the namespace of the resource provider.
|
||||
// expand - the $expand query parameter. For example, to include property aliases in response, use
|
||||
// $expand=resourceTypes/aliases.
|
||||
func (client ProvidersClient) Get(ctx context.Context, resourceProviderNamespace string, expand string) (result Provider, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Get")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.GetPreparer(ctx, resourceProviderNamespace, expand)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.GetSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.GetResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// GetPreparer prepares the Get request.
|
||||
func (client ProvidersClient) GetPreparer(ctx context.Context, resourceProviderNamespace string, expand string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if len(expand) > 0 {
|
||||
queryParameters["$expand"] = autorest.Encode("query", expand)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// GetSender sends the Get request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// GetResponder handles the response to the Get request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// List gets all resource providers for a subscription.
|
||||
// Parameters:
|
||||
// top - the number of results to return. If null is passed, all resource providers are returned.
|
||||
// expand - the properties to include in the results. For example, use &$expand=metadata in the query string to
|
||||
// retrieve resource provider metadata. To include property aliases in response, use
|
||||
// $expand=resourceTypes/aliases.
|
||||
func (client ProvidersClient) List(ctx context.Context, top *int32, expand string) (result ProviderListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.plr.Response.Response != nil {
|
||||
sc = result.plr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx, top, expand)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.plr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.plr, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client ProvidersClient) ListPreparer(ctx context.Context, top *int32, expand string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
if top != nil {
|
||||
queryParameters["$top"] = autorest.Encode("query", *top)
|
||||
}
|
||||
if len(expand) > 0 {
|
||||
queryParameters["$expand"] = autorest.Encode("query", expand)
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client ProvidersClient) listNextResults(ctx context.Context, lastResults ProviderListResult) (result ProviderListResult, err error) {
|
||||
req, err := lastResults.providerListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client ProvidersClient) ListComplete(ctx context.Context, top *int32, expand string) (result ProviderListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx, top, expand)
|
||||
return
|
||||
}
|
||||
|
||||
// Register registers a subscription with a resource provider.
|
||||
// Parameters:
|
||||
// resourceProviderNamespace - the namespace of the resource provider to register.
|
||||
func (client ProvidersClient) Register(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Register")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.RegisterPreparer(ctx, resourceProviderNamespace)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.RegisterSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.RegisterResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// RegisterPreparer prepares the Register request.
|
||||
func (client ProvidersClient) RegisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// RegisterSender sends the Register request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// RegisterResponder handles the response to the Register request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Unregister unregisters a subscription from a resource provider.
|
||||
// Parameters:
|
||||
// resourceProviderNamespace - the namespace of the resource provider to unregister.
|
||||
func (client ProvidersClient) Unregister(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Unregister")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.UnregisterPreparer(ctx, resourceProviderNamespace)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.UnregisterSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.UnregisterResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// UnregisterPreparer prepares the Unregister request.
|
||||
func (client ProvidersClient) UnregisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace),
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPost(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// UnregisterSender sends the Unregister request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// UnregisterResponder handles the response to the Unregister request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
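Provider registration is asynchronous on the service side, so a caller typically issues Register and then polls Get until the namespace reports Registered. A hedged sketch of that pattern; the package clause, helper name and ten-second interval are illustrative, not taken from this repository:

package sketch

import (
	"context"
	"time"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
)

// ensureProviderRegistered registers a resource provider namespace and waits
// until the subscription reports it as Registered.
func ensureProviderRegistered(ctx context.Context, client resources.ProvidersClient, namespace string) error {
	provider, err := client.Register(ctx, namespace)
	if err != nil {
		return err
	}
	// RegistrationState is usually "Registering" straight after the call.
	for provider.RegistrationState != nil && *provider.RegistrationState != "Registered" {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(10 * time.Second):
		}
		if provider, err = client.Get(ctx, namespace, ""); err != nil {
			return err
		}
	}
	return nil
}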
vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/resources.go (generated, vendored, new file, 1339 lines; diff not shown because of its size)
vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/tags.go (generated, vendored, new file, 450 lines)
|
@ -0,0 +1,450 @@
|
|||
package resources
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/Azure/go-autorest/tracing"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
// TagsClient provides operations for working with resource tags.
|
||||
type TagsClient struct {
|
||||
BaseClient
|
||||
}
|
||||
|
||||
// NewTagsClient creates an instance of the TagsClient client.
|
||||
func NewTagsClient(subscriptionID string) TagsClient {
|
||||
return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID)
|
||||
}
|
||||
|
||||
// NewTagsClientWithBaseURI creates an instance of the TagsClient client using a custom endpoint. Use this when
|
||||
// interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure stack).
|
||||
func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient {
|
||||
return TagsClient{NewWithBaseURI(baseURI, subscriptionID)}
|
||||
}
|
||||
|
||||
// CreateOrUpdate the tag name can have a maximum of 512 characters and is case insensitive. Tag names created by Azure
|
||||
// have prefixes of microsoft, azure, or windows. You cannot create tags with one of these prefixes.
|
||||
// Parameters:
|
||||
// tagName - the name of the tag to create.
|
||||
func (client TagsClient) CreateOrUpdate(ctx context.Context, tagName string) (result TagDetails, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdate")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.CreateOrUpdatePreparer(ctx, tagName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateOrUpdateSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateOrUpdateResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
|
||||
func (client TagsClient) CreateOrUpdatePreparer(ctx context.Context, tagName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
"tagName": autorest.Encode("path", tagName),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdateValue creates a tag value. The name of the tag must already exist.
|
||||
// Parameters:
|
||||
// tagName - the name of the tag.
|
||||
// tagValue - the value of the tag to create.
|
||||
func (client TagsClient) CreateOrUpdateValue(ctx context.Context, tagName string, tagValue string) (result TagValue, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdateValue")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response.Response != nil {
|
||||
sc = result.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.CreateOrUpdateValuePreparer(ctx, tagName, tagValue)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.CreateOrUpdateValueSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.CreateOrUpdateValueResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request.
|
||||
func (client TagsClient) CreateOrUpdateValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
"tagName": autorest.Encode("path", tagName),
|
||||
"tagValue": autorest.Encode("path", tagValue),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsPut(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// Delete you must remove all values from a resource tag before you can delete it.
|
||||
// Parameters:
|
||||
// tagName - the name of the tag.
|
||||
func (client TagsClient) Delete(ctx context.Context, tagName string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.Delete")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.DeletePreparer(ctx, tagName)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeletePreparer prepares the Delete request.
|
||||
func (client TagsClient) DeletePreparer(ctx context.Context, tagName string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
"tagName": autorest.Encode("path", tagName),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteSender sends the Delete request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// DeleteResponder handles the response to the Delete request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteValue deletes a tag value.
|
||||
// Parameters:
|
||||
// tagName - the name of the tag.
|
||||
// tagValue - the value of the tag to delete.
|
||||
func (client TagsClient) DeleteValue(ctx context.Context, tagName string, tagValue string) (result autorest.Response, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.DeleteValue")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response != nil {
|
||||
sc = result.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
req, err := client.DeleteValuePreparer(ctx, tagName, tagValue)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.DeleteValueSender(req)
|
||||
if err != nil {
|
||||
result.Response = resp
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result, err = client.DeleteValueResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// DeleteValuePreparer prepares the DeleteValue request.
|
||||
func (client TagsClient) DeleteValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
"tagName": autorest.Encode("path", tagName),
|
||||
"tagValue": autorest.Encode("path", tagValue),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsDelete(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// DeleteValueSender sends the DeleteValue request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// DeleteValueResponder handles the response to the DeleteValue request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
|
||||
autorest.ByClosing())
|
||||
result.Response = resp
|
||||
return
|
||||
}
|
||||
|
||||
// List gets the names and values of all resource tags that are defined in a subscription.
|
||||
func (client TagsClient) List(ctx context.Context) (result TagsListResultPage, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.tlr.Response.Response != nil {
|
||||
sc = result.tlr.Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.fn = client.listNextResults
|
||||
req, err := client.ListPreparer(ctx)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", nil, "Failure preparing request")
|
||||
return
|
||||
}
|
||||
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.tlr.Response = autorest.Response{Response: resp}
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure sending request")
|
||||
return
|
||||
}
|
||||
|
||||
result.tlr, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure responding to request")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ListPreparer prepares the List request.
|
||||
func (client TagsClient) ListPreparer(ctx context.Context) (*http.Request, error) {
|
||||
pathParameters := map[string]interface{}{
|
||||
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
|
||||
}
|
||||
|
||||
const APIVersion = "2019-05-01"
|
||||
queryParameters := map[string]interface{}{
|
||||
"api-version": APIVersion,
|
||||
}
|
||||
|
||||
preparer := autorest.CreatePreparer(
|
||||
autorest.AsGet(),
|
||||
autorest.WithBaseURL(client.BaseURI),
|
||||
autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames", pathParameters),
|
||||
autorest.WithQueryParameters(queryParameters))
|
||||
return preparer.Prepare((&http.Request{}).WithContext(ctx))
|
||||
}
|
||||
|
||||
// ListSender sends the List request. The method will close the
|
||||
// http.Response Body if it receives an error.
|
||||
func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) {
|
||||
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
|
||||
}
|
||||
|
||||
// ListResponder handles the response to the List request. The method always
|
||||
// closes the http.Response Body.
|
||||
func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) {
|
||||
err = autorest.Respond(
|
||||
resp,
|
||||
client.ByInspecting(),
|
||||
azure.WithErrorUnlessStatusCode(http.StatusOK),
|
||||
autorest.ByUnmarshallingJSON(&result),
|
||||
autorest.ByClosing())
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return
|
||||
}
|
||||
|
||||
// listNextResults retrieves the next set of results, if any.
|
||||
func (client TagsClient) listNextResults(ctx context.Context, lastResults TagsListResult) (result TagsListResult, err error) {
|
||||
req, err := lastResults.tagsListResultPreparer(ctx)
|
||||
if err != nil {
|
||||
return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", nil, "Failure preparing next results request")
|
||||
}
|
||||
if req == nil {
|
||||
return
|
||||
}
|
||||
resp, err := client.ListSender(req)
|
||||
if err != nil {
|
||||
result.Response = autorest.Response{Response: resp}
|
||||
return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure sending next results request")
|
||||
}
|
||||
result, err = client.ListResponder(resp)
|
||||
if err != nil {
|
||||
err = autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure responding to next results request")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// ListComplete enumerates all values, automatically crossing page boundaries as required.
|
||||
func (client TagsClient) ListComplete(ctx context.Context) (result TagsListResultIterator, err error) {
|
||||
if tracing.IsEnabled() {
|
||||
ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List")
|
||||
defer func() {
|
||||
sc := -1
|
||||
if result.Response().Response.Response != nil {
|
||||
sc = result.page.Response().Response.Response.StatusCode
|
||||
}
|
||||
tracing.EndSpan(ctx, sc, err)
|
||||
}()
|
||||
}
|
||||
result.page, err = client.List(ctx)
|
||||
return
|
||||
}
|
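As the CreateOrUpdateValue comment above says, a tag value can only be created once its tag name exists, so the two calls are ordered. A small illustrative sketch (package clause, helper name and the environment/dev strings are made up):

package sketch

import (
	"context"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
)

// tagSubscription creates the "environment" tag name and attaches the value "dev".
func tagSubscription(ctx context.Context, client resources.TagsClient) error {
	// The tag name must exist before a value can be created under it.
	if _, err := client.CreateOrUpdate(ctx, "environment"); err != nil {
		return err
	}
	_, err := client.CreateOrUpdateValue(ctx, "environment", "dev")
	return err
}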
vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources/version.go (generated, vendored, new file, 30 lines)
|
@ -0,0 +1,30 @@
|
|||
package resources
|
||||
|
||||
import "github.com/Azure/azure-sdk-for-go/version"
|
||||
|
||||
// Copyright (c) Microsoft and contributors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
//
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Code generated by Microsoft (R) AutoRest Code Generator.
|
||||
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
|
||||
|
||||
// UserAgent returns the UserAgent string to use when sending http.Requests.
|
||||
func UserAgent() string {
|
||||
return "Azure-SDK-For-Go/" + version.Number + " resources/2019-05-01"
|
||||
}
|
||||
|
||||
// Version returns the semantic version (see http://semver.org) of the client.
|
||||
func Version() string {
|
||||
return version.Number
|
||||
}
|
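UserAgent above is what the generated base client sends by default; applications commonly append their own identifier with autorest's AddToUserAgent. A short sketch under that assumption (the suffix string and subscription ID are placeholders):

package main

import (
	"log"

	"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-05-01/resources"
)

func main() {
	client := resources.NewTagsClient("<subscription-id>") // placeholder
	// Appends to the default "Azure-SDK-For-Go/<version> resources/2019-05-01" User-Agent.
	if err := client.AddToUserAgent("example-app/0.1"); err != nil {
		log.Fatal(err)
	}
}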
vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/ovirt_types.go (generated, vendored, 36 lines removed)
|
@ -1,36 +0,0 @@
|
|||
/*
|
||||
Copyright 2019 The OpenShift Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// TODO: these types should eventually be broken out, along with the actuator,
|
||||
// to a separate repo.
|
||||
|
||||
// OvirtProviderSpec the specification of the credentials request in Ovirt.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type OvirtProviderSpec struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// OvirtProviderStatus contains the status of the credentials request in Ovirt.
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
type OvirtProviderStatus struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
vendor/github.com/openshift/cloud-credential-operator/pkg/apis/cloudcredential/v1/zz_generated.deepcopy.go (generated, vendored, 50 changed lines)
|
@ -373,56 +373,6 @@ func (in *OpenStackProviderStatus) DeepCopyObject() runtime.Object {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OvirtProviderSpec) DeepCopyInto(out *OvirtProviderSpec) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtProviderSpec.
|
||||
func (in *OvirtProviderSpec) DeepCopy() *OvirtProviderSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OvirtProviderSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *OvirtProviderSpec) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *OvirtProviderStatus) DeepCopyInto(out *OvirtProviderStatus) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtProviderStatus.
|
||||
func (in *OvirtProviderStatus) DeepCopy() *OvirtProviderStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(OvirtProviderStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *OvirtProviderStatus) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *RoleBinding) DeepCopyInto(out *RoleBinding) {
|
||||
*out = *in
|
||||
|
|
vendor/github.com/openshift/cloud-credential-operator/pkg/aws/utils.go (generated, vendored, 37 changed lines)
|
@ -106,9 +106,17 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
// SimulateParams captures any additional details that should be used
|
||||
// when simulating permissions.
|
||||
type SimulateParams struct {
|
||||
Region string
|
||||
}
|
||||
|
||||
// CheckCloudCredCreation will see whether we have enough permissions to create new sub-creds
|
||||
func CheckCloudCredCreation(awsClient Client, logger log.FieldLogger) (bool, error) {
|
||||
return CheckPermissionsAgainstActions(awsClient, credMintingActions, logger)
|
||||
// Empty SimulateParams{} because creating IAM users and assigning policies
// are all IAM API calls, which are not region-specific
|
||||
return CheckPermissionsAgainstActions(awsClient, credMintingActions, &SimulateParams{}, logger)
|
||||
}
|
||||
|
||||
// getClientDetails will return the *iam.User associated with the provided client's credentials,
|
||||
|
@ -136,7 +144,8 @@ func getClientDetails(awsClient Client) (*iam.User, bool, error) {
|
|||
|
||||
// CheckPermissionsUsingQueryClient will use queryClient to query whether the credentials in targetClient can perform the actions
|
||||
// listed in the statementEntries. queryClient will need iam:GetUser and iam:SimulatePrincipalPolicy
|
||||
func CheckPermissionsUsingQueryClient(queryClient, targetClient Client, statementEntries []minterv1.StatementEntry, logger log.FieldLogger) (bool, error) {
|
||||
func CheckPermissionsUsingQueryClient(queryClient, targetClient Client, statementEntries []minterv1.StatementEntry,
|
||||
params *SimulateParams, logger log.FieldLogger) (bool, error) {
|
||||
targetUser, isRoot, err := getClientDetails(targetClient)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("error gathering AWS credentials details: %v", err)
|
||||
|
@ -157,6 +166,17 @@ func CheckPermissionsUsingQueryClient(queryClient, targetClient Client, statemen
|
|||
input := &iam.SimulatePrincipalPolicyInput{
|
||||
PolicySourceArn: targetUser.Arn,
|
||||
ActionNames: allowList,
|
||||
ContextEntries: []*iam.ContextEntry{},
|
||||
}
|
||||
|
||||
if params != nil {
|
||||
if params.Region != "" {
|
||||
input.ContextEntries = append(input.ContextEntries, &iam.ContextEntry{
|
||||
ContextKeyName: aws.String("aws:RequestedRegion"),
|
||||
ContextKeyType: aws.String("string"),
|
||||
ContextKeyValues: []*string{aws.String(params.Region)},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Either all actions are allowed and we'll return 'true', or it's a failure
|
||||
|
@ -189,14 +209,15 @@ func CheckPermissionsUsingQueryClient(queryClient, targetClient Client, statemen
|
|||
|
||||
// CheckPermissionsAgainstStatementList will test to see whether the list of actions in the provided
|
||||
// list of StatementEntries can work with the credentials used by the passed-in awsClient
|
||||
func CheckPermissionsAgainstStatementList(awsClient Client, statementEntries []minterv1.StatementEntry, logger log.FieldLogger) (bool, error) {
|
||||
return CheckPermissionsUsingQueryClient(awsClient, awsClient, statementEntries, logger)
|
||||
func CheckPermissionsAgainstStatementList(awsClient Client, statementEntries []minterv1.StatementEntry,
|
||||
params *SimulateParams, logger log.FieldLogger) (bool, error) {
|
||||
return CheckPermissionsUsingQueryClient(awsClient, awsClient, statementEntries, params, logger)
|
||||
}
|
||||
|
||||
// CheckPermissionsAgainstActions will take the static list of Actions to check whether the provided
|
||||
// awsClient creds have sufficient permissions to perform the actions.
|
||||
// Will return true/false indicating whether the permissions are sufficient.
|
||||
func CheckPermissionsAgainstActions(awsClient Client, actionList []string, logger log.FieldLogger) (bool, error) {
|
||||
func CheckPermissionsAgainstActions(awsClient Client, actionList []string, params *SimulateParams, logger log.FieldLogger) (bool, error) {
|
||||
statementList := []minterv1.StatementEntry{
|
||||
{
|
||||
Action: actionList,
|
||||
|
@ -205,15 +226,15 @@ func CheckPermissionsAgainstActions(awsClient Client, actionList []string, logge
|
|||
},
|
||||
}
|
||||
|
||||
return CheckPermissionsAgainstStatementList(awsClient, statementList, logger)
|
||||
return CheckPermissionsAgainstStatementList(awsClient, statementList, params, logger)
|
||||
}
|
||||
|
||||
// CheckCloudCredPassthrough will see if the provided creds are good enough to pass through
|
||||
// to other components as-is based on the static list of permissions needed by the various
|
||||
// users of CredentialsRequests
|
||||
// TODO: move away from static list (to dynamic passthrough validation?)
|
||||
func CheckCloudCredPassthrough(awsClient Client, logger log.FieldLogger) (bool, error) {
|
||||
return CheckPermissionsAgainstActions(awsClient, credPassthroughActions, logger)
|
||||
func CheckCloudCredPassthrough(awsClient Client, params *SimulateParams, logger log.FieldLogger) (bool, error) {
|
||||
return CheckPermissionsAgainstActions(awsClient, credPassthroughActions, params, logger)
|
||||
}
|
||||
|
||||
func readCredentialRequest(cr []byte) (*minterv1.CredentialsRequest, error) {
|
||||
|
|
|
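The SimulateParams threading above lets callers pin iam:SimulatePrincipalPolicy to a region through the aws:RequestedRegion context key. A hedged sketch of a caller; building the ccaws.Client itself is out of scope here, and the package clause and helper name are made up:

package sketch

import (
	ccaws "github.com/openshift/cloud-credential-operator/pkg/aws"
	log "github.com/sirupsen/logrus"
)

// canPassthrough reports whether the supplied credentials are sufficient for
// passthrough mode, simulating the permission checks as if made in region.
func canPassthrough(client ccaws.Client, region string) (bool, error) {
	logger := log.WithField("example", "passthrough-check")
	return ccaws.CheckCloudCredPassthrough(client, &ccaws.SimulateParams{Region: region}, logger)
}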
@ -18,10 +18,11 @@
|
|||
version = "v0.4.11"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:30bf7932616fd30848ac8f869c9bdf158ce4d099944f13d8dd8a15243c2cb1ea"
|
||||
digest = "1:cb5a084abcd601226fd3fbc8e226aa4b6c5a8b90b8aa46266d6a27c9c4df984d"
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
packages = [
|
||||
"profiles/latest/dns/mgmt/dns",
|
||||
"profiles/latest/resources/mgmt/resources",
|
||||
"profiles/latest/resources/mgmt/subscriptions",
|
||||
"services/compute/mgmt/2018-10-01/compute",
|
||||
"services/dns/mgmt/2017-10-01/dns",
|
||||
|
@ -31,6 +32,7 @@
|
|||
"services/privatedns/mgmt/2018-09-01/privatedns",
|
||||
"services/resources/mgmt/2018-05-01/resources",
|
||||
"services/resources/mgmt/2018-06-01/subscriptions",
|
||||
"services/resources/mgmt/2019-03-01/resources",
|
||||
"version",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
|
@ -112,7 +114,7 @@
|
|||
version = "v2.0"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:ef55f87a8efa6ed704f2477bd6ca696780ff56340d84cd4fc473b04134a03f1e"
|
||||
digest = "1:607e6e0487464ad5194fde915e288ae705e2d5750e3e4058e5e4de6033397366"
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
packages = [
|
||||
"aws",
|
||||
|
@ -137,9 +139,11 @@
|
|||
"internal/ini",
|
||||
"internal/s3err",
|
||||
"internal/sdkio",
|
||||
"internal/sdkmath",
|
||||
"internal/sdkrand",
|
||||
"internal/sdkuri",
|
||||
"internal/shareddefaults",
|
||||
"internal/strings",
|
||||
"private/protocol",
|
||||
"private/protocol/ec2query",
|
||||
"private/protocol/eventstream",
|
||||
|
@ -160,13 +164,15 @@
|
|||
"service/resourcegroupstaggingapi",
|
||||
"service/route53",
|
||||
"service/s3",
|
||||
"service/s3/internal/arn",
|
||||
"service/s3/s3iface",
|
||||
"service/s3/s3manager",
|
||||
"service/sts",
|
||||
"service/sts/stsiface",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "fb5f514796fc4fdc6afdcf5a675a5b2baa714b9f"
|
||||
version = "v1.16.14"
|
||||
revision = "10653500713c229d4ef93175510476c3cc385299"
|
||||
version = "v1.28.3"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:fdb4ed936abeecb46a8c27dcac83f75c05c87a46d9ec7711411eb785c213fa02"
|
||||
|
@ -670,8 +676,8 @@
|
|||
revision = "3b0e988f8cb09a07f375dfc304b86c1a81cbc82b"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
digest = "1:cd242e9cb5a2a75c6e6527828a698746225b397c8600f0d3f51d1f149fbeb28d"
|
||||
branch = "release-4.3"
|
||||
digest = "1:454f3ebed85be06f8ed32549474df4fb75624319314f222983d4bae6d4daa1cd"
|
||||
name = "github.com/openshift/cloud-credential-operator"
|
||||
packages = [
|
||||
"pkg/apis/cloudcredential/v1",
|
||||
|
@ -679,7 +685,7 @@
|
|||
"version",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "44ed18ef8496f9417af89f92539b8385b1cc9d51"
|
||||
revision = "16ebd83fd5269c41983406dd11758a2601751188"
|
||||
|
||||
[[projects]]
|
||||
branch = "openshift-4.2-cluster-api-0.1.0"
|
||||
|
@ -1069,7 +1075,7 @@
|
|||
version = "v1.19.1"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:de19d3688bad66ab5567a2e0a12951a6323a48afca7a7e6582a2b9d3a7456c1c"
|
||||
digest = "1:df052cd919be38ef71faa661921b7d1f1cb921c4eae9b6c6472695eaa08c99fd"
|
||||
name = "gopkg.in/AlecAivazis/survey.v1"
|
||||
packages = [
|
||||
".",
|
||||
|
@ -1077,8 +1083,7 @@
|
|||
"terminal",
|
||||
]
|
||||
pruneopts = "NUT"
|
||||
revision = "e4af3b345125b0903edb492a33a99a23e9eb3487"
|
||||
version = "v1.8.7"
|
||||
revision = "6773bdf39b7fa13e6a40ece7ac2d01e0d469c205"
|
||||
|
||||
[[projects]]
|
||||
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
|
||||
|
@ -1362,6 +1367,7 @@
|
|||
analyzer-version = 1
|
||||
input-imports = [
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/latest/dns/mgmt/dns",
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/resources",
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/subscriptions",
|
||||
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-10-01/compute",
|
||||
"github.com/Azure/azure-sdk-for-go/services/graphrbac/1.6/graphrbac",
|
||||
|
|
|
@ -15,11 +15,11 @@ required = [
|
|||
|
||||
[[constraint]]
|
||||
name = "gopkg.in/AlecAivazis/survey.v1"
|
||||
version = "1.8.7"
|
||||
revision = "6773bdf39b7fa13e6a40ece7ac2d01e0d469c205"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/aws/aws-sdk-go"
|
||||
version = "1.16.14"
|
||||
version = "1.25.35"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/Azure/azure-sdk-for-go"
|
||||
|
@ -94,6 +94,10 @@ required = [
|
|||
branch = "master"
|
||||
name = "github.com/gophercloud/gophercloud"
|
||||
|
||||
[[constraint]]
|
||||
branch = "release-4.3"
|
||||
name = "github.com/openshift/cloud-credential-operator"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "sigs.k8s.io/cluster-api-provider-openstack"
|
||||
|
@ -116,7 +120,7 @@ required = [
|
|||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
|
||||
[[constraint]]
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "github.com/openshift/cluster-api-provider-gcp"
|
||||
|
||||
|
@ -124,7 +128,7 @@ required = [
|
|||
branch = "master"
|
||||
name = "github.com/openshift/machine-config-operator"
|
||||
|
||||
[[constraint]]
|
||||
[[constraint]]
|
||||
name = "github.com/containers/image"
|
||||
version = "2.0.0"
|
||||
|
||||
|
|
vendor/github.com/openshift/installer/cmd/openshift-install/create.go (generated, vendored, 27 changed lines)
|
@ -5,6 +5,7 @@ import (
|
|||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -28,9 +29,11 @@ import (
|
|||
configclient "github.com/openshift/client-go/config/clientset/versioned"
|
||||
routeclient "github.com/openshift/client-go/route/clientset/versioned"
|
||||
"github.com/openshift/installer/pkg/asset"
|
||||
"github.com/openshift/installer/pkg/asset/installconfig"
|
||||
assetstore "github.com/openshift/installer/pkg/asset/store"
|
||||
targetassets "github.com/openshift/installer/pkg/asset/targets"
|
||||
destroybootstrap "github.com/openshift/installer/pkg/destroy/bootstrap"
|
||||
"github.com/openshift/installer/pkg/types/baremetal"
|
||||
cov1helpers "github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
|
||||
)
|
||||
|
||||
|
@ -105,10 +108,15 @@ var (
|
|||
logrus.Fatal("Bootstrap failed to complete: ", err)
|
||||
}
|
||||
|
||||
logrus.Info("Destroying the bootstrap resources...")
|
||||
err = destroybootstrap.Destroy(rootOpts.dir)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
if oi, ok := os.LookupEnv("OPENSHIFT_INSTALL_PRESERVE_BOOTSTRAP"); ok && oi != "" {
|
||||
logrus.Warn("OPENSHIFT_INSTALL_PRESERVE_BOOTSTRAP is set, not destroying bootstrap resources. " +
|
||||
"Warning: this should only be used for debugging purposes, and poses a risk to cluster stability.")
|
||||
} else {
|
||||
logrus.Info("Destroying the bootstrap resources...")
|
||||
err = destroybootstrap.Destroy(rootOpts.dir)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
err = waitForInstallComplete(ctx, config, rootOpts.dir)
|
||||
|
@ -324,6 +332,17 @@ func waitForBootstrapConfigMap(ctx context.Context, client *kubernetes.Clientset
|
|||
// that the cluster has been initialized.
|
||||
func waitForInitializedCluster(ctx context.Context, config *rest.Config) error {
|
||||
timeout := 30 * time.Minute
|
||||
|
||||
// Wait longer for bare metal, due to the length of time it takes to boot
|
||||
if assetStore, err := assetstore.NewStore(rootOpts.dir); err == nil {
|
||||
installConfig := &installconfig.InstallConfig{}
|
||||
if err := assetStore.Fetch(installConfig); err == nil {
|
||||
if installConfig.Config.Platform.Name() == baremetal.Name {
|
||||
timeout = 60 * time.Minute
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
logrus.Infof("Waiting up to %v for the cluster at %s to initialize...", timeout, config.Host)
|
||||
cc, err := configclient.NewForConfig(config)
|
||||
if err != nil {
|
||||
|
|
vendor/github.com/openshift/installer/cmd/openshift-install/gather.go (generated, vendored, 4 changed lines)
|
@ -3,8 +3,10 @@ package main
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
|
@ -116,7 +118,7 @@ func runGatherBootstrapCmd(directory string) error {
|
|||
|
||||
func logGatherBootstrap(bootstrap string, port int, masters []string, directory string) error {
|
||||
logrus.Info("Pulling debug logs from the bootstrap machine")
|
||||
client, err := ssh.NewClient("core", fmt.Sprintf("%s:%d", bootstrap, port), gatherBootstrapOpts.sshKeys)
|
||||
client, err := ssh.NewClient("core", net.JoinHostPort(bootstrap, strconv.Itoa(port)), gatherBootstrapOpts.sshKeys)
|
||||
if err != nil && len(gatherBootstrapOpts.sshKeys) == 0 {
|
||||
return errors.Wrap(err, "failed to create SSH client, ensure the proper ssh key is in your keyring or specify with --key")
|
||||
} else if err != nil {
|
||||
|
|
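The change from fmt.Sprintf("%s:%d", ...) to net.JoinHostPort above matters once the bootstrap host can be an IPv6 literal, which has to be bracketed in a host:port string. A standalone illustration:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	port := 22
	// JoinHostPort handles IPv4 and IPv6 hosts correctly.
	fmt.Println(net.JoinHostPort("10.0.0.5", strconv.Itoa(port)))     // 10.0.0.5:22
	fmt.Println(net.JoinHostPort("fd2e:6f44::1", strconv.Itoa(port))) // [fd2e:6f44::1]:22
	// The old Sprintf form would yield "fd2e:6f44::1:22", which is ambiguous.
}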
|
@ -67,7 +67,11 @@ func setupFileHook(baseDir string) func() {
|
|||
DisableLevelTruncation: false,
|
||||
}))
|
||||
|
||||
logrus.Debugf(version.String)
|
||||
versionString, err := version.String()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
logrus.Debugf(versionString)
|
||||
if version.Commit != "" {
|
||||
logrus.Debugf("Built from commit %s", version.Commit)
|
||||
}
|
||||
|
|
vendor/github.com/openshift/installer/cmd/openshift-install/version.go (generated, vendored, 7 changed lines)
|
@ -21,7 +21,12 @@ func newVersionCmd() *cobra.Command {
|
|||
}
|
||||
|
||||
func runVersionCmd(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s %s\n", os.Args[0], version.Raw)
|
||||
versionString, err := version.Version()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("%s %s\n", os.Args[0], versionString)
|
||||
if version.Commit != "" {
|
||||
fmt.Printf("built from commit %s\n", version.Commit)
|
||||
}
|
||||
|
|
|
@ -9,6 +9,10 @@ locals {
|
|||
|
||||
provider "aws" {
|
||||
region = var.aws_region
|
||||
|
||||
# Validation of the AWS Bahrain region was added in AWS TF provider v2.22,
# so we skip region validation when installing in me-south-1.
|
||||
skip_region_validation = var.aws_region == "me-south-1"
|
||||
}
|
||||
|
||||
module "bootstrap" {
|
||||
|
|
vendor/github.com/openshift/installer/data/data/aws/vpc/sg-master.tf (generated, vendored, 19 changed lines)
@@ -243,6 +243,16 @@ resource "aws_security_group_rule" "master_ingress_services_tcp" {
  self = true
}

resource "aws_security_group_rule" "master_ingress_services_tcp_from_worker" {
  type = "ingress"
  security_group_id = aws_security_group.master.id
  source_security_group_id = aws_security_group.worker.id

  protocol = "tcp"
  from_port = 30000
  to_port = 32767
}

resource "aws_security_group_rule" "master_ingress_services_udp" {
  type = "ingress"
  security_group_id = aws_security_group.master.id

@@ -253,3 +263,12 @@ resource "aws_security_group_rule" "master_ingress_services_udp" {
  self = true
}

resource "aws_security_group_rule" "master_ingress_services_udp_from_worker" {
  type = "ingress"
  security_group_id = aws_security_group.master.id
  source_security_group_id = aws_security_group.worker.id

  protocol = "udp"
  from_port = 30000
  to_port = 32767
}
19  vendor/github.com/openshift/installer/data/data/aws/vpc/sg-worker.tf  (generated, vendored)
@@ -153,6 +153,16 @@ resource "aws_security_group_rule" "worker_ingress_services_tcp" {
  self = true
}

resource "aws_security_group_rule" "worker_ingress_services_tcp_from_master" {
  type = "ingress"
  security_group_id = aws_security_group.worker.id
  source_security_group_id = aws_security_group.master.id

  protocol = "tcp"
  from_port = 30000
  to_port = 32767
}

resource "aws_security_group_rule" "worker_ingress_services_udp" {
  type = "ingress"
  security_group_id = aws_security_group.worker.id

@@ -163,3 +173,12 @@ resource "aws_security_group_rule" "worker_ingress_services_udp" {
  self = true
}

resource "aws_security_group_rule" "worker_ingress_services_udp_from_master" {
  type = "ingress"
  security_group_id = aws_security_group.worker.id
  source_security_group_id = aws_security_group.master.id

  protocol = "udp"
  from_port = 30000
  to_port = 32767
}
108  vendor/github.com/openshift/installer/data/data/azure/bootstrap/main.tf  (generated, vendored)
|
@ -1,5 +1,6 @@
|
|||
locals {
|
||||
bootstrap_nic_ip_configuration_name = "bootstrap-nic-ip"
|
||||
bootstrap_nic_ip_v4_configuration_name = "bootstrap-nic-ip-v4"
|
||||
bootstrap_nic_ip_v6_configuration_name = "bootstrap-nic-ip-v6"
|
||||
}
|
||||
|
||||
data "azurerm_storage_account_sas" "ignition" {
|
||||
|
@ -61,20 +62,38 @@ data "ignition_config" "redirect" {
|
|||
}
|
||||
}
|
||||
|
||||
resource "azurerm_public_ip" "bootstrap_public_ip" {
|
||||
count = var.private ? 0 : 1
|
||||
resource "azurerm_public_ip" "bootstrap_public_ip_v4" {
|
||||
count = var.private || ! var.use_ipv4 ? 0 : 1
|
||||
|
||||
sku = "Standard"
|
||||
location = var.region
|
||||
name = "${var.cluster_id}-bootstrap-pip"
|
||||
name = "${var.cluster_id}-bootstrap-pip-v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
}
|
||||
|
||||
data "azurerm_public_ip" "bootstrap_public_ip" {
|
||||
data "azurerm_public_ip" "bootstrap_public_ip_v4" {
|
||||
count = var.private ? 0 : 1
|
||||
|
||||
name = azurerm_public_ip.bootstrap_public_ip[0].name
|
||||
name = azurerm_public_ip.bootstrap_public_ip_v4[0].name
|
||||
resource_group_name = var.resource_group_name
|
||||
}
|
||||
|
||||
resource "azurerm_public_ip" "bootstrap_public_ip_v6" {
|
||||
count = var.private || ! var.use_ipv6 ? 0 : 1
|
||||
|
||||
sku = "Standard"
|
||||
location = var.region
|
||||
name = "${var.cluster_id}-bootstrap-pip-v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
ip_version = "IPv6"
|
||||
}
|
||||
|
||||
data "azurerm_public_ip" "bootstrap_public_ip_v6" {
|
||||
count = var.private || ! var.use_ipv6 ? 0 : 1
|
||||
|
||||
name = azurerm_public_ip.bootstrap_public_ip_v6[0].name
|
||||
resource_group_name = var.resource_group_name
|
||||
}
|
||||
|
||||
|
@ -83,24 +102,73 @@ resource "azurerm_network_interface" "bootstrap" {
|
|||
location = var.region
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
ip_configuration {
|
||||
subnet_id = var.subnet_id
|
||||
name = local.bootstrap_nic_ip_configuration_name
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
public_ip_address_id = var.private ? null : azurerm_public_ip.bootstrap_public_ip[0].id
|
||||
dynamic "ip_configuration" {
|
||||
for_each = [for ip in [
|
||||
{
|
||||
// LIMITATION: azure does not allow an ipv6 address to be primary today
|
||||
primary : var.use_ipv4,
|
||||
name : local.bootstrap_nic_ip_v4_configuration_name,
|
||||
ip_address_version : "IPv4",
|
||||
public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip_v4[0].id,
|
||||
include : var.use_ipv4 || var.use_ipv6,
|
||||
},
|
||||
{
|
||||
primary : ! var.use_ipv4,
|
||||
name : local.bootstrap_nic_ip_v6_configuration_name,
|
||||
ip_address_version : "IPv6",
|
||||
public_ip_id : var.private ? null : azurerm_public_ip.bootstrap_public_ip_v6[0].id,
|
||||
include : var.use_ipv6,
|
||||
},
|
||||
] : {
|
||||
primary : ip.primary
|
||||
name : ip.name
|
||||
ip_address_version : ip.ip_address_version
|
||||
public_ip_id : ip.public_ip_id
|
||||
include : ip.include
|
||||
} if ip.include
|
||||
]
|
||||
content {
|
||||
primary = ip_configuration.value.primary
|
||||
name = ip_configuration.value.name
|
||||
subnet_id = var.subnet_id
|
||||
private_ip_address_version = ip_configuration.value.ip_address_version
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
public_ip_address_id = ip_configuration.value.public_ip_id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap" {
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap_v4" {
|
||||
// should be 'count = var.use_ipv4 && ! var.emulate_single_stack_ipv6 ? 1 : 0', but we need a V4 LB for egress for quay
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
network_interface_id = azurerm_network_interface.bootstrap.id
|
||||
backend_address_pool_id = var.elb_backend_pool_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_configuration_name
|
||||
backend_address_pool_id = var.elb_backend_pool_v4_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_v4_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap" {
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "public_lb_bootstrap_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
network_interface_id = azurerm_network_interface.bootstrap.id
|
||||
backend_address_pool_id = var.ilb_backend_pool_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_configuration_name
|
||||
backend_address_pool_id = var.elb_backend_pool_v6_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_v6_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap_v4" {
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
network_interface_id = azurerm_network_interface.bootstrap.id
|
||||
backend_address_pool_id = var.ilb_backend_pool_v4_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_v4_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "internal_lb_bootstrap_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
network_interface_id = azurerm_network_interface.bootstrap.id
|
||||
backend_address_pool_id = var.ilb_backend_pool_v6_id
|
||||
ip_configuration_name = local.bootstrap_nic_ip_v6_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_virtual_machine" "bootstrap" {
|
||||
|
@ -150,8 +218,10 @@ resource "azurerm_virtual_machine" "bootstrap" {
|
|||
}
|
||||
|
||||
depends_on = [
|
||||
azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap,
|
||||
azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap
|
||||
azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap_v4,
|
||||
azurerm_network_interface_backend_address_pool_association.public_lb_bootstrap_v6,
|
||||
azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap_v4,
|
||||
azurerm_network_interface_backend_address_pool_association.internal_lb_bootstrap_v6
|
||||
]
|
||||
}
|
||||
|
||||
|
|
29  vendor/github.com/openshift/installer/data/data/azure/bootstrap/variables.tf  (generated, vendored)
|
@ -38,16 +38,26 @@ variable "subnet_id" {
|
|||
description = "The subnet ID for the bootstrap node."
|
||||
}
|
||||
|
||||
variable "elb_backend_pool_id" {
|
||||
variable "elb_backend_pool_v4_id" {
|
||||
type = string
|
||||
description = "The external load balancer bakend pool id. used to attach the bootstrap NIC"
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_id" {
|
||||
variable "elb_backend_pool_v6_id" {
|
||||
type = string
|
||||
description = "The external load balancer bakend pool id for ipv6. used to attach the bootstrap NIC"
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_v4_id" {
|
||||
type = string
|
||||
description = "The internal load balancer bakend pool id. used to attach the bootstrap NIC"
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_v6_id" {
|
||||
type = string
|
||||
description = "The internal load balancer bakend pool id for ipv6. used to attach the bootstrap NIC"
|
||||
}
|
||||
|
||||
variable "storage_account" {
|
||||
type = any
|
||||
description = "the storage account for the cluster. It can be used for boot diagnostics."
|
||||
|
@ -68,3 +78,18 @@ variable "private" {
|
|||
type = bool
|
||||
description = "This value determines if this is a private cluster or not."
|
||||
}
|
||||
|
||||
variable "use_ipv4" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv4 networking."
|
||||
}
|
||||
|
||||
variable "use_ipv6" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv6 networking."
|
||||
}
|
||||
|
||||
variable "emulate_single_stack_ipv6" {
|
||||
type = bool
|
||||
description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
|
||||
}
|
||||
|
|
|
@ -6,6 +6,8 @@ locals {
|
|||
resource "azureprivatedns_zone" "private" {
|
||||
name = var.cluster_domain
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
depends_on = [azurerm_dns_cname_record.api_external_v4, azurerm_dns_cname_record.api_external_v6]
|
||||
}
|
||||
|
||||
resource "azureprivatedns_zone_virtual_network_link" "network" {
|
||||
|
@ -16,38 +18,85 @@ resource "azureprivatedns_zone_virtual_network_link" "network" {
|
|||
}
|
||||
|
||||
resource "azureprivatedns_a_record" "apiint_internal" {
|
||||
// TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
|
||||
// but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
name = "api-int"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 300
|
||||
records = [var.internal_lb_ipaddress]
|
||||
records = [var.internal_lb_ipaddress_v4]
|
||||
}
|
||||
|
||||
resource "azureprivatedns_aaaa_record" "apiint_internal_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "api-int"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 300
|
||||
records = [var.internal_lb_ipaddress_v6]
|
||||
}
|
||||
|
||||
resource "azureprivatedns_a_record" "api_internal" {
|
||||
// TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
|
||||
// but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
name = "api"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 300
|
||||
records = [var.internal_lb_ipaddress]
|
||||
records = [var.internal_lb_ipaddress_v4]
|
||||
}
|
||||
|
||||
resource "azurerm_dns_cname_record" "api_external" {
|
||||
count = var.private ? 0 : 1
|
||||
resource "azureprivatedns_aaaa_record" "api_internal_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "api"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 300
|
||||
records = [var.internal_lb_ipaddress_v6]
|
||||
}
|
||||
|
||||
resource "azurerm_dns_cname_record" "api_external_v4" {
|
||||
count = var.private || ! var.use_ipv4 ? 0 : 1
|
||||
|
||||
name = local.api_external_name
|
||||
zone_name = var.base_domain
|
||||
resource_group_name = var.base_domain_resource_group_name
|
||||
ttl = 300
|
||||
record = var.external_lb_fqdn
|
||||
record = var.external_lb_fqdn_v4
|
||||
}
|
||||
|
||||
resource "azurerm_dns_cname_record" "api_external_v6" {
|
||||
count = var.private || ! var.use_ipv6 ? 0 : 1
|
||||
|
||||
name = "v6-${local.api_external_name}"
|
||||
zone_name = var.base_domain
|
||||
resource_group_name = var.base_domain_resource_group_name
|
||||
ttl = 300
|
||||
record = var.external_lb_fqdn_v6
|
||||
}
|
||||
|
||||
resource "azureprivatedns_a_record" "etcd_a_nodes" {
|
||||
count = var.etcd_count
|
||||
count = var.use_ipv4 && ! var.emulate_single_stack_ipv6 ? var.etcd_count : 0
|
||||
name = "etcd-${count.index}"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 60
|
||||
records = [var.etcd_ip_addresses[count.index]]
|
||||
records = [var.etcd_ip_v4_addresses[count.index]]
|
||||
}
|
||||
|
||||
resource "azureprivatedns_aaaa_record" "etcd_aaaa_nodes" {
|
||||
count = var.use_ipv6 ? var.etcd_count : 0
|
||||
name = "etcd-${count.index}"
|
||||
zone_name = azureprivatedns_zone.private.name
|
||||
resource_group_name = var.resource_group_name
|
||||
ttl = 60
|
||||
records = [var.etcd_ip_v6_addresses[count.index]]
|
||||
}
|
||||
|
||||
resource "azureprivatedns_srv_record" "etcd_cluster" {
|
||||
|
@ -57,7 +106,7 @@ resource "azureprivatedns_srv_record" "etcd_cluster" {
|
|||
ttl = 60
|
||||
|
||||
dynamic "record" {
|
||||
for_each = azureprivatedns_a_record.etcd_a_nodes.*.name
|
||||
for_each = concat(azureprivatedns_a_record.etcd_a_nodes.*.name, azureprivatedns_aaaa_record.etcd_aaaa_nodes.*.name)
|
||||
iterator = name
|
||||
content {
|
||||
target = "${name.value}.${azureprivatedns_zone.private.name}"
|
||||
|
|
43  vendor/github.com/openshift/installer/data/data/azure/dns/variables.tf  (generated, vendored)
|
@ -24,13 +24,23 @@ variable "base_domain_resource_group_name" {
|
|||
type = string
|
||||
}
|
||||
|
||||
variable "external_lb_fqdn" {
|
||||
description = "External API's LB fqdn"
|
||||
variable "external_lb_fqdn_v4" {
|
||||
description = "External API's LB fqdn for IPv4"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "internal_lb_ipaddress" {
|
||||
description = "External API's LB Ip address"
|
||||
variable "external_lb_fqdn_v6" {
|
||||
description = "External API's LB fqdn for IPv6"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "internal_lb_ipaddress_v4" {
|
||||
description = "External API's LB IP v4 address"
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "internal_lb_ipaddress_v6" {
|
||||
description = "External API's LB IP v6 address"
|
||||
type = string
|
||||
}
|
||||
|
||||
|
@ -44,8 +54,14 @@ variable "etcd_count" {
|
|||
type = string
|
||||
}
|
||||
|
||||
variable "etcd_ip_addresses" {
|
||||
description = "List of string IPs for machines running etcd members."
|
||||
variable "etcd_ip_v4_addresses" {
|
||||
description = "List of string IPs in IPv4 for machines running etcd members."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "etcd_ip_v6_addresses" {
|
||||
description = "List of string IPs in IPv6 for machines running etcd members."
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
@ -59,3 +75,18 @@ variable "private" {
|
|||
type = bool
|
||||
description = "This value determines if this is a private cluster or not."
|
||||
}
|
||||
|
||||
variable "use_ipv4" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv4 networking."
|
||||
}
|
||||
|
||||
variable "use_ipv6" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv6 networking."
|
||||
}
|
||||
|
||||
variable "emulate_single_stack_ipv6" {
|
||||
type = bool
|
||||
description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
|
||||
}
|
||||
|
|
|
@ -22,27 +22,34 @@ provider "azureprivatedns" {
|
|||
}
|
||||
|
||||
module "bootstrap" {
|
||||
source = "./bootstrap"
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
region = var.azure_region
|
||||
vm_size = var.azure_bootstrap_vm_type
|
||||
vm_image = azurerm_image.cluster.id
|
||||
identity = azurerm_user_assigned_identity.main.id
|
||||
cluster_id = var.cluster_id
|
||||
ignition = var.ignition_bootstrap
|
||||
subnet_id = module.vnet.master_subnet_id
|
||||
elb_backend_pool_id = module.vnet.public_lb_backend_pool_id
|
||||
ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id
|
||||
tags = local.tags
|
||||
storage_account = azurerm_storage_account.cluster
|
||||
nsg_name = module.vnet.master_nsg_name
|
||||
private = module.vnet.private
|
||||
source = "./bootstrap"
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
region = var.azure_region
|
||||
vm_size = var.azure_bootstrap_vm_type
|
||||
vm_image = azurerm_image.cluster.id
|
||||
identity = azurerm_user_assigned_identity.main.id
|
||||
cluster_id = var.cluster_id
|
||||
ignition = var.ignition_bootstrap
|
||||
subnet_id = module.vnet.master_subnet_id
|
||||
elb_backend_pool_v4_id = module.vnet.public_lb_backend_pool_v4_id
|
||||
elb_backend_pool_v6_id = module.vnet.public_lb_backend_pool_v6_id
|
||||
ilb_backend_pool_v4_id = module.vnet.internal_lb_backend_pool_v4_id
|
||||
ilb_backend_pool_v6_id = module.vnet.internal_lb_backend_pool_v6_id
|
||||
tags = local.tags
|
||||
storage_account = azurerm_storage_account.cluster
|
||||
nsg_name = module.vnet.master_nsg_name
|
||||
private = module.vnet.private
|
||||
|
||||
use_ipv4 = var.use_ipv4 || var.azure_emulate_single_stack_ipv6
|
||||
use_ipv6 = var.use_ipv6
|
||||
emulate_single_stack_ipv6 = var.azure_emulate_single_stack_ipv6
|
||||
}
|
||||
|
||||
module "vnet" {
|
||||
source = "./vnet"
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
vnet_cidr = var.machine_cidr
|
||||
vnet_v4_cidrs = var.azure_machine_v4_cidrs
|
||||
vnet_v6_cidrs = var.azure_machine_v6_cidrs
|
||||
cluster_id = var.cluster_id
|
||||
region = var.azure_region
|
||||
dns_label = var.cluster_id
|
||||
|
@ -53,27 +60,37 @@ module "vnet" {
|
|||
master_subnet = var.azure_control_plane_subnet
|
||||
worker_subnet = var.azure_compute_subnet
|
||||
private = var.azure_private
|
||||
|
||||
use_ipv4 = var.use_ipv4 || var.azure_emulate_single_stack_ipv6
|
||||
use_ipv6 = var.use_ipv6
|
||||
emulate_single_stack_ipv6 = var.azure_emulate_single_stack_ipv6
|
||||
}
|
||||
|
||||
module "master" {
|
||||
source = "./master"
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
cluster_id = var.cluster_id
|
||||
region = var.azure_region
|
||||
availability_zones = var.azure_master_availability_zones
|
||||
vm_size = var.azure_master_vm_type
|
||||
vm_image = azurerm_image.cluster.id
|
||||
identity = azurerm_user_assigned_identity.main.id
|
||||
ignition = var.ignition_master
|
||||
external_lb_id = module.vnet.public_lb_id
|
||||
elb_backend_pool_id = module.vnet.public_lb_backend_pool_id
|
||||
ilb_backend_pool_id = module.vnet.internal_lb_backend_pool_id
|
||||
subnet_id = module.vnet.master_subnet_id
|
||||
instance_count = var.master_count
|
||||
storage_account = azurerm_storage_account.cluster
|
||||
os_volume_type = var.azure_master_root_volume_type
|
||||
os_volume_size = var.azure_master_root_volume_size
|
||||
private = module.vnet.private
|
||||
source = "./master"
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
cluster_id = var.cluster_id
|
||||
region = var.azure_region
|
||||
availability_zones = var.azure_master_availability_zones
|
||||
vm_size = var.azure_master_vm_type
|
||||
vm_image = azurerm_image.cluster.id
|
||||
identity = azurerm_user_assigned_identity.main.id
|
||||
ignition = var.ignition_master
|
||||
external_lb_id = module.vnet.public_lb_id
|
||||
elb_backend_pool_v4_id = module.vnet.public_lb_backend_pool_v4_id
|
||||
elb_backend_pool_v6_id = module.vnet.public_lb_backend_pool_v6_id
|
||||
ilb_backend_pool_v4_id = module.vnet.internal_lb_backend_pool_v4_id
|
||||
ilb_backend_pool_v6_id = module.vnet.internal_lb_backend_pool_v6_id
|
||||
subnet_id = module.vnet.master_subnet_id
|
||||
instance_count = var.master_count
|
||||
storage_account = azurerm_storage_account.cluster
|
||||
os_volume_type = var.azure_master_root_volume_type
|
||||
os_volume_size = var.azure_master_root_volume_size
|
||||
private = module.vnet.private
|
||||
|
||||
use_ipv4 = var.use_ipv4 || var.azure_emulate_single_stack_ipv6
|
||||
use_ipv6 = var.use_ipv6
|
||||
emulate_single_stack_ipv6 = var.azure_emulate_single_stack_ipv6
|
||||
}
|
||||
|
||||
module "dns" {
|
||||
|
@ -82,13 +99,20 @@ module "dns" {
|
|||
cluster_id = var.cluster_id
|
||||
base_domain = var.base_domain
|
||||
virtual_network_id = module.vnet.virtual_network_id
|
||||
external_lb_fqdn = module.vnet.public_lb_pip_fqdn
|
||||
internal_lb_ipaddress = module.vnet.internal_lb_ip_address
|
||||
external_lb_fqdn_v4 = module.vnet.public_lb_pip_v4_fqdn
|
||||
external_lb_fqdn_v6 = module.vnet.public_lb_pip_v6_fqdn
|
||||
internal_lb_ipaddress_v4 = module.vnet.internal_lb_ip_v4_address
|
||||
internal_lb_ipaddress_v6 = module.vnet.internal_lb_ip_v6_address
|
||||
resource_group_name = azurerm_resource_group.main.name
|
||||
base_domain_resource_group_name = var.azure_base_domain_resource_group_name
|
||||
etcd_count = var.master_count
|
||||
etcd_ip_addresses = module.master.ip_addresses
|
||||
etcd_ip_v4_addresses = module.master.ip_v4_addresses
|
||||
etcd_ip_v6_addresses = module.master.ip_v6_addresses
|
||||
private = module.vnet.private
|
||||
|
||||
use_ipv4 = var.use_ipv4 || var.azure_emulate_single_stack_ipv6
|
||||
use_ipv6 = var.use_ipv6
|
||||
emulate_single_stack_ipv6 = var.azure_emulate_single_stack_ipv6
|
||||
}
|
||||
|
||||
resource "random_string" "storage_suffix" {
|
||||
|
|
70  vendor/github.com/openshift/installer/data/data/azure/master/master.tf  (generated, vendored)
|
@ -1,7 +1,9 @@
|
|||
locals {
|
||||
// The name of the masters' ipconfiguration is hardcoded to "pipconfig". It needs to match cluster-api
|
||||
// https://github.com/openshift/cluster-api-provider-azure/blob/master/pkg/cloud/azure/services/networkinterfaces/networkinterfaces.go#L131
|
||||
ip_configuration_name = "pipConfig"
|
||||
ip_v4_configuration_name = "pipConfig"
|
||||
// TODO: Azure machine provider probably needs to look for pipConfig-v6 as well (or a different name like pipConfig-secondary)
|
||||
ip_v6_configuration_name = "pipConfig-v6"
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface" "master" {
|
||||
|
@ -11,27 +13,69 @@ resource "azurerm_network_interface" "master" {
|
|||
location = var.region
|
||||
resource_group_name = var.resource_group_name
|
||||
|
||||
ip_configuration {
|
||||
subnet_id = var.subnet_id
|
||||
name = local.ip_configuration_name
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
dynamic "ip_configuration" {
|
||||
for_each = [for ip in [
|
||||
{
|
||||
// LIMITATION: azure does not allow an ipv6 address to be primary today
|
||||
primary : var.use_ipv4,
|
||||
name : local.ip_v4_configuration_name,
|
||||
ip_address_version : "IPv4",
|
||||
include : var.use_ipv4 || var.use_ipv6
|
||||
},
|
||||
{
|
||||
primary : ! var.use_ipv4,
|
||||
name : local.ip_v6_configuration_name,
|
||||
ip_address_version : "IPv6",
|
||||
include : var.use_ipv6
|
||||
},
|
||||
] : {
|
||||
primary : ip.primary
|
||||
name : ip.name
|
||||
ip_address_version : ip.ip_address_version
|
||||
include : ip.include
|
||||
} if ip.include
|
||||
]
|
||||
content {
|
||||
primary = ip_configuration.value.primary
|
||||
name = ip_configuration.value.name
|
||||
subnet_id = var.subnet_id
|
||||
private_ip_address_version = ip_configuration.value.ip_address_version
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master" {
|
||||
count = var.instance_count
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master_v4" {
|
||||
// should be 'count = var.use_ipv4 && ! var.emulate_single_stack_ipv6 ? var.instance_count : 0', but we need a V4 LB for egress for quay
|
||||
count = var.use_ipv4 ? var.instance_count : 0
|
||||
|
||||
network_interface_id = element(azurerm_network_interface.master.*.id, count.index)
|
||||
backend_address_pool_id = var.elb_backend_pool_id
|
||||
ip_configuration_name = local.ip_configuration_name #must be the same as nic's ip configuration name.
|
||||
backend_address_pool_id = var.elb_backend_pool_v4_id
|
||||
ip_configuration_name = local.ip_v4_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master_internal" {
|
||||
count = var.instance_count
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master_v6" {
|
||||
count = var.use_ipv6 ? var.instance_count : 0
|
||||
|
||||
network_interface_id = element(azurerm_network_interface.master.*.id, count.index)
|
||||
backend_address_pool_id = var.ilb_backend_pool_id
|
||||
ip_configuration_name = local.ip_configuration_name #must be the same as nic's ip configuration name.
|
||||
backend_address_pool_id = var.elb_backend_pool_v6_id
|
||||
ip_configuration_name = local.ip_v6_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master_internal_v4" {
|
||||
count = var.use_ipv4 ? var.instance_count : 0
|
||||
|
||||
network_interface_id = element(azurerm_network_interface.master.*.id, count.index)
|
||||
backend_address_pool_id = var.ilb_backend_pool_v4_id
|
||||
ip_configuration_name = local.ip_v4_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_network_interface_backend_address_pool_association" "master_internal_v6" {
|
||||
count = var.use_ipv6 ? var.instance_count : 0
|
||||
|
||||
network_interface_id = element(azurerm_network_interface.master.*.id, count.index)
|
||||
backend_address_pool_id = var.ilb_backend_pool_v6_id
|
||||
ip_configuration_name = local.ip_v6_configuration_name
|
||||
}
|
||||
|
||||
resource "azurerm_virtual_machine" "master" {
|
||||
|
|
8  vendor/github.com/openshift/installer/data/data/azure/master/outputs.tf  (generated, vendored)
@@ -1,4 +1,8 @@
output "ip_addresses" {
  value = azurerm_network_interface.master.*.private_ip_address
output "ip_v4_addresses" {
  value = var.use_ipv4 ? azurerm_network_interface.master.*.private_ip_address : []
}

output "ip_v6_addresses" {
  value = var.use_ipv6 ? azurerm_network_interface.master.*.private_ip_addresses.1 : []
}
27  vendor/github.com/openshift/installer/data/data/azure/master/variables.tf  (generated, vendored)
|
@ -34,11 +34,19 @@ variable "external_lb_id" {
|
|||
type = string
|
||||
}
|
||||
|
||||
variable "elb_backend_pool_id" {
|
||||
variable "elb_backend_pool_v4_id" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_id" {
|
||||
variable "elb_backend_pool_v6_id" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_v4_id" {
|
||||
type = string
|
||||
}
|
||||
|
||||
variable "ilb_backend_pool_v6_id" {
|
||||
type = string
|
||||
}
|
||||
|
||||
|
@ -91,3 +99,18 @@ variable "private" {
|
|||
type = bool
|
||||
description = "This value determines if this is a private cluster or not."
|
||||
}
|
||||
|
||||
variable "use_ipv4" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv4 networking."
|
||||
}
|
||||
|
||||
variable "use_ipv6" {
|
||||
type = bool
|
||||
description = "This value determines if this is cluster should use IPv6 networking."
|
||||
}
|
||||
|
||||
variable "emulate_single_stack_ipv6" {
|
||||
type = bool
|
||||
description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
|
||||
}
|
||||
|
|
23  vendor/github.com/openshift/installer/data/data/azure/variables-azure.tf  (generated, vendored)
@@ -116,3 +116,26 @@ variable "azure_private" {
  type = bool
  description = "This determines if this is a private cluster or not."
}

variable "azure_machine_v4_cidrs" {
  type = list(string)

  description = <<EOF
The list of IPv4 address spaces from which to assign machine IPs.
EOF

}

variable "azure_machine_v6_cidrs" {
  type = list(string)

  description = <<EOF
The list of IPv6 address spaces from which to assign machine IPs.
EOF

}

variable "azure_emulate_single_stack_ipv6" {
  type = bool
  description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
}
@@ -26,8 +26,11 @@ data "azurerm_virtual_network" "preexisting_virtual_network" {

// Only reference data sources which are guaranteed to exist at any time (above) in this locals{} block
locals {
  master_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 0) #master subnet is a smaller subnet within the vnet. i.e from /21 to /24
  worker_subnet_cidr = cidrsubnet(var.vnet_cidr, 3, 1) #node subnet is a smaller subnet within the vnet. i.e from /21 to /24
  master_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.vnet_v4_cidrs[0], 3, 0) : null #master subnet is a smaller subnet within the vnet. i.e from /21 to /24
  master_subnet_cidr_v6 = var.use_ipv6 ? cidrsubnet(var.vnet_v6_cidrs[0], 16, 0) : null #master subnet is a smaller subnet within the vnet. i.e from /48 to /64

  worker_subnet_cidr_v4 = var.use_ipv4 ? cidrsubnet(var.vnet_v4_cidrs[0], 3, 1) : null #node subnet is a smaller subnet within the vnet. i.e from /21 to /24
  worker_subnet_cidr_v6 = var.use_ipv6 ? cidrsubnet(var.vnet_v6_cidrs[0], 16, 1) : null #node subnet is a smaller subnet within the vnet. i.e from /48 to /64

  master_subnet_id = var.preexisting_network ? data.azurerm_subnet.preexisting_master_subnet[0].id : azurerm_subnet.master_subnet[0].id
  worker_subnet_id = var.preexisting_network ? data.azurerm_subnet.preexisting_worker_subnet[0].id : azurerm_subnet.worker_subnet[0].id
98  vendor/github.com/openshift/installer/data/data/azure/vnet/internal-lb.tf  (generated, vendored)
|
@ -1,5 +1,6 @@
|
|||
locals {
|
||||
internal_lb_frontend_ip_configuration_name = "internal-lb-ip"
|
||||
internal_lb_frontend_ip_v4_configuration_name = "internal-lb-ip-v4"
|
||||
internal_lb_frontend_ip_v6_configuration_name = "internal-lb-ip-v6"
|
||||
}
|
||||
|
||||
resource "azurerm_lb" "internal" {
|
||||
|
@ -8,43 +9,109 @@ resource "azurerm_lb" "internal" {
|
|||
resource_group_name = var.resource_group_name
|
||||
location = var.region
|
||||
|
||||
frontend_ip_configuration {
|
||||
name = local.internal_lb_frontend_ip_configuration_name
|
||||
subnet_id = local.master_subnet_id
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
dynamic "frontend_ip_configuration" {
|
||||
for_each = [for ip in [
|
||||
// TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
|
||||
// but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
|
||||
{ name : local.internal_lb_frontend_ip_v4_configuration_name, ipv6 : false, include : var.use_ipv4 },
|
||||
{ name : local.internal_lb_frontend_ip_v6_configuration_name, ipv6 : true, include : var.use_ipv6 },
|
||||
] : {
|
||||
name : ip.name
|
||||
ipv6 : ip.ipv6
|
||||
include : ip.include
|
||||
} if ip.include
|
||||
]
|
||||
|
||||
content {
|
||||
name = frontend_ip_configuration.value.name
|
||||
subnet_id = local.master_subnet_id
|
||||
private_ip_address_version = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4"
|
||||
# WORKAROUND: Allocate a high ipv6 internal LB address to avoid the race with NIC allocation (a master and the LB
|
||||
# were being assigned the same IP dynamically). Issue is being tracked as a support ticket to Azure.
|
||||
private_ip_address_allocation = frontend_ip_configuration.value.ipv6 ? "Static" : "Dynamic"
|
||||
private_ip_address = frontend_ip_configuration.value.ipv6 ? cidrhost(local.master_subnet_cidr_v6, -2) : null
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool" {
|
||||
resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool_v4" {
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
resource_group_name = var.resource_group_name
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
name = "${var.cluster_id}-internal-controlplane"
|
||||
name = "${var.cluster_id}-internal-controlplane-v4"
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_api_internal" {
|
||||
name = "api-internal"
|
||||
resource "azurerm_lb_backend_address_pool" "internal_lb_controlplane_pool_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
resource_group_name = var.resource_group_name
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
name = "${var.cluster_id}-internal-controlplane-v6"
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_api_internal_v4" {
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
name = "api-internal-v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool.id
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
frontend_port = 6443
|
||||
backend_port = 6443
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_configuration_name
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
probe_id = azurerm_lb_probe.internal_lb_probe_api_internal.id
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_sint" {
|
||||
name = "sint"
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_api_internal_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "api-internal-v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool.id
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
frontend_port = 6443
|
||||
backend_port = 6443
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
probe_id = azurerm_lb_probe.internal_lb_probe_api_internal.id
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_sint_v4" {
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
name = "sint-v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
frontend_port = 22623
|
||||
backend_port = 22623
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_configuration_name
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_v4_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
probe_id = azurerm_lb_probe.internal_lb_probe_sint.id
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_lb_rule_sint_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "sint-v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id
|
||||
loadbalancer_id = azurerm_lb.internal.id
|
||||
frontend_port = 22623
|
||||
backend_port = 22623
|
||||
frontend_ip_configuration_name = local.internal_lb_frontend_ip_v6_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
|
@ -70,4 +137,3 @@ resource "azurerm_lb_probe" "internal_lb_probe_api_internal" {
|
|||
port = 6443
|
||||
protocol = "TCP"
|
||||
}
|
||||
|
||||
|
|
34  vendor/github.com/openshift/installer/data/data/azure/vnet/outputs.tf  (generated, vendored)
|
@ -1,25 +1,39 @@
|
|||
output "cluster-pip" {
|
||||
value = var.private ? null : azurerm_public_ip.cluster_public_ip.ip_address
|
||||
output "public_lb_backend_pool_v4_id" {
|
||||
value = var.use_ipv4 ? azurerm_lb_backend_address_pool.master_public_lb_pool_v4[0].id : null
|
||||
}
|
||||
|
||||
output "public_lb_backend_pool_id" {
|
||||
value = azurerm_lb_backend_address_pool.master_public_lb_pool.id
|
||||
output "public_lb_backend_pool_v6_id" {
|
||||
value = var.use_ipv6 ? azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id : null
|
||||
}
|
||||
|
||||
output "internal_lb_backend_pool_id" {
|
||||
value = azurerm_lb_backend_address_pool.internal_lb_controlplane_pool.id
|
||||
output "internal_lb_backend_pool_v4_id" {
|
||||
value = var.use_ipv4 ? azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v4[0].id : null
|
||||
}
|
||||
|
||||
output "internal_lb_backend_pool_v6_id" {
|
||||
value = var.use_ipv6 ? azurerm_lb_backend_address_pool.internal_lb_controlplane_pool_v6[0].id : null
|
||||
}
|
||||
|
||||
output "public_lb_id" {
|
||||
value = var.private ? null : azurerm_lb.public.id
|
||||
}
|
||||
|
||||
output "public_lb_pip_fqdn" {
|
||||
value = var.private ? null : data.azurerm_public_ip.cluster_public_ip.fqdn
|
||||
output "public_lb_pip_v4_fqdn" {
|
||||
value = var.private || ! var.use_ipv4 ? null : data.azurerm_public_ip.cluster_public_ip_v4[0].fqdn
|
||||
}
|
||||
|
||||
output "internal_lb_ip_address" {
|
||||
value = azurerm_lb.internal.private_ip_address
|
||||
output "public_lb_pip_v6_fqdn" {
|
||||
value = var.private || ! var.use_ipv6 ? null : data.azurerm_public_ip.cluster_public_ip_v6[0].fqdn
|
||||
}
|
||||
|
||||
output "internal_lb_ip_v4_address" {
|
||||
value = var.use_ipv4 ? azurerm_lb.internal.private_ip_addresses[0] : null
|
||||
}
|
||||
|
||||
output "internal_lb_ip_v6_address" {
|
||||
// TODO: internal LB should block v4 for better single stack emulation (&& ! var.emulate_single_stack_ipv6)
|
||||
// but RHCoS initramfs can't do v6 and so fails to ignite. https://issues.redhat.com/browse/GRPA-1343
|
||||
value = var.use_ipv6 ? azurerm_lb.internal.private_ip_addresses[1] : null
|
||||
}
|
||||
|
||||
output "master_nsg_name" {
|
||||
|
|
125  vendor/github.com/openshift/installer/data/data/azure/vnet/public-lb.tf  (generated, vendored)
|
@ -1,18 +1,44 @@
|
|||
locals {
|
||||
public_lb_frontend_ip_configuration_name = "public-lb-ip"
|
||||
public_lb_frontend_ip_v4_configuration_name = "public-lb-ip-v4"
|
||||
public_lb_frontend_ip_v6_configuration_name = "public-lb-ip-v6"
|
||||
}
|
||||
|
||||
resource "azurerm_public_ip" "cluster_public_ip" {
|
||||
resource "azurerm_public_ip" "cluster_public_ip_v4" {
|
||||
// DEBUG: Azure apparently requires dual stack LB for v6
|
||||
count = var.use_ipv4 || true ? 1 : 0
|
||||
|
||||
sku = "Standard"
|
||||
location = var.region
|
||||
name = "${var.cluster_id}-pip"
|
||||
name = "${var.cluster_id}-pip-v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
domain_name_label = var.dns_label
|
||||
}
|
||||
|
||||
data "azurerm_public_ip" "cluster_public_ip" {
|
||||
name = azurerm_public_ip.cluster_public_ip.name
|
||||
data "azurerm_public_ip" "cluster_public_ip_v4" {
|
||||
// DEBUG: Azure apparently requires dual stack LB for v6
|
||||
count = var.use_ipv4 || true ? 1 : 0
|
||||
|
||||
name = azurerm_public_ip.cluster_public_ip_v4[0].name
|
||||
resource_group_name = var.resource_group_name
|
||||
}
|
||||
|
||||
resource "azurerm_public_ip" "cluster_public_ip_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
ip_version = "IPv6"
|
||||
sku = "Standard"
|
||||
location = var.region
|
||||
name = "${var.cluster_id}-pip-v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
allocation_method = "Static"
|
||||
domain_name_label = var.dns_label
|
||||
}
|
||||
|
||||
data "azurerm_public_ip" "cluster_public_ip_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = azurerm_public_ip.cluster_public_ip_v6[0].name
|
||||
resource_group_name = var.resource_group_name
|
||||
}
|
||||
|
||||
|
@ -22,46 +48,105 @@ resource "azurerm_lb" "public" {
|
|||
resource_group_name = var.resource_group_name
|
||||
location = var.region
|
||||
|
||||
frontend_ip_configuration {
|
||||
name = local.public_lb_frontend_ip_configuration_name
|
||||
public_ip_address_id = azurerm_public_ip.cluster_public_ip.id
|
||||
dynamic "frontend_ip_configuration" {
|
||||
for_each = [for ip in [
|
||||
// DEBUG: Azure apparently requires dual stack LB for external load balancers v6
|
||||
{ name : local.public_lb_frontend_ip_v4_configuration_name, value : azurerm_public_ip.cluster_public_ip_v4[0].id, include : true, ipv6 : false },
|
||||
{ name : local.public_lb_frontend_ip_v6_configuration_name, value : azurerm_public_ip.cluster_public_ip_v6[0].id, include : var.use_ipv6, ipv6 : true },
|
||||
] : {
|
||||
name : ip.name
|
||||
value : ip.value
|
||||
ipv6 : ip.ipv6
|
||||
include : ip.include
|
||||
} if ip.include
|
||||
]
|
||||
|
||||
content {
|
||||
name = frontend_ip_configuration.value.name
|
||||
public_ip_address_id = frontend_ip_configuration.value.value
|
||||
private_ip_address_version = frontend_ip_configuration.value.ipv6 ? "IPv6" : "IPv4"
|
||||
private_ip_address_allocation = "Dynamic"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "azurerm_lb_backend_address_pool" "master_public_lb_pool" {
|
||||
resource "azurerm_lb_backend_address_pool" "master_public_lb_pool_v4" {
|
||||
count = var.use_ipv4 ? 1 : 0
|
||||
|
||||
resource_group_name = var.resource_group_name
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
name = "${var.cluster_id}-public-lb-control-plane"
|
||||
name = "${var.cluster_id}-public-lb-control-plane-v4"
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "public_lb_rule_api_internal" {
|
||||
count = var.private ? 0 : 1
|
||||
resource "azurerm_lb_backend_address_pool" "master_public_lb_pool_v6" {
|
||||
count = var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "api-internal"
|
||||
resource_group_name = var.resource_group_name
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
name = "${var.cluster_id}-public-lb-control-plane-v6"
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "public_lb_rule_api_internal_v4" {
|
||||
count = var.private || ! var.use_ipv4 ? 0 : 1
|
||||
|
||||
name = "api-internal-v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool.id
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool_v4[0].id
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
frontend_port = 6443
|
||||
backend_port = 6443
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_configuration_name
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_v4_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
probe_id = azurerm_lb_probe.public_lb_probe_api_internal[0].id
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_outbound_rule" {
|
||||
count = var.private ? 1 : 0
|
||||
resource "azurerm_lb_rule" "public_lb_rule_api_internal_v6" {
|
||||
count = var.private || ! var.use_ipv6 ? 0 : 1
|
||||
|
||||
name = "internal_outbound_rule"
|
||||
name = "api-internal-v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool.id
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
frontend_port = 6443
|
||||
backend_port = 6443
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_v6_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
probe_id = azurerm_lb_probe.public_lb_probe_api_internal[0].id
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_outbound_rule_v4" {
|
||||
count = var.private && var.use_ipv4 ? 1 : 0
|
||||
|
||||
name = "internal_outbound_rule_v4"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool_v4[0].id
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
frontend_port = 27627
|
||||
backend_port = 27627
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_configuration_name
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_v4_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
}
|
||||
|
||||
resource "azurerm_lb_rule" "internal_outbound_rule_v6" {
|
||||
count = var.private && var.use_ipv6 ? 1 : 0
|
||||
|
||||
name = "internal_outbound_rule_v6"
|
||||
resource_group_name = var.resource_group_name
|
||||
protocol = "Tcp"
|
||||
backend_address_pool_id = azurerm_lb_backend_address_pool.master_public_lb_pool_v6[0].id
|
||||
loadbalancer_id = azurerm_lb.public.id
|
||||
frontend_port = 27627
|
||||
backend_port = 27627
|
||||
frontend_ip_configuration_name = local.public_lb_frontend_ip_v6_configuration_name
|
||||
enable_floating_ip = false
|
||||
idle_timeout_in_minutes = 30
|
||||
load_distribution = "Default"
|
||||
|
|
23  vendor/github.com/openshift/installer/data/data/azure/vnet/variables.tf  (generated, vendored)
@@ -1,5 +1,9 @@
variable "vnet_cidr" {
  type = string
variable "vnet_v4_cidrs" {
  type = list(string)
}

variable "vnet_v6_cidrs" {
  type = list(string)
}

variable "resource_group_name" {

@@ -57,3 +61,18 @@ variable "private" {
  type = bool
  description = "The determines if this is a private/internal cluster or not."
}

variable "use_ipv4" {
  type = bool
  description = "This value determines if this is cluster should use IPv4 networking."
}

variable "use_ipv6" {
  type = bool
  description = "This value determines if this is cluster should use IPv6 networking."
}

variable "emulate_single_stack_ipv6" {
  type = bool
  description = "This determines whether a dual-stack cluster is configured to emulate single-stack IPv6."
}
@@ -4,7 +4,7 @@ resource "azurerm_virtual_network" "cluster_vnet" {
  name = var.virtual_network_name
  resource_group_name = var.resource_group_name
  location = var.region
  address_space = [var.vnet_cidr]
  address_space = concat(var.vnet_v4_cidrs, var.vnet_v6_cidrs)
}

resource "azurerm_route_table" "route_table" {

@@ -16,8 +16,11 @@ resource "azurerm_route_table" "route_table" {
resource "azurerm_subnet" "master_subnet" {
  count = var.preexisting_network ? 0 : 1

  resource_group_name = var.resource_group_name
  address_prefix = local.master_subnet_cidr
  resource_group_name = var.resource_group_name
  address_prefixes = [for cidr in [
    { value : local.master_subnet_cidr_v4, include : var.use_ipv4 },
    { value : local.master_subnet_cidr_v6, include : var.use_ipv6 }
  ] : cidr.value if cidr.include]
  virtual_network_name = local.virtual_network
  name = var.master_subnet
}

@@ -25,8 +28,11 @@ resource "azurerm_subnet" "master_subnet" {
resource "azurerm_subnet" "worker_subnet" {
  count = var.preexisting_network ? 0 : 1

  resource_group_name = var.resource_group_name
  address_prefix = local.worker_subnet_cidr
  resource_group_name = var.resource_group_name
  address_prefixes = [for cidr in [
    { value : local.worker_subnet_cidr_v4, include : var.use_ipv4 },
    { value : local.worker_subnet_cidr_v6, include : var.use_ipv6 }
  ] : cidr.value if cidr.include]
  virtual_network_name = local.virtual_network
  name = var.worker_subnet
}
20  vendor/github.com/openshift/installer/data/data/bootstrap/baremetal/files/etc/NetworkManager/dispatcher.d/30-local-dns-prepender  (generated, vendored, executable file)
@@ -0,0 +1,20 @@
#!/bin/bash
IFACE=$1
STATUS=$2
case "$STATUS" in
    up)
    logger -s "NM local-dns-prepender triggered by ${1} ${2}."
    DNS_IP="127.0.0.1"
    set +e
    logger -s "NM local-dns-prepender: Checking if local DNS IP is the first entry in resolv.conf"
    if grep nameserver /etc/resolv.conf | head -n 1 | grep -q "$DNS_IP" ; then
        logger -s "NM local-dns-prepender: local DNS IP already is the first entry in resolv.conf"
        exit 0
    else
        logger -s "NM local-dns-prepender: Looking for '# Generated by NetworkManager' in /etc/resolv.conf to place 'nameserver $DNS_IP'"
        sed -i "/^# Generated by.*$/a nameserver $DNS_IP" /etc/resolv.conf
    fi
    ;;
    *)
    ;;
esac
7  vendor/github.com/openshift/installer/data/data/bootstrap/baremetal/files/etc/dhcp/dhclient.conf  (generated, vendored)
@@ -1,7 +0,0 @@
# Specifies that the bootstrap node should use its own local DNS server for
# name resolution.
#
# For more information, see installer/data/data/bootstrap/baremetal/README.md
#

prepend domain-name-servers 127.0.0.1;
8  vendor/github.com/openshift/installer/data/data/bootstrap/files/usr/local/bin/bootkube.sh.template  (generated, vendored)
@@ -181,6 +181,11 @@ then

  rm --recursive --force mco-bootstrap

  ADDITIONAL_FLAGS=""
  if [ -f "/opt/openshift/tls/cloud-ca-cert.pem" ]; then
    ADDITIONAL_FLAGS="--cloud-provider-ca-file=/assets/tls/cloud-ca-cert.pem"
  fi

  bootkube_podman_run \
    --user 0 \
    --volume "$PWD:/assets:z" \

@@ -203,7 +208,8 @@ then
    --mdns-publisher-image="${MDNS_PUBLISHER_IMAGE}" \
    --haproxy-image="${HAPROXY_IMAGE}" \
    --baremetal-runtimecfg-image="${BAREMETAL_RUNTIMECFG_IMAGE}" \
    --cloud-config-file=/assets/manifests/cloud-provider-config.yaml
    --cloud-config-file=/assets/manifests/cloud-provider-config.yaml \
    ${ADDITIONAL_FLAGS}

  # Bootstrap MachineConfigController uses /etc/mcc/bootstrap/manifests/ dir to
  # 1. read the controller config rendered by MachineConfigOperator
@@ -83,3 +83,20 @@ EOF

}

variable "use_ipv4" {
  type = bool

  description = <<EOF
Should the cluster be created with ipv4 networking.
EOF

}

variable "use_ipv6" {
  type = bool

  description = <<EOF
Should the cluster be created with ipv6 networking.
EOF

}
@@ -8,6 +8,8 @@ resource "google_dns_managed_zone" "int" {
      network_url = var.network
    }
  }

  depends_on = [google_dns_record_set.api_external]
}

resource "google_dns_record_set" "api_external" {
52  vendor/github.com/openshift/installer/data/data/gcp/network/firewall.tf  (generated, vendored)
@@ -119,6 +119,18 @@ resource "google_compute_firewall" "internal_cluster" {
    ports = ["10250"]
  }

  # services tcp
  allow {
    protocol = "tcp"
    ports = ["30000-32767"]
  }

  # services udp
  allow {
    protocol = "udp"
    ports = ["30000-32767"]
  }

  source_tags = [
    "${var.cluster_id}-master",
    "${var.cluster_id}-worker"

@@ -128,43 +140,3 @@ resource "google_compute_firewall" "internal_cluster" {
    "${var.cluster_id}-worker"
  ]
}

resource "google_compute_firewall" "internal_services_master" {
  name = "${var.cluster_id}-internal-services-master"
  network = local.cluster_network

  # services tcp
  allow {
    protocol = "tcp"
    ports = ["30000-32767"]
  }

  # services udp
  allow {
    protocol = "udp"
    ports = ["30000-32767"]
  }

  source_tags = ["${var.cluster_id}-master"]
  target_tags = ["${var.cluster_id}-master"]
}

resource "google_compute_firewall" "internal_services_worker" {
  name = "${var.cluster_id}-internal-services-worker"
  network = local.cluster_network

  # services tcp
  allow {
    protocol = "tcp"
    ports = ["30000-32767"]
  }

  # services udp
  allow {
    protocol = "udp"
    ports = ["30000-32767"]
  }

  source_tags = ["${var.cluster_id}-worker"]
  target_tags = ["${var.cluster_id}-worker"]
}
16  vendor/github.com/openshift/installer/data/data/openstack/bootstrap/main.tf  (generated, vendored)
@@ -24,13 +24,6 @@ resource "openstack_networking_port_v2" "bootstrap_port" {
  }
}

resource "openstack_networking_floatingip_v2" "bootstrap_fip" {
  description = "${var.cluster_id}-bootstrap-fip"
  pool = var.external_network
  port_id = openstack_networking_port_v2.bootstrap_port.id
  tags = ["openshiftClusterID=${var.cluster_id}"]
}

data "openstack_compute_flavor_v2" "bootstrap_flavor" {
  name = var.flavor_name
}

@@ -52,3 +45,12 @@ resource "openstack_compute_instance_v2" "bootstrap" {
    openshiftClusterID = var.cluster_id
  }
}

resource "openstack_networking_floatingip_v2" "bootstrap_fip" {
  description = "${var.cluster_id}-bootstrap-fip"
  pool = var.external_network
  port_id = openstack_networking_port_v2.bootstrap_port.id
  tags = ["openshiftClusterID=${var.cluster_id}"]

  depends_on = ["openstack_compute_instance_v2.bootstrap"]
}
vendor/github.com/openshift/installer/data/data/openstack/bootstrap/variables.tf (1 change; generated, vendored)
@@ -62,4 +62,3 @@ variable "nodes_subnet_id" {
variable "cluster_domain" {
  type = string
}
vendor/github.com/openshift/installer/data/data/openstack/topology/sg-master.tf (82 changes; generated, vendored)
@@ -3,6 +3,16 @@ resource "openstack_networking_secgroup_v2" "master" {
  tags = ["openshiftClusterID=${var.cluster_id}"]
}

// We can't create all security group rules at once because it may lead to
// conflicts in Neutron. Therefore we have to create rules sequentially by
// setting explicit dependencies between them.
// For more information: https://github.com/hashicorp/terraform/issues/7519

// FIXME(mfedosin): ideally we need to resolve this in the OpenStack Terraform
// provider.
// Remove the dependencies when https://github.com/terraform-providers/terraform-provider-openstack/issues/952
// is fixed.

resource "openstack_networking_secgroup_rule_v2" "master_mcs" {
  direction = "ingress"
  ethertype = "IPv4"
@ -24,6 +34,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_icmp" {
|
|||
# FIXME(mandre) AWS only allows ICMP from cidr_block
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_mcs"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_ssh" {
|
||||
|
@ -35,6 +47,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_ssh" {
|
|||
# FIXME(mandre) AWS only allows SSH from cidr_block
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_icmp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_tcp" {
|
||||
|
@ -45,6 +59,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_tcp" {
|
|||
port_range_max = 53
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_ssh"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_udp" {
|
||||
|
@ -55,6 +71,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_dns_udp" {
|
|||
port_range_max = 53
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_dns_tcp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_mdns_udp" {
|
||||
|
@ -65,6 +83,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_mdns_udp" {
|
|||
port_range_max = 5353
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_dns_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_https" {
|
||||
|
@ -76,6 +96,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_https" {
|
|||
# FIXME(mandre) AWS only allows API port from cidr_block
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_mdns_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan" {
|
||||
|
@ -86,6 +108,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan" {
|
|||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_https"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan_from_worker" {
|
||||
|
@ -96,6 +120,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_vxlan_from_work
|
|||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_vxlan"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve" {
|
||||
|
@ -106,6 +132,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve" {
|
|||
port_range_max = 6081
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_vxlan_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve_from_worker" {
|
||||
|
@ -116,6 +144,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_geneve_from_wor
|
|||
port_range_max = 6081
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_geneve"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb" {
|
||||
|
@ -126,6 +156,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb" {
|
|||
port_range_max = 6642
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_geneve_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb_from_worker" {
|
||||
|
@ -136,6 +168,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_ovndb_from_work
|
|||
port_range_max = 6642
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_ovndb"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal" {
|
||||
|
@ -146,6 +180,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal" {
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_ovndb_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_from_worker" {
|
||||
|
@ -156,6 +192,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_from_w
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_internal"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_udp" {
|
||||
|
@ -166,6 +204,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_udp" {
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_internal_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_from_worker_udp" {
|
||||
|
@ -176,6 +216,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_internal_from_w
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_internal_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler" {
|
||||
|
@ -186,6 +228,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler"
|
|||
port_range_max = 10259
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_internal_from_worker_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler_from_worker" {
|
||||
|
@ -196,6 +240,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_scheduler_
|
|||
port_range_max = 10259
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kube_scheduler"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller_manager" {
|
||||
|
@ -206,6 +252,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller
|
|||
port_range_max = 10257
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kube_scheduler_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller_manager_from_worker" {
|
||||
|
@ -216,6 +264,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kube_controller
|
|||
port_range_max = 10257
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kube_controller_manager"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure" {
|
||||
|
@ -226,6 +276,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure"
|
|||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kube_controller_manager_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure_from_worker" {
|
||||
|
@ -236,6 +288,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_kubelet_secure_
|
|||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kubelet_secure"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_etcd" {
|
||||
|
@ -246,6 +300,8 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_etcd" {
|
|||
port_range_max = 2380
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_kubelet_secure_from_worker"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_tcp" {
|
||||
|
@ -256,6 +312,18 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_tcp" {
|
|||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_etcd"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_tcp_from_worker" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 30000
|
||||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_udp" {
|
||||
|
@ -266,6 +334,18 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_udp" {
|
|||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_services_tcp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_services_udp_from_worker" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "udp"
|
||||
port_range_min = 30000
|
||||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "master_ingress_vrrp" {
|
||||
|
@ -274,5 +354,7 @@ resource "openstack_networking_secgroup_rule_v2" "master_ingress_vrrp" {
|
|||
protocol = "vrrp"
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.master.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_services_udp"]
|
||||
}
|
||||
|
||||
|
|
vendor/github.com/openshift/installer/data/data/openstack/topology/sg-worker.tf (68 changes; generated, vendored)
@ -5,6 +5,16 @@ resource "openstack_networking_secgroup_v2" "worker" {
|
|||
|
||||
# TODO(mandre) Explicitely enable egress
|
||||
|
||||
// We can't create all security group rules at once because it may lead to
|
||||
// conflicts in Neutron. Therefore we have to create rules sequentially by
|
||||
// setting explicit dependencies between them.
|
||||
// For more information: https://github.com/hashicorp/terraform/issues/7519
|
||||
|
||||
// FIXME(mfedosin): ideally we need to resolve this in the OpenStack Terraform
|
||||
// provider.
|
||||
// Remove the dependencies when https://github.com/terraform-providers/terraform-provider-openstack/issues/952
|
||||
// is fixed.
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_icmp" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
|
@ -14,6 +24,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_icmp" {
|
|||
# FIXME(mandre) AWS only allows ICMP from cidr_block
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.master_ingress_vrrp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ssh" {
|
||||
|
@ -25,6 +37,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_ssh" {
|
|||
# FIXME(mandre) AWS only allows SSH from cidr_block
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_icmp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_mdns_udp" {
|
||||
|
@ -35,6 +49,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_mdns_udp" {
|
|||
port_range_max = 5353
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_ssh"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_http" {
|
||||
|
@ -45,6 +61,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_http" {
|
|||
port_range_max = 80
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_mdns_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_https" {
|
||||
|
@ -55,6 +73,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_https" {
|
|||
port_range_max = 443
|
||||
remote_ip_prefix = "0.0.0.0/0"
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_http"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_router" {
|
||||
|
@ -65,6 +85,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_router" {
|
|||
port_range_max = 1936
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_https"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan" {
|
||||
|
@ -75,6 +97,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan" {
|
|||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_router"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan_from_master" {
|
||||
|
@ -85,6 +109,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vxlan_from_mast
|
|||
port_range_max = 4789
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_vxlan"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve" {
|
||||
|
@ -95,6 +121,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve" {
|
|||
port_range_max = 6081
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_vxlan_from_master"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve_from_master" {
|
||||
|
@ -105,6 +133,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_geneve_from_mas
|
|||
port_range_max = 6081
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_geneve"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal" {
|
||||
|
@ -115,6 +145,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal" {
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_geneve_from_master"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_from_master" {
|
||||
|
@ -125,6 +157,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_from_m
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_internal"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_udp" {
|
||||
|
@ -135,6 +169,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_udp" {
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_internal_from_master"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_from_master_udp" {
|
||||
|
@ -145,6 +181,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_internal_from_m
|
|||
port_range_max = 9999
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_internal_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecure" {
|
||||
|
@ -155,6 +193,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecur
|
|||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_internal_from_master_udp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecure_from_master" {
|
||||
|
@ -165,6 +205,8 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_kubelet_insecur
|
|||
port_range_max = 10250
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_kubelet_insecure"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_tcp" {
|
||||
|
@ -175,6 +217,18 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_tcp" {
|
|||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_kubelet_insecure_from_master"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_tcp_from_master" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "tcp"
|
||||
port_range_min = 30000
|
||||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_udp" {
|
||||
|
@ -185,6 +239,18 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_udp" {
|
|||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_services_tcp"]
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_services_udp_from_master" {
|
||||
direction = "ingress"
|
||||
ethertype = "IPv4"
|
||||
protocol = "udp"
|
||||
port_range_min = 30000
|
||||
port_range_max = 32767
|
||||
remote_group_id = openstack_networking_secgroup_v2.master.id
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
}
|
||||
|
||||
resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vrrp" {
|
||||
|
@ -193,4 +259,6 @@ resource "openstack_networking_secgroup_rule_v2" "worker_ingress_vrrp" {
|
|||
protocol = "vrrp"
|
||||
remote_ip_prefix = var.cidr_block
|
||||
security_group_id = openstack_networking_secgroup_v2.worker.id
|
||||
|
||||
depends_on = ["openstack_networking_secgroup_rule_v2.worker_ingress_services_udp"]
|
||||
}
|
||||
|
|
vendor/github.com/openshift/installer/data/data/rhcos-amd64.json (new file, 135 lines; generated, vendored)
@ -0,0 +1,135 @@
|
|||
{
|
||||
"amis": {
|
||||
"ap-northeast-1": {
|
||||
"hvm": "ami-0ade724aa9d3514b2"
|
||||
},
|
||||
"ap-northeast-2": {
|
||||
"hvm": "ami-0465f2a5450aa0257"
|
||||
},
|
||||
"ap-south-1": {
|
||||
"hvm": "ami-05a3e4b22ecffdf62"
|
||||
},
|
||||
"ap-southeast-1": {
|
||||
"hvm": "ami-00df6135b05c0a02a"
|
||||
},
|
||||
"ap-southeast-2": {
|
||||
"hvm": "ami-075295f492dbaa347"
|
||||
},
|
||||
"ca-central-1": {
|
||||
"hvm": "ami-0a27aa00147a3a2d9"
|
||||
},
|
||||
"eu-central-1": {
|
||||
"hvm": "ami-0e8ca170012209d72"
|
||||
},
|
||||
"eu-north-1": {
|
||||
"hvm": "ami-0c736720637f6b42d"
|
||||
},
|
||||
"eu-west-1": {
|
||||
"hvm": "ami-0770d1d7e95da7ba3"
|
||||
},
|
||||
"eu-west-2": {
|
||||
"hvm": "ami-08499730d4db69065"
|
||||
},
|
||||
"eu-west-3": {
|
||||
"hvm": "ami-0658bcfda04098635"
|
||||
},
|
||||
"me-south-1": {
|
||||
"hvm": "ami-0ea763ffb3e1c62cf"
|
||||
},
|
||||
"sa-east-1": {
|
||||
"hvm": "ami-0298057a4a12e874a"
|
||||
},
|
||||
"us-east-1": {
|
||||
"hvm": "ami-0523c75e911667e58"
|
||||
},
|
||||
"us-east-2": {
|
||||
"hvm": "ami-0d8f77b753c0d96dd"
|
||||
},
|
||||
"us-west-1": {
|
||||
"hvm": "ami-0782247660ad3a3bb"
|
||||
},
|
||||
"us-west-2": {
|
||||
"hvm": "ami-0f0fac946d1d31e97"
|
||||
}
|
||||
},
|
||||
"azure": {
|
||||
"image": "rhcos-43.81.202003111353.0-azure.x86_64.vhd",
|
||||
"url": "https://rhcos.blob.core.windows.net/imagebucket/rhcos-43.81.202003111353.0-azure.x86_64.vhd"
|
||||
},
|
||||
"baseURI": "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3/43.81.202003111353.0/x86_64/",
|
||||
"buildid": "43.81.202003111353.0",
|
||||
"gcp": {
|
||||
"image": "rhcos-43-81-202003111353-0",
|
||||
"url": "https://storage.googleapis.com/rhcos/rhcos/43.81.202003111353.0.tar.gz"
|
||||
},
|
||||
"images": {
|
||||
"aws": {
|
||||
"path": "rhcos-43.81.202003111353.0-aws.x86_64.vmdk.gz",
|
||||
"sha256": "e4cbc50409d93fb88d711a89e62c56639579abf804bf2d25b210f43929939000",
|
||||
"size": 814861898,
|
||||
"uncompressed-sha256": "2383f9687db4b2f40bf70f2a9750f651c135d09973f7e7f7ed02ac05179e0ea2",
|
||||
"uncompressed-size": 831565312
|
||||
},
|
||||
"azure": {
|
||||
"path": "rhcos-43.81.202003111353.0-azure.x86_64.vhd.gz",
|
||||
"sha256": "a2c75bfb3f1c75bd21bba1d669631b05692c1a91a88802bbcd7a3218e1834ff6",
|
||||
"size": 802153013,
|
||||
"uncompressed-sha256": "bd427aaa3fab89261ac565a89b0b6d066e3a559e2d62d1f6cb749294963162df",
|
||||
"uncompressed-size": 2189996544
|
||||
},
|
||||
"gcp": {
|
||||
"path": "rhcos-43.81.202003111353.0-gcp.x86_64.tar.gz",
|
||||
"sha256": "8baade8d055181d538f75e00367a860c9197c684924062919eb982f5101d27d1",
|
||||
"size": 801779472
|
||||
},
|
||||
"initramfs": {
|
||||
"path": "rhcos-43.81.202003111353.0-installer-initramfs.x86_64.img",
|
||||
"sha256": "fa01f1eeeaf6924d8d20bf5834d3853985167b670a0de30a32bc80d3f8c700d4"
|
||||
},
|
||||
"iso": {
|
||||
"path": "rhcos-43.81.202003111353.0-installer.x86_64.iso",
|
||||
"sha256": "b10975f240769e6f606981be4fe4740536522f7afefe30c95d93f059db48c756"
|
||||
},
|
||||
"kernel": {
|
||||
"path": "rhcos-43.81.202003111353.0-installer-kernel-x86_64",
|
||||
"sha256": "4d7f7b0a631a8f3fd34c9d39e7a037655871f05d503af240e7647a5f4e6490c9"
|
||||
},
|
||||
"metal": {
|
||||
"path": "rhcos-43.81.202003111353.0-metal.x86_64.raw.gz",
|
||||
"sha256": "de35f0e0b75c907c805aef687120b34c00e158bd5afaaf7aa60097fe3ed65480",
|
||||
"size": 803474932,
|
||||
"uncompressed-sha256": "30e867fb2c2490873276c175e178d77646dd304c91e05d8f99493ebfb16c2fef",
|
||||
"uncompressed-size": 3369074688
|
||||
},
|
||||
"openstack": {
|
||||
"path": "rhcos-43.81.202003111353.0-openstack.x86_64.qcow2.gz",
|
||||
"sha256": "8f17baa5564450eea4d3b6f817df3df58af7c3294583be62de615663c0ec55a5",
|
||||
"size": 803742118,
|
||||
"uncompressed-sha256": "4d204e638d365d9de121f5d513cff2567abd9232710f4bb79992efa4ba718008",
|
||||
"uncompressed-size": 2148728832
|
||||
},
|
||||
"ostree": {
|
||||
"path": "rhcos-43.81.202003111353.0-ostree.x86_64.tar",
|
||||
"sha256": "c1501350436424ec6d7a805c52b3fc665fe490912f5a16d94bf267c9efa2848f",
|
||||
"size": 722647040
|
||||
},
|
||||
"qemu": {
|
||||
"path": "rhcos-43.81.202003111353.0-qemu.x86_64.qcow2.gz",
|
||||
"sha256": "cd3260155e494efdb38d0b3019a29980675bff2fee05a80162bd7a587a9bdba6",
|
||||
"size": 804202741,
|
||||
"uncompressed-sha256": "bee078cfef57f51d11dcdc7211185e5e85016e044081f3aec9b42637ebd05fec",
|
||||
"uncompressed-size": 2148663296
|
||||
},
|
||||
"vmware": {
|
||||
"path": "rhcos-43.81.202003111353.0-vmware.x86_64.ova",
|
||||
"sha256": "c60c94b3ee918379230c63ca18ea144fed57088bc51eee5f12cf839ceb6c1fb6",
|
||||
"size": 831580160
|
||||
}
|
||||
},
|
||||
"oscontainer": {
|
||||
"digest": "sha256:eb81a7625f9fc3d1575f92dd4e825b02ec6e362c88a1bd6e048c789a7f965771",
|
||||
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
},
|
||||
"ostree-commit": "86e3934e5a039782f1f1df0f827ce00be7572f9be2441e0d7631a20dff9b2933",
|
||||
"ostree-version": "43.81.202003111353.0"
|
||||
}
|
vendor/github.com/openshift/installer/data/data/rhcos-ppc64le.json (new file, 50 lines; generated, vendored)
@ -0,0 +1,50 @@
|
|||
{
|
||||
"baseURI": "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3-ppc64le/43.81.202003172241.0/ppc64le/",
|
||||
"buildid": "43.81.202003172241.0",
|
||||
"images": {
|
||||
"initramfs": {
|
||||
"path": "rhcos-43.81.202003172241.0-installer-initramfs.ppc64le.img",
|
||||
"sha256": "451bfc5b1a872b92c3567d0892eac101bc43bc2d9934c4d6933c379d1936b409"
|
||||
},
|
||||
"iso": {
|
||||
"path": "rhcos-43.81.202003172241.0-installer.ppc64le.iso",
|
||||
"sha256": "9debdc0bfe2380a2ee057cb1db686f80e95ea9d1c0e45ea8ca70a956467622e8"
|
||||
},
|
||||
"kernel": {
|
||||
"path": "rhcos-43.81.202003172241.0-installer-kernel-ppc64le",
|
||||
"sha256": "5a78b5099da27cbe6d22032c8e55542c1b2885e8c1e69206f7fc390dae9897b7"
|
||||
},
|
||||
"metal": {
|
||||
"path": "rhcos-43.81.202003172241.0-metal.ppc64le.raw.gz",
|
||||
"sha256": "0f62f700d805e104e6d5f7743a42101050ab390f18005c11991c80e7a8a42fd6",
|
||||
"size": 775658410,
|
||||
"uncompressed-sha256": "2d9678ffbd09ea9f1536f981e9104c5bc26e6326e9c75aa1b162957f7c83f661",
|
||||
"uncompressed-size": 3486515200
|
||||
},
|
||||
"openstack": {
|
||||
"path": "rhcos-43.81.202003172241.0-openstack.ppc64le.qcow2.gz",
|
||||
"sha256": "a9c35106ba9ae2c7bf0543d460b5f9278f0624bd39e121b3baaecead68b1326a",
|
||||
"size": 774596933,
|
||||
"uncompressed-sha256": "0af1fecc742a46d376a053783d46210188fc77c93e4186e8aa3586d7b09ed0f1",
|
||||
"uncompressed-size": 2246377472
|
||||
},
|
||||
"ostree": {
|
||||
"path": "rhcos-43.81.202003172241.0-ostree.ppc64le.tar",
|
||||
"sha256": "57da39c5b22ea077e2f7835ca8b140d55c1154c376b911bbf22cd103480f357e",
|
||||
"size": 692090880
|
||||
},
|
||||
"qemu": {
|
||||
"path": "rhcos-43.81.202003172241.0-qemu.ppc64le.qcow2.gz",
|
||||
"sha256": "cc7b2ae27936cd67be3338432a11f5b24f36edcef0c6c43c5c2807bb20f4ae1c",
|
||||
"size": 775059270,
|
||||
"uncompressed-sha256": "caef1dc9cc9d08f56241c37a77e08f90d4f72d448569caead522ae5aba239c8f",
|
||||
"uncompressed-size": 2246311936
|
||||
}
|
||||
},
|
||||
"oscontainer": {
|
||||
"digest": "sha256:d61cfef76f88a5d3028d28c80b9ecbbb2a20c3dc152d66566375fb5589e9105e",
|
||||
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
},
|
||||
"ostree-commit": "2212098f5ea4361b60987472185f0406e8d78253b5b16399610809abe74d26cc",
|
||||
"ostree-version": "43.81.202003172241.0"
|
||||
}
|
vendor/github.com/openshift/installer/data/data/rhcos-s390x.json (new file, 57 lines; generated, vendored)
@ -0,0 +1,57 @@
|
|||
{
|
||||
"baseURI": "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3-s390x/43.81.202003172338.0/s390x/",
|
||||
"buildid": "43.81.202003172338.0",
|
||||
"images": {
|
||||
"dasd": {
|
||||
"path": "rhcos-43.81.202003172338.0-dasd.s390x.raw.gz",
|
||||
"sha256": "6b2e0ac527cc33f47154e125be51542291657fea3c51d7e30000a79b46d27446",
|
||||
"size": 702271061,
|
||||
"uncompressed-sha256": "789d52cb84fdfd7d5167f6a6b4e1b0803c7696bf96777e7c4911caf6b5ad31a1",
|
||||
"uncompressed-size": 3171942400
|
||||
},
|
||||
"initramfs": {
|
||||
"path": "rhcos-43.81.202003172338.0-installer-initramfs.s390x.img",
|
||||
"sha256": "fcb4958ab779bc2d59a451deb62e62b929b7d2fb2b404c698d04968c69ee82ea"
|
||||
},
|
||||
"iso": {
|
||||
"path": "rhcos-43.81.202003172338.0-installer.s390x.iso",
|
||||
"sha256": "1df3e0cbe8e15c27b3133e59df4a19fd23b01743f37f7cfc5b2283f2129e4cbc"
|
||||
},
|
||||
"kernel": {
|
||||
"path": "rhcos-43.81.202003172338.0-installer-kernel-s390x",
|
||||
"sha256": "02084b138d77182a3a58dc2e290ab47ec412c5a0441f6d2989c02a89d60f613a"
|
||||
},
|
||||
"metal": {
|
||||
"path": "rhcos-43.81.202003172338.0-metal.s390x.raw.gz",
|
||||
"sha256": "5e9f11b6c0c310daecabe75fd3435eeb810f9283d322e9efc890e5d8a97c0202",
|
||||
"size": 702238370,
|
||||
"uncompressed-sha256": "884be2265e5bc9f310874aed3d093dd4deed187caa240a32672d315bbfeddafa",
|
||||
"uncompressed-size": 3171942400
|
||||
},
|
||||
"openstack": {
|
||||
"path": "rhcos-43.81.202003172338.0-openstack.s390x.qcow2.gz",
|
||||
"sha256": "9784da7eee7b7c8d07d68da3900634ad17e3d59c86b4eb79554b8a8b2b4fcf11",
|
||||
"size": 702634988,
|
||||
"uncompressed-sha256": "59d290e40071298b852d233ebcbae9df527836729d2b35862768e0b7e27857d5",
|
||||
"uncompressed-size": 1978335232
|
||||
},
|
||||
"ostree": {
|
||||
"path": "rhcos-43.81.202003172338.0-ostree.s390x.tar",
|
||||
"sha256": "e85d4cff77aeb3e8fd4c04e3ae578be017995ba16975a62256f68515186ec3a1",
|
||||
"size": 642109440
|
||||
},
|
||||
"qemu": {
|
||||
"path": "rhcos-43.81.202003172338.0-qemu.s390x.qcow2.gz",
|
||||
"sha256": "5384458dcaa5e8a8795355046106335fda9a7c079cd165addf7729989afd9377",
|
||||
"size": 703123878,
|
||||
"uncompressed-sha256": "717e3d74403e56878961d4e654a161c088c9c1ccf94bbd407fd273b302db376d",
|
||||
"uncompressed-size": 1978204160
|
||||
}
|
||||
},
|
||||
"oscontainer": {
|
||||
"digest": "sha256:0b513016131fdab7760dee9ffeab65f7b1c4a928123e81367a0c34c4393a2c0e",
|
||||
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
},
|
||||
"ostree-commit": "fed64caf6e88503e9190e84fbb9a57ab63783e12ab3895bf67e68f66d11b84d1",
|
||||
"ostree-version": "43.81.202003172338.0"
|
||||
}
|
|
@ -1,135 +1,135 @@
|
|||
{
|
||||
"amis": {
|
||||
"ap-northeast-1": {
|
||||
"hvm": "ami-023d0452866845125"
|
||||
"hvm": "ami-0ade724aa9d3514b2"
|
||||
},
|
||||
"ap-northeast-2": {
|
||||
"hvm": "ami-0ba4f9a0358bcb44a"
|
||||
"hvm": "ami-0465f2a5450aa0257"
|
||||
},
|
||||
"ap-south-1": {
|
||||
"hvm": "ami-0bf62e963a473068e"
|
||||
"hvm": "ami-05a3e4b22ecffdf62"
|
||||
},
|
||||
"ap-southeast-1": {
|
||||
"hvm": "ami-086b93722336bd1d9"
|
||||
"hvm": "ami-00df6135b05c0a02a"
|
||||
},
|
||||
"ap-southeast-2": {
|
||||
"hvm": "ami-08929f33bfab49b83"
|
||||
"hvm": "ami-075295f492dbaa347"
|
||||
},
|
||||
"ca-central-1": {
|
||||
"hvm": "ami-0f6d943a1fa9172fd"
|
||||
"hvm": "ami-0a27aa00147a3a2d9"
|
||||
},
|
||||
"eu-central-1": {
|
||||
"hvm": "ami-0ceea534b63224411"
|
||||
"hvm": "ami-0e8ca170012209d72"
|
||||
},
|
||||
"eu-north-1": {
|
||||
"hvm": "ami-06b7087b2768f644a"
|
||||
"hvm": "ami-0c736720637f6b42d"
|
||||
},
|
||||
"eu-west-1": {
|
||||
"hvm": "ami-0e95125b57fa63b0d"
|
||||
"hvm": "ami-0770d1d7e95da7ba3"
|
||||
},
|
||||
"eu-west-2": {
|
||||
"hvm": "ami-0eef98c447b85ffcd"
|
||||
"hvm": "ami-08499730d4db69065"
|
||||
},
|
||||
"eu-west-3": {
|
||||
"hvm": "ami-0049e16104f360df6"
|
||||
"hvm": "ami-0658bcfda04098635"
|
||||
},
|
||||
"me-south-1": {
|
||||
"hvm": "ami-0b03ea038629fd02e"
|
||||
"hvm": "ami-0ea763ffb3e1c62cf"
|
||||
},
|
||||
"sa-east-1": {
|
||||
"hvm": "ami-0c80d785b30eef121"
|
||||
"hvm": "ami-0298057a4a12e874a"
|
||||
},
|
||||
"us-east-1": {
|
||||
"hvm": "ami-06f85a7940faa3217"
|
||||
"hvm": "ami-0523c75e911667e58"
|
||||
},
|
||||
"us-east-2": {
|
||||
"hvm": "ami-04a79d8d7cfa540cc"
|
||||
"hvm": "ami-0d8f77b753c0d96dd"
|
||||
},
|
||||
"us-west-1": {
|
||||
"hvm": "ami-0633b392e8eff25e7"
|
||||
"hvm": "ami-0782247660ad3a3bb"
|
||||
},
|
||||
"us-west-2": {
|
||||
"hvm": "ami-0d231993dddc5cd2e"
|
||||
"hvm": "ami-0f0fac946d1d31e97"
|
||||
}
|
||||
},
|
||||
"azure": {
|
||||
"image": "rhcos-43.81.202001142154.0-azure.x86_64.vhd",
|
||||
"url": "https://rhcos.blob.core.windows.net/imagebucket/rhcos-43.81.202001142154.0-azure.x86_64.vhd"
|
||||
"image": "rhcos-43.81.202003111353.0-azure.x86_64.vhd",
|
||||
"url": "https://rhcos.blob.core.windows.net/imagebucket/rhcos-43.81.202003111353.0-azure.x86_64.vhd"
|
||||
},
|
||||
"baseURI": "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3/43.81.202001142154.0/x86_64/",
|
||||
"buildid": "43.81.202001142154.0",
|
||||
"baseURI": "https://releases-art-rhcos.svc.ci.openshift.org/art/storage/releases/rhcos-4.3/43.81.202003111353.0/x86_64/",
|
||||
"buildid": "43.81.202003111353.0",
|
||||
"gcp": {
|
||||
"image": "rhcos-43-81-202001142154-0",
|
||||
"url": "https://storage.googleapis.com/rhcos/rhcos/43.81.202001142154.0.tar.gz"
|
||||
"image": "rhcos-43-81-202003111353-0",
|
||||
"url": "https://storage.googleapis.com/rhcos/rhcos/43.81.202003111353.0.tar.gz"
|
||||
},
|
||||
"images": {
|
||||
"aws": {
|
||||
"path": "rhcos-43.81.202001142154.0-aws.x86_64.vmdk.gz",
|
||||
"sha256": "d1436d2fb0bf19ea13e90b2e3a459441f0145a914fdd32c2ef09836372667035",
|
||||
"size": 813199011,
|
||||
"uncompressed-sha256": "1549d5dceacddf7b0955d372a19e7d49747aed77c06a3a8daa16a3fb401847cf",
|
||||
"uncompressed-size": 829533184
|
||||
"path": "rhcos-43.81.202003111353.0-aws.x86_64.vmdk.gz",
|
||||
"sha256": "e4cbc50409d93fb88d711a89e62c56639579abf804bf2d25b210f43929939000",
|
||||
"size": 814861898,
|
||||
"uncompressed-sha256": "2383f9687db4b2f40bf70f2a9750f651c135d09973f7e7f7ed02ac05179e0ea2",
|
||||
"uncompressed-size": 831565312
|
||||
},
|
||||
"azure": {
|
||||
"path": "rhcos-43.81.202001142154.0-azure.x86_64.vhd.gz",
|
||||
"sha256": "5011e20132838daabb218c9722700fffac6a25b744132e93d2dc6b0091a7c04c",
|
||||
"size": 800063766,
|
||||
"uncompressed-sha256": "fe82be89aa8eae7434ea4c8af9a5b5c85d10148a1dc3301b96d72704562b9208",
|
||||
"uncompressed-size": 2179508224
|
||||
"path": "rhcos-43.81.202003111353.0-azure.x86_64.vhd.gz",
|
||||
"sha256": "a2c75bfb3f1c75bd21bba1d669631b05692c1a91a88802bbcd7a3218e1834ff6",
|
||||
"size": 802153013,
|
||||
"uncompressed-sha256": "bd427aaa3fab89261ac565a89b0b6d066e3a559e2d62d1f6cb749294963162df",
|
||||
"uncompressed-size": 2189996544
|
||||
},
|
||||
"gcp": {
|
||||
"path": "rhcos-43.81.202001142154.0-gcp.x86_64.tar.gz",
|
||||
"sha256": "5173ba725ad867a743f423eafbd8fc476f833d69163edef6fa08d5b63d6de505",
|
||||
"size": 799655869
|
||||
"path": "rhcos-43.81.202003111353.0-gcp.x86_64.tar.gz",
|
||||
"sha256": "8baade8d055181d538f75e00367a860c9197c684924062919eb982f5101d27d1",
|
||||
"size": 801779472
|
||||
},
|
||||
"initramfs": {
|
||||
"path": "rhcos-43.81.202001142154.0-installer-initramfs.x86_64.img",
|
||||
"sha256": "3389babec8afc37023d122efba7785c3aea7797c9b4a038e98a9b90aacef1483"
|
||||
"path": "rhcos-43.81.202003111353.0-installer-initramfs.x86_64.img",
|
||||
"sha256": "fa01f1eeeaf6924d8d20bf5834d3853985167b670a0de30a32bc80d3f8c700d4"
|
||||
},
|
||||
"iso": {
|
||||
"path": "rhcos-43.81.202001142154.0-installer.x86_64.iso",
|
||||
"sha256": "302081da24277ed752fee8d69839227f4f24ec71261f9dfe2752ea8c0f20a0ed"
|
||||
"path": "rhcos-43.81.202003111353.0-installer.x86_64.iso",
|
||||
"sha256": "b10975f240769e6f606981be4fe4740536522f7afefe30c95d93f059db48c756"
|
||||
},
|
||||
"kernel": {
|
||||
"path": "rhcos-43.81.202001142154.0-installer-kernel-x86_64",
|
||||
"sha256": "7ace7ebdb828e1dc4d242b2fb8a360e7b97da7748d2fde4ffa3bd30232c04865"
|
||||
"path": "rhcos-43.81.202003111353.0-installer-kernel-x86_64",
|
||||
"sha256": "4d7f7b0a631a8f3fd34c9d39e7a037655871f05d503af240e7647a5f4e6490c9"
|
||||
},
|
||||
"metal": {
|
||||
"path": "rhcos-43.81.202001142154.0-metal.x86_64.raw.gz",
|
||||
"sha256": "a22248455ad8adc95ae208ffebe4bc4afa1d50048807df884cae02bc794d7ea4",
|
||||
"size": 801591981,
|
||||
"uncompressed-sha256": "3643016762b248b079f4239e6f72948ebdb8d73e1fb1601bc666cbb46c0e238f",
|
||||
"uncompressed-size": 3340763136
|
||||
"path": "rhcos-43.81.202003111353.0-metal.x86_64.raw.gz",
|
||||
"sha256": "de35f0e0b75c907c805aef687120b34c00e158bd5afaaf7aa60097fe3ed65480",
|
||||
"size": 803474932,
|
||||
"uncompressed-sha256": "30e867fb2c2490873276c175e178d77646dd304c91e05d8f99493ebfb16c2fef",
|
||||
"uncompressed-size": 3369074688
|
||||
},
|
||||
"openstack": {
|
||||
"path": "rhcos-43.81.202001142154.0-openstack.x86_64.qcow2.gz",
|
||||
"sha256": "a1bda656fa0892f7b936fdc6b6a6086bddaed5dafacedcd7a1e811abb78fe3b0",
|
||||
"size": 801466080,
|
||||
"uncompressed-sha256": "504b9008adf89bb3d05b75d393e057c6d66ba6c92cf631ca4445d99bbf7e2a57",
|
||||
"uncompressed-size": 2131492864
|
||||
"path": "rhcos-43.81.202003111353.0-openstack.x86_64.qcow2.gz",
|
||||
"sha256": "8f17baa5564450eea4d3b6f817df3df58af7c3294583be62de615663c0ec55a5",
|
||||
"size": 803742118,
|
||||
"uncompressed-sha256": "4d204e638d365d9de121f5d513cff2567abd9232710f4bb79992efa4ba718008",
|
||||
"uncompressed-size": 2148728832
|
||||
},
|
||||
"ostree": {
|
||||
"path": "rhcos-43.81.202001142154.0-ostree.x86_64.tar",
|
||||
"sha256": "bc9460975c9a3db1f39428129dab87796dc1d2739c38289c5156ceb710ae1e10",
|
||||
"size": 720916480
|
||||
"path": "rhcos-43.81.202003111353.0-ostree.x86_64.tar",
|
||||
"sha256": "c1501350436424ec6d7a805c52b3fc665fe490912f5a16d94bf267c9efa2848f",
|
||||
"size": 722647040
|
||||
},
|
||||
"qemu": {
|
||||
"path": "rhcos-43.81.202001142154.0-qemu.x86_64.qcow2.gz",
|
||||
"sha256": "2da72a1eaee005458014cfbab93f77c459b8c63ff110f0d4cbf4c13e7001de68",
|
||||
"size": 801832578,
|
||||
"uncompressed-sha256": "6dbd3513a824cfe6de157e5d86972c2005b23d71bd8b3a61548f7a1f0a516b68",
|
||||
"uncompressed-size": 2131427328
|
||||
"path": "rhcos-43.81.202003111353.0-qemu.x86_64.qcow2.gz",
|
||||
"sha256": "cd3260155e494efdb38d0b3019a29980675bff2fee05a80162bd7a587a9bdba6",
|
||||
"size": 804202741,
|
||||
"uncompressed-sha256": "bee078cfef57f51d11dcdc7211185e5e85016e044081f3aec9b42637ebd05fec",
|
||||
"uncompressed-size": 2148663296
|
||||
},
|
||||
"vmware": {
|
||||
"path": "rhcos-43.81.202001142154.0-vmware.x86_64.ova",
|
||||
"sha256": "29b98763bc538ec0b7ad39774b643ef69dc0c0fdad25bd0da3078e54ab86253b",
|
||||
"size": 829542400
|
||||
"path": "rhcos-43.81.202003111353.0-vmware.x86_64.ova",
|
||||
"sha256": "c60c94b3ee918379230c63ca18ea144fed57088bc51eee5f12cf839ceb6c1fb6",
|
||||
"size": 831580160
|
||||
}
|
||||
},
|
||||
"oscontainer": {
|
||||
"digest": "sha256:8c4059f184596157f64d69c4edbea9c9ef600560b7804a482779f513c3e0f40e",
|
||||
"digest": "sha256:eb81a7625f9fc3d1575f92dd4e825b02ec6e362c88a1bd6e048c789a7f965771",
|
||||
"image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
|
||||
},
|
||||
"ostree-commit": "23527ffc123c6e2bedf3479ff7e96f38d92cec88d5a7951fa56e9d0ec75ddd77",
|
||||
"ostree-version": "43.81.202001142154.0"
|
||||
"ostree-commit": "86e3934e5a039782f1f1df0f827ce00be7572f9be2441e0d7631a20dff9b2933",
|
||||
"ostree-version": "43.81.202003111353.0"
|
||||
}
|
vendor/github.com/openshift/installer/docs/user/azure/customization.md (1 change; generated, vendored)
@@ -29,7 +29,6 @@ The installer can use an existing VNet and subnets when provisioning an OpenShif

When pre-existing subnets are provided, the installer will not create a network security group (NSG) or alter an existing one attached to the subnet. This restriction means that no security rules are created. If multiple clusters are installed to the same VNet and isolation is desired, it must be enforced through an administrative task after the cluster is installed.
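For instance, if two clusters share a VNet and you want to isolate them after the fact, one administrative option is to add deny rules to the NSGs guarding each cluster's subnets. The command below is only an illustrative sketch: the resource group, NSG name, and address prefix are hypothetical placeholders, not values taken from this repository.

```sh
# Illustrative only: deny traffic arriving from a second cluster's machine CIDR
# (10.1.0.0/16 here) at the NSG protecting this cluster's nodes.
az network nsg rule create \
  --resource-group example-cluster-rg \
  --nsg-name example-cluster-nsg \
  --name deny-from-other-cluster \
  --priority 100 \
  --direction Inbound \
  --access Deny \
  --protocol '*' \
  --source-address-prefixes 10.1.0.0/16 \
  --destination-address-prefixes '*' \
  --destination-port-ranges '*'
```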

## Examples

Some example `install-config.yaml` are shown below.
vendor/github.com/openshift/installer/docs/user/azure/install_upi.md (new file, 563 lines; generated, vendored)
@@ -0,0 +1,563 @@
# Install: User Provided Infrastructure (UPI)

The steps for performing a user-provided infrastructure install are outlined here. Several
[Azure Resource Manager][azuretemplates] templates are provided to assist in
completing these steps or to help model your own. You are also free to create
the required resources through other methods; the templates are just an
example.

## Prerequisites

* all prerequisites from [README](README.md)
* the following binaries installed and in $PATH:
  * [openshift-install][openshiftinstall]
    * It is recommended that the OpenShift installer CLI version is the same as the version of the cluster being deployed. The version used in this example is 4.3.0 GA.
  * [az (Azure CLI)][azurecli] installed and authenticated
    * Command flags and structure may vary between `az` versions. The recommended version used in this example is 2.0.80.
  * python3
  * [jq][jqjson]
  * [yq][yqyaml]
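Before starting, it can be worth confirming that the installed tools roughly match the versions assumed above. This is just an optional sanity-check sketch using each tool's standard version flag:

```sh
# Optional: confirm the tooling roughly matches the versions assumed in this guide
openshift-install version
az --version | head -1
python3 --version
jq --version
yq --version
```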
## Create an install config

Create an install configuration as for [the usual approach](install.md#create-configuration).

```console
$ openshift-install create install-config
? SSH Public Key /home/user_id/.ssh/id_rsa.pub
? Platform azure
? azure subscription id xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
? azure tenant id xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
? azure service principal client id xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
? azure service principal client secret xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
INFO Saving user credentials to "/home/user_id/.azure/osServicePrincipal.json"
? Region centralus
? Base Domain example.com
? Cluster Name test
? Pull Secret [? for help]
```

Note that we're going to have a new Virtual Network and subnetworks created specifically for this deployment, but it is also possible to use a networking
infrastructure already existing in your organization. Please refer to the [customization instructions](customization.md) for more details about setting
up an install config for that scenario.
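As a rough sketch of that scenario, the pre-existing networking would be referenced from `install-config.yaml` via the fields described in the customization guide (`networkResourceGroupName`, `virtualNetwork`, `controlPlaneSubnet`, `computeSubnet`). The edit below follows the same `python3 -c` pattern used later in this guide; the resource and subnet names are placeholders, not values from this repository.

```sh
# Hypothetical example: point the install config at pre-existing networking
python3 -c '
import yaml;
path = "install-config.yaml";
data = yaml.full_load(open(path));
azure = data["platform"]["azure"];
azure["networkResourceGroupName"] = "example-network-rg";
azure["virtualNetwork"] = "example-vnet";
azure["controlPlaneSubnet"] = "example-master-subnet";
azure["computeSubnet"] = "example-worker-subnet";
open(path, "w").write(yaml.dump(data, default_flow_style=False))'
```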
### Extract data from install config

Some data from the install configuration file will be used in later steps. Export them as environment variables with:

```sh
export CLUSTER_NAME=`yq -r .metadata.name install-config.yaml`
export AZURE_REGION=`yq -r .platform.azure.region install-config.yaml`
export SSH_KEY=`yq -r .sshKey install-config.yaml | xargs`
export BASE_DOMAIN=`yq -r .baseDomain install-config.yaml`
export BASE_DOMAIN_RESOURCE_GROUP=`yq -r .platform.azure.baseDomainResourceGroupName install-config.yaml`
```

### Empty the compute pool

We'll be providing the compute machines ourselves, so edit the resulting `install-config.yaml` to set `replicas` to 0 for the `compute` pool:

```sh
python3 -c '
import yaml;
path = "install-config.yaml";
data = yaml.full_load(open(path));
data["compute"][0]["replicas"] = 0;
open(path, "w").write(yaml.dump(data, default_flow_style=False))'
```

## Create manifests

Create manifests to enable customizations that are not exposed via the install configuration.

```console
$ openshift-install create manifests
INFO Credentials loaded from file "/home/user_id/.azure/osServicePrincipal.json"
INFO Consuming "Install Config" from target directory
WARNING Making control-plane schedulable by setting MastersSchedulable to true for Scheduler cluster settings
```

### Remove control plane machines and machinesets

Remove the control plane machines and compute machinesets from the manifests.
We'll be providing those ourselves and don't want to involve the [machine-API operator][machine-api-operator].

```sh
rm -f openshift/99_openshift-cluster-api_master-machines-*.yaml
rm -f openshift/99_openshift-cluster-api_worker-machineset-*.yaml
```

### Make control-plane nodes unschedulable

Currently [emptying the compute pools](#empty-the-compute-pool) makes control-plane nodes schedulable.
But due to a [Kubernetes limitation][kubernetes-service-load-balancers-exclude-masters], router pods running on control-plane nodes will not be reachable by the ingress load balancer.
Update the scheduler configuration to keep router pods and other workloads off the control-plane nodes:

```sh
python3 -c '
import yaml;
path = "manifests/cluster-scheduler-02-config.yml";
data = yaml.full_load(open(path));
data["spec"]["mastersSchedulable"] = False;
open(path, "w").write(yaml.dump(data, default_flow_style=False))'
```

### Remove DNS Zones

We don't want [the ingress operator][ingress-operator] to create DNS records (we're going to do it manually) so we need to remove
the `privateZone` and `publicZone` sections from the DNS configuration in manifests.

```sh
python3 -c '
import yaml;
path = "manifests/cluster-dns-02-config.yml";
data = yaml.full_load(open(path));
del data["spec"]["publicZone"];
del data["spec"]["privateZone"];
open(path, "w").write(yaml.dump(data, default_flow_style=False))'
```
### Resource Group Name and Infra ID

The OpenShift cluster has been assigned an identifier in the form of `<cluster_name>-<random_string>`. This identifier, called "Infra ID", will be used as
the base name of most resources that will be created in this example. Export the Infra ID as an environment variable that will be used later in this example:

```sh
export INFRA_ID=`yq -r '.status.infrastructureName' manifests/cluster-infrastructure-02-config.yml`
```

Also, all resources created in this Azure deployment will exist as part of a [resource group][azure-resource-group]. The resource group name is also
based on the Infra ID, in the form of `<cluster_name>-<random_string>-rg`. Export the resource group name to an environment variable that will be used later:

```sh
export RESOURCE_GROUP=`yq -r '.status.platformStatus.azure.resourceGroupName' manifests/cluster-infrastructure-02-config.yml`
```

**Optional:** it's possible to choose any other name for the Infra ID and/or the resource group, but in that case some adjustments in manifests are needed.
A Python script is provided to help with these adjustments. Export the `INFRA_ID` and the `RESOURCE_GROUP` environment variables with the desired names, copy the
[`setup-manifests.py`](../../../upi/azure/setup-manifests.py) script locally and invoke it with:

```sh
python3 setup-manifests.py $RESOURCE_GROUP $INFRA_ID
```
## Create ignition configs

Now we can create the bootstrap ignition configs:

```console
$ openshift-install create ignition-configs
INFO Consuming Openshift Manifests from target directory
INFO Consuming Worker Machines from target directory
INFO Consuming Common Manifests from target directory
INFO Consuming Master Machines from target directory
```

After running the command, several files will be available in the directory.

```console
$ tree
.
├── auth
│   └── kubeconfig
├── bootstrap.ign
├── master.ign
├── metadata.json
└── worker.ign
```
## Create The Resource Group and identity

Use the command below to create the resource group in the selected Azure region:

```sh
az group create --name $RESOURCE_GROUP --location $AZURE_REGION
```

Also, create an identity which will be used to grant the required access to cluster operators:

```sh
az identity create -g $RESOURCE_GROUP -n ${INFRA_ID}-identity
```
## Upload the files to a Storage Account

The deployment steps will read the Red Hat Enterprise Linux CoreOS virtual hard disk (VHD) image and the bootstrap ignition config file
from a blob. Create a storage account that will be used to store them and export its key as an environment variable.

```sh
az storage account create -g $RESOURCE_GROUP --location $AZURE_REGION --name ${CLUSTER_NAME}sa --kind Storage --sku Standard_LRS
export ACCOUNT_KEY=`az storage account keys list -g $RESOURCE_GROUP --account-name ${CLUSTER_NAME}sa --query "[0].value" -o tsv`
```
### Copy the cluster image
|
||||
|
||||
Given the size of the RHCOS VHD, it's not possible to run the deployments with this file stored locally on your machine.
|
||||
We must copy and store it in a storage container instead. To do so, first create a blob storage container and then copy the VHD.
|
||||
|
||||
Choose the RHCOS version you'd like to use and export the URL of its VHD to an environment variable. For example, to use the latest release available for the 4.3 version, use:
|
||||
|
||||
```sh
|
||||
export VHD_URL=`curl -s https://raw.githubusercontent.com/openshift/installer/release-4.3/data/data/rhcos.json | jq -r .azure.url`
|
||||
```
|
||||
|
||||
If you'd just like to use the latest _development_ version available (master branch), use:
|
||||
|
||||
```sh
|
||||
export VHD_URL=`curl -s https://raw.githubusercontent.com/openshift/installer/master/data/data/rhcos.json | jq -r .azure.url`
|
||||
```
|
||||
|
||||
Copy the chosen VHD to a blob:
|
||||
|
||||
```sh
|
||||
az storage container create --name vhd --account-name ${CLUSTER_NAME}sa
|
||||
az storage blob copy start --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY --destination-blob "rhcos.vhd" --destination-container vhd --source-uri "$VHD_URL"
|
||||
```
|
||||
|
||||
To track the progress, you can use:
|
||||
|
||||
```sh
|
||||
status="unknown"
|
||||
while [ "$status" != "success" ]
|
||||
do
|
||||
status=`az storage blob show --container-name vhd --name "rhcos.vhd" --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY -o tsv --query properties.copy.status`
|
||||
echo $status
|
||||
done
|
||||
```
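
The loop above polls the copy status as fast as the CLI can respond. If you would rather not hammer the storage API while the multi-gigabyte copy runs, a variant that pauses between checks works just as well; this is only an illustrative sketch using the same variables exported earlier:

```sh
# Same polling loop, but sleep 10 seconds between status checks
status="unknown"
while [ "$status" != "success" ]
do
  status=`az storage blob show --container-name vhd --name "rhcos.vhd" --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY -o tsv --query properties.copy.status`
  echo $status
  sleep 10
done
```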
|
||||
|
||||
### Upload the bootstrap ignition
|
||||
|
||||
Create a blob storage container and upload the generated `bootstrap.ign` file:
|
||||
|
||||
```sh
|
||||
az storage container create --name files --account-name ${CLUSTER_NAME}sa --public-access blob
|
||||
az storage blob upload --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY -c "files" -f "bootstrap.ign" -n "bootstrap.ign"
|
||||
```
|
||||
|
||||
## Create the DNS zones
|
||||
|
||||
A few DNS records are required for clusters that use user-provisioned infrastructure. Feel free to choose the DNS strategy that fits you best.
|
||||
|
||||
In this example we're going to use [Azure's own DNS solution][azure-dns], so we're going to create a new public DNS zone for external (internet) visibility, and
|
||||
a private DNS zone for internal cluster resolution. Note that the public zone doesn't necessarily need to exist in the same resource group as the
|
||||
cluster deployment itself and may even already exist in your organization for the desired base domain. If that's the case, you can skip the public DNS
|
||||
zone creation step, but make sure the install config generated earlier [reflects that scenario](customization.md#cluster-scoped-properties).
|
||||
|
||||
Create the new *public* DNS zone in the resource group exported in the `BASE_DOMAIN_RESOURCE_GROUP` environment variable, or just skip this step if you're going
|
||||
to use one that already exists in your organization:
|
||||
|
||||
```sh
|
||||
az network dns zone create -g $BASE_DOMAIN_RESOURCE_GROUP -n ${CLUSTER_NAME}.${BASE_DOMAIN}
|
||||
```
|
||||
|
||||
Create the *private* zone in the same resource group as the rest of this deployment:
|
||||
|
||||
```sh
|
||||
az network private-dns zone create -g $RESOURCE_GROUP -n ${CLUSTER_NAME}.${BASE_DOMAIN}
|
||||
```
|
||||
|
||||
## Grant access to the identity
|
||||
|
||||
Grant the *Contributor* role to the Azure identity so that the Ingress Operator can create a public IP and its load balancer. You can do that with:
|
||||
|
||||
```sh
|
||||
export PRINCIPAL_ID=`az identity show -g $RESOURCE_GROUP -n ${INFRA_ID}-identity --query principalId --out tsv`
|
||||
export RESOURCE_GROUP_ID=`az group show -g $RESOURCE_GROUP --query id --out tsv`
|
||||
az role assignment create --assignee "$PRINCIPAL_ID" --role 'Contributor' --scope "$RESOURCE_GROUP_ID"
|
||||
```
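
If you want to double-check the assignment before moving on, you can list the role assignments for the identity's principal at the resource group scope; this optional verification should show a single `Contributor` entry:

```sh
# Confirm the Contributor assignment exists for the identity
az role assignment list --assignee "$PRINCIPAL_ID" --scope "$RESOURCE_GROUP_ID" -o table
```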
|
||||
|
||||
## Deployment
|
||||
|
||||
The key part of this UPI deployment is the set of [Azure Resource Manager][azuretemplates] templates, which are responsible
|
||||
for deploying most resources. They're provided as a few JSON files named following the "NN_name.json" pattern. In the
|
||||
next steps we're going to deploy each one of them in order, using [az (Azure CLI)][azurecli] and providing the expected parameters.
|
||||
|
||||
## Deploy the Virtual Network
|
||||
|
||||
In this example we're going to create a Virtual Network and subnets specifically for the OpenShift cluster. You can skip this step
|
||||
if the cluster is going to live in a VNet already existing in your organization, or you can edit the `01_vnet.json` file to your
|
||||
own needs (e.g. change the subnets' address prefixes in CIDR format).
|
||||
|
||||
Copy the [`01_vnet.json`](../../../upi/azure/01_vnet.json) ARM template locally.
|
||||
|
||||
Create the deployment using the `az` client:
|
||||
|
||||
```sh
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "01_vnet.json" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
|
||||
|
||||
Link the VNet just created to the private DNS zone:
|
||||
|
||||
```sh
|
||||
az network private-dns link vnet create -g $RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n ${INFRA_ID}-network-link -v "${INFRA_ID}-vnet" -e false
|
||||
```
|
||||
|
||||
## Deploy the image
|
||||
|
||||
Copy the [`02_storage.json`](../../../upi/azure/02_storage.json) ARM template locally.
|
||||
|
||||
Create the deployment using the `az` client:
|
||||
|
||||
```sh
|
||||
export VHD_BLOB_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY -c vhd -n "rhcos.vhd" -o tsv`
|
||||
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "02_storage.json" \
|
||||
--parameters vhdBlobURL="$VHD_BLOB_URL" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
|
||||
|
||||
## Deploy the load balancers
|
||||
|
||||
Copy the [`03_infra.json`](../../../upi/azure/03_infra.json) ARM template locally.
|
||||
|
||||
Deploy the load balancers and public IP addresses using the `az` client:
|
||||
|
||||
```sh
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "03_infra.json" \
|
||||
--parameters privateDNSZoneName="${CLUSTER_NAME}.${BASE_DOMAIN}" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
|
||||
|
||||
Create an `api` DNS record in the *public* zone for the API public load balancer. Note that the `BASE_DOMAIN_RESOURCE_GROUP` must point to the resource group where the public DNS zone exists.
|
||||
|
||||
```sh
|
||||
export PUBLIC_IP=`az network public-ip list -g $RESOURCE_GROUP --query "[?name=='${INFRA_ID}-master-pip'] | [0].ipAddress" -o tsv`
|
||||
az network dns record-set a add-record -g $BASE_DOMAIN_RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n api -a $PUBLIC_IP --ttl 60
|
||||
```
|
||||
|
||||
Or, in case of adding this cluster to an already existing public zone, use instead:
|
||||
|
||||
```sh
|
||||
export PUBLIC_IP=`az network public-ip list -g $RESOURCE_GROUP --query "[?name=='${INFRA_ID}-master-pip'] | [0].ipAddress" -o tsv`
|
||||
az network dns record-set a add-record -g $BASE_DOMAIN_RESOURCE_GROUP -z ${BASE_DOMAIN} -n api.${CLUSTER_NAME} -a $PUBLIC_IP --ttl 60
|
||||
```
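
Optionally, verify that the record resolves to the load balancer's public IP before continuing. This is a quick sanity check; adjust the zone and record name if you added the record to a pre-existing `${BASE_DOMAIN}` zone, and note that DNS propagation may take a minute:

```sh
# The api record should resolve to $PUBLIC_IP once the zone is delegated
az network dns record-set a show -g $BASE_DOMAIN_RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n api -o table
nslookup api.${CLUSTER_NAME}.${BASE_DOMAIN}
```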
|
||||
|
||||
## Launch the temporary cluster bootstrap
|
||||
|
||||
Copy the [`04_bootstrap.json`](../../../upi/azure/04_bootstrap.json) ARM template locally.
|
||||
|
||||
Create the deployment using the `az` client:
|
||||
|
||||
```sh
|
||||
export BOOTSTRAP_URL=`az storage blob url --account-name ${CLUSTER_NAME}sa --account-key $ACCOUNT_KEY -c "files" -n "bootstrap.ign" -o tsv`
|
||||
export BOOTSTRAP_IGNITION=`jq -rcnM --arg v "2.2.0" --arg url $BOOTSTRAP_URL '{ignition:{version:$v,config:{replace:{source:$url}}}}' | base64 -w0`
|
||||
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "04_bootstrap.json" \
|
||||
--parameters bootstrapIgnition="$BOOTSTRAP_IGNITION" \
|
||||
--parameters sshKeyData="$SSH_KEY" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
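
If the bootstrap VM later fails to pick up its configuration, a common culprit is a malformed pointer ignition. You can decode the value that was passed to the template and confirm it is a small config whose `replace.source` points at the blob URL (an optional check, not part of the official flow):

```sh
# Decode the base64-encoded pointer config and inspect it
echo "$BOOTSTRAP_IGNITION" | base64 -d | jq .
```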
|
||||
|
||||
## Launch the permanent control plane
|
||||
|
||||
Copy the [`05_masters.json`](../../../upi/azure/05_masters.json) ARM template locally.
|
||||
|
||||
Create the deployment using the `az` client:
|
||||
|
||||
```sh
|
||||
export MASTER_IGNITION=`cat master.ign | base64`
|
||||
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "05_masters.json" \
|
||||
--parameters masterIgnition="$MASTER_IGNITION" \
|
||||
--parameters sshKeyData="$SSH_KEY" \
|
||||
--parameters privateDNSZoneName="${CLUSTER_NAME}.${BASE_DOMAIN}" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
|
||||
|
||||
## Wait for the bootstrap to complete
|
||||
|
||||
Wait until cluster bootstrapping has completed:
|
||||
|
||||
```console
|
||||
$ openshift-install wait-for bootstrap-complete --log-level debug
|
||||
DEBUG OpenShift Installer v4.n
|
||||
DEBUG Built from commit 6b629f0c847887f22c7a95586e49b0e2434161ca
|
||||
INFO Waiting up to 30m0s for the Kubernetes API at https://api.cluster.basedomain.com:6443...
|
||||
DEBUG Still waiting for the Kubernetes API: the server could not find the requested resource
|
||||
DEBUG Still waiting for the Kubernetes API: the server could not find the requested resource
|
||||
DEBUG Still waiting for the Kubernetes API: Get https://api.cluster.basedomain.com:6443/version?timeout=32s: dial tcp: connect: connection refused
|
||||
INFO API v1.14.n up
|
||||
INFO Waiting up to 30m0s for bootstrapping to complete...
|
||||
DEBUG Bootstrap status: complete
|
||||
INFO It is now safe to remove the bootstrap resources
|
||||
```
|
||||
|
||||
Once the bootstrapping process is complete, you can deallocate and delete the bootstrap resources:
|
||||
|
||||
```sh
|
||||
az network nsg rule delete -g $RESOURCE_GROUP --nsg-name ${INFRA_ID}-controlplane-nsg --name bootstrap_ssh_in
|
||||
az vm stop -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap
|
||||
az vm deallocate -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap
|
||||
az vm delete -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap --yes
|
||||
az disk delete -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap_OSDisk --no-wait --yes
|
||||
az network nic delete -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap-nic --no-wait
|
||||
az storage blob delete --account-key $ACCOUNT_KEY --account-name ${CLUSTER_NAME}sa --container-name files --name bootstrap.ign
|
||||
az network public-ip delete -g $RESOURCE_GROUP --name ${INFRA_ID}-bootstrap-ssh-pip
|
||||
```
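
To confirm the cleanup finished, you can list anything in the resource group whose name still contains `bootstrap`; an empty table means all bootstrap resources are gone (a verification sketch using a JMESPath filter):

```sh
# Any leftover bootstrap resources will be listed here
az resource list -g $RESOURCE_GROUP --query "[?contains(name, 'bootstrap')].{name:name, type:type}" -o table
```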
|
||||
|
||||
## Access the OpenShift API
|
||||
|
||||
You can now use the `oc` or `kubectl` commands to talk to the OpenShift API. The admin credentials are in `auth/kubeconfig`. For example:
|
||||
|
||||
```sh
|
||||
export KUBECONFIG="$PWD/auth/kubeconfig"
|
||||
oc get nodes
|
||||
oc get clusteroperator
|
||||
```
|
||||
|
||||
Note that only the API will be up at this point. The OpenShift web console will run on the compute nodes.
|
||||
|
||||
## Launch compute nodes
|
||||
|
||||
You may create compute nodes by launching individual instances discretely or by automated processes outside the cluster (e.g. virtual machine scale sets).
|
||||
You can also take advantage of the built-in cluster scaling mechanisms and the machine API in OpenShift.
|
||||
|
||||
In this example, we'll manually launch three instances via the provided ARM template. Additional instances can be launched by editing the `06_workers.json` file.
|
||||
|
||||
Copy the [`06_workers.json`](../../../upi/azure/06_workers.json) ARM template locally.
|
||||
|
||||
Create the deployment using the `az` client:
|
||||
|
||||
```sh
|
||||
export WORKER_IGNITION=`cat worker.ign | base64`
|
||||
|
||||
az group deployment create -g $RESOURCE_GROUP \
|
||||
--template-file "06_workers.json" \
|
||||
--parameters workerIgnition="$WORKER_IGNITION" \
|
||||
--parameters sshKeyData="$SSH_KEY" \
|
||||
--parameters baseName="$INFRA_ID"
|
||||
```
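
Before moving on to CSR approval, you can check that the worker VMs were created and are running. This optional check assumes no unrelated VMs share the resource group:

```sh
# All master and worker VMs should report PowerState "VM running"
az vm list -g $RESOURCE_GROUP -d -o table
```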
|
||||
|
||||
### Approve the worker CSRs
|
||||
|
||||
Even after they've booted up, the workers will not show up in `oc get nodes`.
|
||||
|
||||
Instead, they will create certificate signing requests (CSRs) which need to be approved. Eventually, you should see `Pending` entries looking like the ones below.
|
||||
You can use `watch oc get csr -A` to watch until the pending CSRs appear.
|
||||
|
||||
```console
|
||||
$ oc get csr -A
|
||||
NAME AGE REQUESTOR CONDITION
|
||||
csr-8bppf 2m8s system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending
|
||||
csr-dj2w4 112s system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending
|
||||
csr-ph8s8 11s system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending
|
||||
csr-q7f6q 19m system:node:master01 Approved,Issued
|
||||
csr-5ztvt 19m system:node:master02 Approved,Issued
|
||||
csr-576l2 19m system:node:master03 Approved,Issued
|
||||
csr-htmtm 19m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued
|
||||
csr-wpvxq 19m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued
|
||||
csr-xpp49 19m system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Approved,Issued
|
||||
```
|
||||
|
||||
You should inspect each pending CSR with the `oc describe csr <name>` command and verify that it comes from a node you recognise. CSRs that pass this check can be approved:
|
||||
|
||||
```console
|
||||
$ oc adm certificate approve csr-8bppf csr-dj2w4 csr-ph8s8
|
||||
certificatesigningrequest.certificates.k8s.io/csr-8bppf approved
|
||||
certificatesigningrequest.certificates.k8s.io/csr-dj2w4 approved
|
||||
certificatesigningrequest.certificates.k8s.io/csr-ph8s8 approved
|
||||
```
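
If you are working in a disposable environment and are comfortable skipping the per-CSR inspection recommended above, every pending CSR can be approved in one pass. This is a convenience sketch only; do not use it anywhere you cannot trust all requesting nodes:

```sh
# Approve everything currently in the Pending state -- use with care
oc get csr --no-headers | awk '/Pending/ {print $1}' | xargs oc adm certificate approve
```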
|
||||
|
||||
Approved nodes should now show up in `oc get nodes`, but they will be in the `NotReady` state. They will create a second CSR which must also be reviewed and approved.
|
||||
Repeat the process of inspecting the pending CSRs and approving them.
|
||||
|
||||
Once all CSRs are approved, the nodes should switch to `Ready` and pods will be scheduled on them.
|
||||
|
||||
```console
|
||||
$ oc get nodes
|
||||
NAME STATUS ROLES AGE VERSION
|
||||
master01 Ready master 23m v1.14.6+cebabbf7a
|
||||
master02 Ready master 23m v1.14.6+cebabbf7a
|
||||
master03 Ready master 23m v1.14.6+cebabbf7a
|
||||
node01 Ready worker 2m30s v1.14.6+cebabbf7a
|
||||
node02 Ready worker 2m35s v1.14.6+cebabbf7a
|
||||
node03 Ready worker 2m34s v1.14.6+cebabbf7a
|
||||
```
|
||||
|
||||
### Add the Ingress DNS Records
|
||||
|
||||
Create DNS records in the *public* and *private* zones pointing at the ingress load balancer. Use A, CNAME, etc. records, as you see fit.
|
||||
You can create either a wildcard `*.apps.{baseDomain}.` or [specific records](#specific-route-records) for every route (more on the specific records below).
|
||||
|
||||
First, wait for the ingress default router to create a load balancer and populate the `EXTERNAL-IP` column:
|
||||
|
||||
```console
|
||||
$ oc -n openshift-ingress get service router-default
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
router-default LoadBalancer 172.30.20.10 35.130.120.110 80:32288/TCP,443:31215/TCP 20
|
||||
```
|
||||
|
||||
Add a `*.apps` record to the *public* DNS zone:
|
||||
|
||||
```sh
|
||||
export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'`
|
||||
az network dns record-set a add-record -g $BASE_DOMAIN_RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a $PUBLIC_IP_ROUTER --ttl 300
|
||||
```
|
||||
|
||||
Or, in case of adding this cluster to an already existing public zone, use instead:
|
||||
|
||||
```sh
|
||||
export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'`
|
||||
az network dns record-set a add-record -g $BASE_DOMAIN_RESOURCE_GROUP -z ${BASE_DOMAIN} -n *.apps.${CLUSTER_NAME} -a $PUBLIC_IP_ROUTER --ttl 300
|
||||
```
|
||||
|
||||
Finally, add a `*.apps` record to the *private* DNS zone:
|
||||
|
||||
```sh
|
||||
export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'`
|
||||
az network private-dns record-set a create -g $RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps --ttl 300
|
||||
az network private-dns record-set a add-record -g $RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n *.apps -a $PUBLIC_IP_ROUTER
|
||||
```
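
Once the records have propagated, a quick lookup against one of the default routes confirms that the public wildcard points at the router IP (an optional check; the console route exists after the console operator has finished rolling out):

```sh
# Any *.apps hostname should resolve to $PUBLIC_IP_ROUTER
nslookup console-openshift-console.apps.${CLUSTER_NAME}.${BASE_DOMAIN}
```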
|
||||
|
||||
#### Specific route records
|
||||
|
||||
If you prefer to add explicit domains instead of using a wildcard, you can create entries for each of the cluster's current routes. Use the command below to check what they are:
|
||||
|
||||
```console
|
||||
$ oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes
|
||||
oauth-openshift.apps.cluster.basedomain.com
|
||||
console-openshift-console.apps.cluster.basedomain.com
|
||||
downloads-openshift-console.apps.cluster.basedomain.com
|
||||
alertmanager-main-openshift-monitoring.apps.cluster.basedomain.com
|
||||
grafana-openshift-monitoring.apps.cluster.basedomain.com
|
||||
prometheus-k8s-openshift-monitoring.apps.cluster.basedomain.com
|
||||
```
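
If you go with explicit records, the output above can be fed straight back into the Azure CLI. The loop below is a sketch that assumes the public zone layout used earlier in this guide (`${CLUSTER_NAME}.${BASE_DOMAIN}`) and creates one A record per currently exposed route:

```sh
# Create one A record per route host, relative to the cluster's public zone
export PUBLIC_IP_ROUTER=`oc -n openshift-ingress get service router-default --no-headers | awk '{print $4}'`
for host in $(oc get --all-namespaces -o jsonpath='{range .items[*]}{range .status.ingress[*]}{.host}{"\n"}{end}{end}' routes); do
  record=${host%.${CLUSTER_NAME}.${BASE_DOMAIN}}
  az network dns record-set a add-record -g $BASE_DOMAIN_RESOURCE_GROUP -z ${CLUSTER_NAME}.${BASE_DOMAIN} -n "$record" -a $PUBLIC_IP_ROUTER --ttl 300
done
```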
|
||||
|
||||
## Wait for the installation to complete
|
||||
|
||||
Wait until the cluster is ready:
|
||||
|
||||
```console
|
||||
$ openshift-install wait-for install-complete --log-level debug
|
||||
DEBUG Built from commit 6b629f0c847887f22c7a95586e49b0e2434161ca
|
||||
INFO Waiting up to 30m0s for the cluster at https://api.cluster.basedomain.com:6443 to initialize...
|
||||
DEBUG Still waiting for the cluster to initialize: Working towards 4.2.12: 99% complete, waiting on authentication, console, monitoring
|
||||
DEBUG Still waiting for the cluster to initialize: Working towards 4.2.12: 100% complete
|
||||
DEBUG Cluster is initialized
|
||||
INFO Waiting up to 10m0s for the openshift-console route to be created...
|
||||
DEBUG Route found in openshift-console namespace: console
|
||||
DEBUG Route found in openshift-console namespace: downloads
|
||||
DEBUG OpenShift console route is created
|
||||
INFO Install complete!
|
||||
INFO To access the cluster as the system:admin user when using 'oc', run 'export KUBECONFIG=${PWD}/auth/kubeconfig'
|
||||
INFO Access the OpenShift web-console here: https://console-openshift-console.apps.cluster.basedomain.com
|
||||
INFO Login to the console with user: kubeadmin, password: REDACTED
|
||||
```
|
||||
|
||||
[azuretemplates]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/template-deployment-overview
|
||||
[openshiftinstall]: https://github.com/openshift/installer
|
||||
[azurecli]: https://docs.microsoft.com/en-us/cli/azure/
|
||||
[jqjson]: https://stedolan.github.io/jq/
|
||||
[yqyaml]: https://yq.readthedocs.io/en/latest/
|
||||
[ingress-operator]: https://github.com/openshift/cluster-ingress-operator
|
||||
[machine-api-operator]: https://github.com/openshift/machine-api-operator
|
||||
[azure-identity]: https://docs.microsoft.com/en-us/azure/architecture/framework/security/identity
|
||||
[azure-resource-group]: https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/overview#resource-groups
|
||||
[azure-dns]: https://docs.microsoft.com/en-us/azure/dns/dns-overview
|
||||
[kubernetes-service-load-balancers-exclude-masters]: https://github.com/kubernetes/kubernetes/issues/65618
|
|
@ -34,17 +34,18 @@ The following `install-config.yaml` properties are available:
|
|||
* `name` (required string): The name of the cluster.
|
||||
DNS records for the cluster are all subdomains of `{{.metadata.name}}.{{.baseDomain}}`.
|
||||
* `networking` (optional object): The configuration for the pod network provider in the cluster.
|
||||
* `clusterNetwork` (optional array of objects): The IP address pool for pods.
|
||||
* `clusterNetwork` (optional array of objects): The IP address pools for pods.
|
||||
The default is 10.128.0.0/14 with a host prefix of /23.
|
||||
* `cidr` (required [IP network](#ip-networks)): The IP block address pool.
|
||||
* `hostPrefix` (required integer): The prefix size to allocate to each node from the CIDR.
|
||||
For example, 24 would allocate 2^8=256 addresses to each node.
|
||||
* `machineCIDR` (optional [IP network](#ip-networks)): The IP address pool for machines.
|
||||
The default is 10.0.0.0/16 for all platforms other than libvirt.
|
||||
For libvirt, the default is 192.168.126.0/24.
|
||||
* `machineNetwork` (optional array of objects): The IP address pools for machines.
|
||||
* `cidr` (required [IP network](#ip-networks)): The IP block address pool.
|
||||
The default is 10.0.0.0/16 for all platforms other than libvirt.
|
||||
For libvirt, the default is 192.168.126.0/24.
|
||||
* `networkType` (optional string): The type of network to install.
|
||||
The default is [OpenShiftSDN][openshift-sdn].
|
||||
* `serviceNetwork` (optional array of [IP networks](#ip-networks)): The IP address pool for services.
|
||||
* `serviceNetwork` (optional array of [IP networks](#ip-networks)): The IP address pools for services.
|
||||
The default is 172.30.0.0/16.
|
||||
* `platform` (required object): The configuration for the specific platform upon which to perform the installation.
|
||||
* `aws` (optional object): [AWS-specific properties](aws/customization.md#cluster-scoped-properties).
|
||||
|
@ -68,6 +69,8 @@ For example, 10.0.0.0/16 represents IP addresses 10.0.0.0 through 10.0.255.255.
|
|||
|
||||
The following machine-pool properties are available:
|
||||
|
||||
* `architecture` (optional string): Determines the instruction set architecture of the machines in the pool. Currently, heterogeneous clusters are not supported, so all pools must specify the same architecture.
|
||||
Valid values are `amd64` (the default).
|
||||
* `hyperthreading` (optional string): Determines the mode of hyperthreading that machines in the pool will utilize.
|
||||
Valid values are `Enabled` (the default) and `Disabled`.
|
||||
* `name` (required string): The name of the machine pool.
|
||||
|
@ -137,7 +140,8 @@ networking:
|
|||
clusterNetworks:
|
||||
- cidr: 10.128.0.0/14
|
||||
hostPrefix: 23
|
||||
machineCIDR: 10.0.0.0/16
|
||||
machineNetwork:
|
||||
- cidr: 10.0.0.0/16
|
||||
networkType: OpenShiftSDN
|
||||
serviceNetwork:
|
||||
- 172.30.0.0/16
|
||||
|
|
|
@ -113,7 +113,8 @@ baseDomain: test.metalkube.org
|
|||
metadata:
|
||||
name: ostest
|
||||
networking:
|
||||
machineCIDR: 192.168.111.0/24
|
||||
machineNetwork:
|
||||
- cidr: 192.168.111.0/24
|
||||
compute:
|
||||
- name: worker
|
||||
replicas: 1
|
||||
|
|
|
@ -16,6 +16,7 @@ In addition, it covers the installation with the default CNI (OpenShiftSDN), as
|
|||
- [Disk Requirements](#disk-requirements)
|
||||
- [Neutron Public Network](#neutron-public-network)
|
||||
- [OpenStack Credentials](#openstack-credentials)
|
||||
- [Self Signed OpenStack CA certificates](#self-signed-openstack-ca-certificates)
|
||||
- [Standalone Single-Node Development Environment](#standalone-single-node-development-environment)
|
||||
- [Running The Installer](#running-the-installer)
|
||||
- [Known Issues](#known-issues)
|
||||
|
@ -27,7 +28,9 @@ In addition, it covers the installation with the default CNI (OpenShiftSDN), as
|
|||
- [Current Expected Behavior](#current-expected-behavior)
|
||||
- [Checking Cluster Status](#checking-cluster-status)
|
||||
- [Destroying The Cluster](#destroying-the-cluster)
|
||||
- [Using an External Load Balancer](#using-an-external-load-balancer)
|
||||
- [Post Install Operations](#post-install-operations)
|
||||
- [Using an External Load Balancer](#using-an-external-load-balancer)
|
||||
- [Refreshing a CA Certificate](#refreshing-a-ca-certificate)
|
||||
- [Reporting Issues](#reporting-issues)
|
||||
|
||||
## Reference Documents
|
||||
|
@ -190,6 +193,24 @@ clouds:
|
|||
The file can contain information about several clouds. For instance, the example above describes two clouds: `shiftstack` and `dev-evn`.
|
||||
In order to determine which cloud to use, the user can either specify it in the `install-config.yaml` file under `platform.openstack.cloud` or with the `OS_CLOUD` environment variable. If both are omitted, then the cloud name defaults to `openstack`.
|
||||
|
||||
### Self Signed OpenStack CA certificates
|
||||
|
||||
If your OpenStack cluster uses self-signed CA certificates for endpoint authentication, you will need a few additional steps to run the installer. First, make sure that the host running the installer trusts your CA certificates. If you want more information on how to do this, refer to the [Red Hat OpenStack Platform documentation](https://access.redhat.com/documentation/en-us/red_hat_openstack_platform/13/html/director_installation_and_usage/appe-ssltls_certificate_configuration#Adding_the_Certificate_Authority_to_Clients). In the future, we plan to modify the installer to be able to trust certificates independently of the host OS.
|
||||
|
||||
```sh
|
||||
sudo cp ca.crt.pem /etc/pki/ca-trust/source/anchors/
|
||||
sudo update-ca-trust extract
|
||||
```
|
||||
|
||||
Next, you should add the `cacert` key to your `clouds.yaml`. Its value should be a valid path to your CA cert that does not require root privilege to read.
|
||||
|
||||
```yaml
|
||||
clouds:
|
||||
shiftstack:
|
||||
auth: ...
|
||||
cacert: "ca.crt.pem"
|
||||
```
|
||||
|
||||
## Standalone Single-Node Development Environment
|
||||
|
||||
If you would like to set up an isolated development environment, you may use a bare metal host running CentOS 7. The following repository includes some instructions and scripts to help with creating a single-node OpenStack development environment for running the installer. Please refer to [this documentation](https://github.com/shiftstack-dev-tools/ocp-doit) for further details.
|
||||
|
@ -372,7 +393,6 @@ Finally, to see all the running pods in your cluster, you can do:
|
|||
```sh
|
||||
oc get pods -A
|
||||
```
|
||||
|
||||
### Destroying The Cluster
|
||||
|
||||
To destroy the cluster, point it to your cluster with this command:
|
||||
|
@ -386,8 +406,9 @@ Then, you can delete the folder containing the cluster metadata:
|
|||
```sh
|
||||
rm -rf ostest/
|
||||
```
|
||||
## Post Install Operations
|
||||
|
||||
## Using an External Load Balancer
|
||||
### Using an External Load Balancer
|
||||
|
||||
This documents how to shift from the internal load balancer, which is intended for internal networking needs, to an external load balancer.
|
||||
|
||||
|
@ -467,6 +488,14 @@ Another useful thing to check is that the ignition configurations are only avail
|
|||
curl https://<loadbalancer ip>:22623/config/master --insecure
|
||||
```
|
||||
|
||||
### Refreshing a CA Certificate
|
||||
|
||||
If you ran the installer with a [custom CA certificate](#self-signed-openstack-ca-certificates), then this certificate can be changed while the cluster is running. To change your certificate, edit the value of the `ca-cert.pem` key in the `cloud-provider-config` configmap with a valid PEM certificate.
|
||||
|
||||
```sh
|
||||
oc edit -n openshift-config cloud-provider-config
|
||||
```
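
If you prefer a non-interactive update (for example from automation), the same change can be applied with a merge patch built from the new PEM file. This is a sketch, assuming the new certificate is saved locally as `ca.crt.pem` and that `jq` is available; only the `ca-cert.pem` key is replaced, and the rest of the configmap is left untouched:

```sh
# Merge-patch only the ca-cert.pem key of the cloud-provider-config configmap
oc patch configmap cloud-provider-config -n openshift-config --type merge \
  -p "$(jq -n --arg cert "$(cat ca.crt.pem)" '{data: {"ca-cert.pem": $cert}}')"
```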
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
Please see the [Issue Tracker][issues_openstack] for current known issues.
|
||||
|
|
28  vendor/github.com/openshift/installer/docs/user/openstack/customization.md  (generated, vendored)
|
@ -80,3 +80,31 @@ platform:
|
|||
pullSecret: '{"auths": ...}'
|
||||
sshKey: ssh-ed25519 AAAA...
|
||||
```
|
||||
|
||||
## Image Overrides
|
||||
|
||||
Normally the installer downloads the RHCOS image from a predetermined location described in [data/data/rhcos.json](/data/data/rhcos.json). But the download URL can be overridden, notably for disconnected installations.
|
||||
|
||||
To do so, and upload binary data from a custom location, the user may set the `clusterOSImage` parameter in the install config to point to that location, and then start the installation. In all other respects the process will be consistent with the default.
|
||||
|
||||
**NOTE:** For this to work, the parameter value must be a valid http(s) URL.
|
||||
|
||||
**NOTE:** The optional `sha256` query parameter can be attached to the URL, which will force the installer to check the image file checksum before uploading it into Glance.
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
platform:
|
||||
openstack:
|
||||
clusterOSImage: http://mirror.example.com/images/rhcos-43.81.201912131630.0-openstack.x86_64.qcow2.gz?sha256=ffebbd68e8a1f2a245ca19522c16c86f67f9ac8e4e0c1f0a812b068b16f7265d
|
||||
```
|
||||
|
||||
If the user wants to reuse an existing Glance image without uploading any binary data, then it is possible to set the `clusterOSImage` install config parameter to the name of that Glance image. In this case no new Glance images will be created, and the image will remain when the cluster is destroyed. In other words, if `clusterOSImage` is not an http(s) URL, then the installer will look in Glance for an image with that name.
|
||||
|
||||
Example:
|
||||
|
||||
```yaml
|
||||
platform:
|
||||
openstack:
|
||||
clusterOSImage: my-rhcos
|
||||
```
|
||||
|
|
4  vendor/github.com/openshift/installer/docs/user/openstack/install_upi.md  (generated, vendored)
|
@ -497,7 +497,9 @@ openstack security group rule create --description "master ingress kubelet secur
|
|||
openstack security group rule create --description "master ingress kubelet secure from worker" --protocol tcp --dst-port 10250 --remote-group "$INFRA_ID-worker" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "etcd" --protocol tcp --dst-port 2379:2380 --remote-group "$INFRA_ID-master" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "master ingress services (TCP)" --protocol tcp --dst-port 30000:32767 --remote-group "$INFRA_ID-master" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "master ingress services (TCP) from worker" --protocol tcp --dst-port 30000:32767 --remote-group "$INFRA_ID-worker" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "master ingress services (UDP)" --protocol udp --dst-port 30000:32767 --remote-group "$INFRA_ID-master" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "master ingress services (UDP) from worker" --protocol udp --dst-port 30000:32767 --remote-group "$INFRA_ID-worker" "$INFRA_ID-master"
|
||||
openstack security group rule create --description "VRRP" --protocol vrrp --remote-ip 192.0.2.0/24 "$INFRA_ID-master"
|
||||
```
|
||||
|
||||
|
@ -522,7 +524,9 @@ openstack security group rule create --description "worker ingress internal from
|
|||
openstack security group rule create --description "worker ingress kubelet secure" --protocol tcp --dst-port 10250 --remote-group "$INFRA_ID-worker" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "worker ingress kubelet secure from master" --protocol tcp --dst-port 10250 --remote-group "$INFRA_ID-master" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "worker ingress services (TCP)" --protocol tcp --dst-port 30000:32767 --remote-group "$INFRA_ID-worker" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "worker ingress services (TCP) from master" --protocol tcp --dst-port 30000:32767 --remote-group "$INFRA_ID-master" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "worker ingress services (UDP)" --protocol udp --dst-port 30000:32767 --remote-group "$INFRA_ID-worker" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "worker ingress services (UDP) from master" --protocol udp --dst-port 30000:32767 --remote-group "$INFRA_ID-master" "$INFRA_ID-worker"
|
||||
openstack security group rule create --description "VRRP" --protocol vrrp --remote-ip 192.0.2.0/24 "$INFRA_ID-worker"
|
||||
```
|
||||
|
||||
|
|
13  vendor/github.com/openshift/installer/docs/user/openstack/known-issues.md  (generated, vendored)
|
@ -10,18 +10,17 @@ If the mDNS service name of a server is too long, it will exceed the character l
|
|||
|
||||
Since the installer requires the *Name* of your external network and Red Hat CoreOS image, if you have other networks or images with the same name, it will choose one randomly from the set. This is not a reliable way to run the installer. We highly recommend that you resolve this with your cluster administrator by creating unique names for your resources in OpenStack.
|
||||
|
||||
## Self Signed Certificates
|
||||
|
||||
Support for certificate bundles has been fixed in 4.3. If your OpenStack cluster uses self-signed certificates, you will need to add them using the `AdditionalTrustBundle` field in your `install-config.yaml`. For more information on how to do this, please see the [customizations doc](../customization.md).
|
||||
|
||||
## External Network Overlap
|
||||
|
||||
If your external network's CIDR range is the same as one of the default network ranges, then you will need to change the matching network range by running the installer with a custom `install-config.yaml`. If users are experiencing unusual networking problems, please contact your cluster administrator and validate that none of your network CIDRs overlap with the external network. We were unfortunately unable to support validation for this due to a lack of support in gophercloud, and even if we were, it is likely that the CIDR range of the floating IP would only be accessible to cluster administrators. The default network CIDRs are as follows:
|
||||
|
||||
```txt
|
||||
machineCIDR: 10.0.0.0/16
|
||||
serviceNetwork: 172.30.0.0/16
|
||||
clusterNetwork: 10.128.0.0/14
|
||||
machineNetwork:
|
||||
- cidr: "10.0.0.0/16"
|
||||
serviceNetwork:
|
||||
- "172.30.0.0/16"
|
||||
clusterNetwork:
|
||||
- cidr: "10.128.0.0/14"
|
||||
```
|
||||
|
||||
## Lack of default DNS servers on created subnets
|
||||
|
|
34  vendor/github.com/openshift/installer/hack/update-rhcos-bootimage.py  (generated, vendored)
|
@ -1,5 +1,5 @@
|
|||
#!/usr/bin/python3
|
||||
# Usage: ./hack/update-rhcos-bootimage.py https://releases-art-rhcos.svc.ci.openshift.org/storage/releases/ootpa/410.8.20190401.0/meta.json
|
||||
#!/usr/bin/env python3
|
||||
# Usage: ./hack/update-rhcos-bootimage.py https://releases-art-rhcos.svc.ci.openshift.org/storage/releases/ootpa/410.8.20190401.0/meta.json amd64
|
||||
import codecs,os,sys,json,argparse
|
||||
import urllib.parse
|
||||
import urllib.request
|
||||
|
@ -8,12 +8,13 @@ import urllib.request
|
|||
# builds. Do not try to e.g. point to RHT-internal endpoints.
|
||||
RHCOS_RELEASES_APP = 'https://releases-art-rhcos.svc.ci.openshift.org'
|
||||
|
||||
dn = os.path.abspath(os.path.dirname(sys.argv[0]))
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("meta", action='store')
|
||||
parser.add_argument("arch", action='store', choices=['amd64', 's390x', 'ppc64le'])
|
||||
args = parser.parse_args()
|
||||
|
||||
metadata_dir = os.path.join(os.path.dirname(sys.argv[0]), "../data/data")
|
||||
|
||||
if not args.meta.startswith(RHCOS_RELEASES_APP):
|
||||
raise SystemExit("URL must start with: " + RHCOS_RELEASES_APP)
|
||||
|
||||
|
@ -24,13 +25,24 @@ newmeta = {}
|
|||
for k in ['images', 'buildid', 'oscontainer',
|
||||
'ostree-commit', 'ostree-version',
|
||||
'azure', 'gcp']:
|
||||
newmeta[k] = meta[k]
|
||||
newmeta['amis'] = {
|
||||
entry['name']: {
|
||||
'hvm': entry['hvm'],
|
||||
if meta.get(k):
|
||||
newmeta[k] = meta[k]
|
||||
if meta.get(k):
|
||||
newmeta['amis'] = {
|
||||
entry['name']: {
|
||||
'hvm': entry['hvm'],
|
||||
}
|
||||
for entry in meta['amis']
|
||||
}
|
||||
for entry in meta['amis']
|
||||
}
|
||||
newmeta['baseURI'] = urllib.parse.urljoin(args.meta, '.')
|
||||
with open(os.path.join(dn, "../data/data/rhcos.json"), 'w') as f:
|
||||
|
||||
with open(os.path.join(metadata_dir, f"rhcos-{args.arch}.json"), 'w') as f:
|
||||
json.dump(newmeta, f, sort_keys=True, indent=4)
|
||||
|
||||
# Continue to populate the legacy metadata file because there are still
|
||||
# processes consuming this file directly. This normally could just be a symlink
|
||||
# but some of these processes reference raw.githubusercontent.com which doesn't
|
||||
# follow symlinks.
|
||||
if args.arch == 'amd64':
|
||||
with open(os.path.join(metadata_dir, "rhcos.json"), 'w') as f:
|
||||
json.dump(newmeta, f, sort_keys=True, indent=4)
|
||||
|
|
31  vendor/github.com/openshift/installer/images/libvirt/Dockerfile.ci  (new file, generated, vendored)
|
@ -0,0 +1,31 @@
|
|||
# This Dockerfile is a used by CI to publish an installer image for creating libvirt clusters
|
||||
# It builds an image containing openshift-install and nss-wrapper for remote deployments, as well as the google cloud-sdk for nested GCE environments.
|
||||
|
||||
FROM registry.svc.ci.openshift.org/openshift/release:golang-1.12 AS builder
|
||||
RUN yum install -y libvirt-devel && \
|
||||
yum clean all && rm -rf /var/cache/yum/*
|
||||
WORKDIR /go/src/github.com/openshift/installer
|
||||
COPY . .
|
||||
RUN TAGS="libvirt" hack/build.sh
|
||||
|
||||
FROM centos:7
|
||||
COPY --from=builder /go/src/github.com/openshift/installer/bin/openshift-install /bin/openshift-install
|
||||
COPY --from=builder /go/src/github.com/openshift/installer/images/libvirt/mock-nss.sh /bin/mock-nss.sh
|
||||
COPY --from=builder /go/src/github.com/openshift/installer/images/libvirt/google-cloud-sdk.repo /etc/yum.repos.d/google-cloud-sdk.repo
|
||||
|
||||
RUN yum update -y && \
|
||||
yum install --setopt=tsflags=nodocs -y \
|
||||
genisoimage \
|
||||
gettext \
|
||||
google-cloud-sdk \
|
||||
libvirt-client \
|
||||
libvirt-libs \
|
||||
nss_wrapper \
|
||||
openssh-clients && \
|
||||
yum clean all && rm -rf /var/cache/yum/*
|
||||
|
||||
RUN mkdir /output && chown 1000:1000 /output
|
||||
USER 1000:1000
|
||||
ENV PATH /bin
|
||||
ENV HOME /output
|
||||
WORKDIR /output
|
53  vendor/github.com/openshift/installer/images/libvirt/README.md  (new file, generated, vendored)
|
@ -0,0 +1,53 @@
|
|||
# Libvirt Installer for CI
|
||||
|
||||
This image enables launching a libvirt cluster for CI testing through two primary mechanisms:
|
||||
1. Targeting a libvirt service running on a remote host
|
||||
2. Launching a libvirt VM nested in a GCE instance
|
||||
|
||||
This image contains [`nss_wrapper`](https://cwrap.org/nss_wrapper.html) to execute `ssh` commands as
|
||||
a mock user to interact with the remote libvirt API or GCE instance from an OpenShift container.
|
||||
|
||||
OpenShift containers run with an arbitrary uid, but SSH requires a valid user. `nss_wrapper`
|
||||
allows for the container's user ID to be mapped to a username inside of a container.
|
||||
|
||||
### Example Usage
|
||||
|
||||
You can override the container's current user ID and group ID by providing `NSS_WRAPPER_GROUP`
|
||||
and `NSS_WRAPPER_PASSWD` for the mock files, as well as `NSS_USERNAME`, `NSS_UID`, `NSS_GROUPNAME`,
|
||||
and/or `NSS_GID`. In OpenShift CI, `NSS_USERNAME` and `NSS_GROUPNAME` are set.
|
||||
The random UID assigned to the container is the UID that the mock username is mapped to.
|
||||
|
||||
```console
|
||||
$ podman run --rm \
|
||||
> -e NSS_WRAPPER_GROUP=/tmp/group \
|
||||
> -e NSS_WRAPPER_PASSWD=/tmp/passwd \
|
||||
> -e NSS_UID=1000 \
|
||||
> -e NSS_GID=1000 \
|
||||
> -e NSS_USERNAME=testuser \
|
||||
> -e NSS_GROUPNAME=testuser \
|
||||
> nss_wrapper_img mock-nss.sh id testuser
|
||||
uid=1000(testuser) gid=1000(testuser) groups=1000(testuser)
|
||||
```
|
||||
|
||||
Or, in an OpenShift container:
|
||||
|
||||
```yaml
|
||||
containers:
|
||||
- name: setup
|
||||
image: nss-wrapper-image
|
||||
env:
|
||||
- name: NSS_WRAPPER_PASSWD
|
||||
value: /tmp/passwd
|
||||
- name: NSS_WRAPPER_GROUP
|
||||
value: /tmp/group
|
||||
- name: NSS_USERNAME
|
||||
value: mockuser
|
||||
- name: NSS_GROUPNAME
|
||||
value: mockuser
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
#!/bin/sh
|
||||
mock-nss.sh openshift-install <args>
|
||||
```
|
8  vendor/github.com/openshift/installer/images/libvirt/google-cloud-sdk.repo  (new file, generated, vendored)
|
@ -0,0 +1,8 @@
|
|||
[google-cloud-sdk]
|
||||
name=Google Cloud SDK
|
||||
baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64
|
||||
enabled=1
|
||||
gpgcheck=1
|
||||
repo_gpgcheck=1
|
||||
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg
|
||||
https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
|
18  vendor/github.com/openshift/installer/images/libvirt/mock-nss.sh  (new executable file, generated, vendored)
|
@ -0,0 +1,18 @@
|
|||
#!/bin/bash
|
||||
|
||||
# mock passwd and group files
|
||||
(
|
||||
exec 2>/dev/null
|
||||
username="${NSS_USERNAME:-$(id -un)}"
|
||||
uid="${NSS_UID:-$(id -u)}"
|
||||
|
||||
groupname="${NSS_GROUPNAME:-$(id -gn)}"
|
||||
gid="${NSS_GID:-$(id -g)}"
|
||||
|
||||
echo "${username}:x:${uid}:${uid}:gecos:${HOME}:/bin/bash" > "${NSS_WRAPPER_PASSWD}"
|
||||
echo "${groupname}:x:${gid}:" > "${NSS_WRAPPER_GROUP}"
|
||||
)
|
||||
|
||||
# wrap command
|
||||
export LD_PRELOAD=/usr/lib64/libnss_wrapper.so
|
||||
exec "$@"
|
|
@ -4,9 +4,12 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
igntypes "github.com/coreos/ignition/config/v2_2/types"
|
||||
ospclientconfig "github.com/gophercloud/utils/openstack/clientconfig"
|
||||
gcpprovider "github.com/openshift/cluster-api-provider-gcp/pkg/apis/gcpprovider/v1beta1"
|
||||
libvirtprovider "github.com/openshift/cluster-api-provider-libvirt/pkg/apis/libvirtproviderconfig/v1beta1"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -23,6 +26,7 @@ import (
|
|||
azureconfig "github.com/openshift/installer/pkg/asset/installconfig/azure"
|
||||
gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp"
|
||||
"github.com/openshift/installer/pkg/asset/machines"
|
||||
"github.com/openshift/installer/pkg/asset/openshiftinstall"
|
||||
"github.com/openshift/installer/pkg/asset/rhcos"
|
||||
"github.com/openshift/installer/pkg/tfvars"
|
||||
awstfvars "github.com/openshift/installer/pkg/tfvars/aws"
|
||||
|
@ -41,7 +45,6 @@ import (
|
|||
"github.com/openshift/installer/pkg/types/openstack"
|
||||
openstackdefaults "github.com/openshift/installer/pkg/types/openstack/defaults"
|
||||
"github.com/openshift/installer/pkg/types/vsphere"
|
||||
"github.com/openshift/installer/pkg/version"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -111,12 +114,23 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
return errors.Wrap(err, "unable to inject installation info")
|
||||
}
|
||||
|
||||
var useIPv4, useIPv6 bool
|
||||
for _, network := range installConfig.Config.Networking.ServiceNetwork {
|
||||
if network.IP.To4() != nil {
|
||||
useIPv4 = true
|
||||
} else {
|
||||
useIPv6 = true
|
||||
}
|
||||
}
|
||||
|
||||
masterCount := len(mastersAsset.MachineFiles)
|
||||
data, err := tfvars.TFVars(
|
||||
clusterID.InfraID,
|
||||
installConfig.Config.ClusterDomain(),
|
||||
installConfig.Config.BaseDomain,
|
||||
&installConfig.Config.Networking.MachineCIDR.IPNet,
|
||||
&installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet,
|
||||
useIPv4,
|
||||
useIPv6,
|
||||
bootstrapIgn,
|
||||
masterIgn,
|
||||
masterCount,
|
||||
|
@ -217,6 +231,19 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
for i, w := range workers {
|
||||
workerConfigs[i] = w.Spec.Template.Spec.ProviderSpec.Value.Object.(*azureprovider.AzureMachineProviderSpec)
|
||||
}
|
||||
|
||||
var (
|
||||
machineV4CIDRs []net.IPNet
|
||||
machineV6CIDRs []net.IPNet
|
||||
)
|
||||
for _, network := range installConfig.Config.Networking.MachineNetwork {
|
||||
if network.CIDR.IPNet.IP.To4() != nil {
|
||||
machineV4CIDRs = append(machineV4CIDRs, network.CIDR.IPNet)
|
||||
} else {
|
||||
machineV6CIDRs = append(machineV6CIDRs, network.CIDR.IPNet)
|
||||
}
|
||||
}
|
||||
|
||||
preexistingnetwork := installConfig.Config.Azure.VirtualNetwork != ""
|
||||
data, err := azuretfvars.TFVars(
|
||||
azuretfvars.TFVarsSources{
|
||||
|
@ -227,6 +254,8 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
ImageURL: string(*rhcosImage),
|
||||
PreexistingNetwork: preexistingnetwork,
|
||||
Publish: installConfig.Config.Publish,
|
||||
MachineV4CIDRs: machineV4CIDRs,
|
||||
MachineV6CIDRs: machineV6CIDRs,
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -294,7 +323,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
data, err = libvirttfvars.TFVars(
|
||||
masters[0].Spec.ProviderSpec.Value.Object.(*libvirtprovider.LibvirtMachineProviderConfig),
|
||||
string(*rhcosImage),
|
||||
&installConfig.Config.Networking.MachineCIDR.IPNet,
|
||||
&installConfig.Config.Networking.MachineNetwork[0].CIDR.IPNet,
|
||||
installConfig.Config.Platform.Libvirt.Network.IfName,
|
||||
masterCount,
|
||||
)
|
||||
|
@ -306,6 +335,22 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
Data: data,
|
||||
})
|
||||
case openstack.Name:
|
||||
opts := &ospclientconfig.ClientOpts{}
|
||||
opts.Cloud = installConfig.Config.Platform.OpenStack.Cloud
|
||||
cloud, err := ospclientconfig.GetCloudFromYAML(opts)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get cloud config for openstack")
|
||||
}
|
||||
var caCert string
|
||||
// Get the ca-cert-bundle key if there is a value for cacert in clouds.yaml
|
||||
if caPath := cloud.CACertFile; caPath != "" {
|
||||
caFile, err := ioutil.ReadFile(caPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read clouds.yaml ca-cert from disk")
|
||||
}
|
||||
caCert = string(caFile)
|
||||
}
|
||||
|
||||
masters, err := mastersAsset.Machines()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -335,7 +380,7 @@ func (t *TerraformVariables) Generate(parents asset.Parents) error {
|
|||
installConfig.Config.Platform.OpenStack.OctaviaSupport,
|
||||
string(*rhcosImage),
|
||||
clusterID.InfraID,
|
||||
installConfig.Config.AdditionalTrustBundle,
|
||||
caCert,
|
||||
bootstrapIgn,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -402,21 +447,12 @@ func injectInstallInfo(bootstrap []byte) (string, error) {
|
|||
return "", errors.Wrap(err, "failed to unmarshal bootstrap Ignition config")
|
||||
}
|
||||
|
||||
invoker := "user"
|
||||
if env := os.Getenv("OPENSHIFT_INSTALL_INVOKER"); env != "" {
|
||||
invoker = env
|
||||
cm, err := openshiftinstall.CreateInstallConfigMap("openshift-install")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to generate openshift-install config")
|
||||
}
|
||||
|
||||
config.Storage.Files = append(config.Storage.Files, ignition.FileFromString("/opt/openshift/manifests/openshift-install.yml", "root", 0644, fmt.Sprintf(`---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: openshift-install
|
||||
namespace: openshift-config
|
||||
data:
|
||||
version: "%s"
|
||||
invoker: "%s"
|
||||
`, version.Raw, invoker)))
|
||||
config.Storage.Files = append(config.Storage.Files, ignition.FileFromString("/opt/openshift/manifests/openshift-install.yaml", "root", 0644, cm))
|
||||
|
||||
ign, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
|
|
3  vendor/github.com/openshift/installer/pkg/asset/ignition/bootstrap/bootstrap.go  (generated, vendored)
|
@ -272,10 +272,11 @@ func (a *Bootstrap) addStorageFiles(base string, uri string, templateData *boots
|
|||
}
|
||||
|
||||
filename := path.Base(uri)
|
||||
parentDir := path.Base(path.Dir(uri))
|
||||
|
||||
var mode int
|
||||
appendToFile := false
|
||||
if path.Base(path.Dir(uri)) == "bin" {
|
||||
if parentDir == "bin" || parentDir == "dispatcher.d" {
|
||||
mode = 0555
|
||||
} else if filename == "motd" {
|
||||
mode = 0644
|
||||
|
|
5  vendor/github.com/openshift/installer/pkg/asset/ignition/machine/node.go  (generated, vendored)
|
@ -2,6 +2,7 @@ package machine
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
ignition "github.com/coreos/ignition/config/v2_2/types"
|
||||
|
@ -21,11 +22,11 @@ func pointerIgnitionConfig(installConfig *types.InstallConfig, rootCA []byte, ro
|
|||
case baremetaltypes.Name:
|
||||
// Baremetal needs to point directly at the VIP because we don't have a
|
||||
// way to configure DNS before Ignition runs.
|
||||
ignitionHost = fmt.Sprintf("%s:22623", installConfig.BareMetal.APIVIP)
|
||||
ignitionHost = net.JoinHostPort(installConfig.BareMetal.APIVIP, "22623")
|
||||
case openstacktypes.Name:
|
||||
apiVIP, err := openstackdefaults.APIVIP(installConfig.Networking)
|
||||
if err == nil {
|
||||
ignitionHost = fmt.Sprintf("%s:22623", apiVIP.String())
|
||||
ignitionHost = net.JoinHostPort(apiVIP.String(), "22623")
|
||||
} else {
|
||||
ignitionHost = fmt.Sprintf("api-int.%s:22623", installConfig.ClusterDomain())
|
||||
}
|
||||
|
|
10  vendor/github.com/openshift/installer/pkg/asset/installconfig/aws/permissions.go  (generated, vendored)
|
@ -217,7 +217,7 @@ var permissions = map[PermissionGroup][]string{
|
|||
// are sufficient to perform an installation, and that they can be used for cluster runtime
|
||||
// as either capable of creating new credentials for components that interact with the cloud or
|
||||
// being able to be passed through as-is to the components that need cloud credentials
|
||||
func ValidateCreds(ssn *session.Session, groups []PermissionGroup) error {
|
||||
func ValidateCreds(ssn *session.Session, groups []PermissionGroup, region string) error {
|
||||
// Compile a list of permissions based on the permission groups provided
|
||||
requiredPermissions := []string{}
|
||||
for _, group := range groups {
|
||||
|
@ -238,9 +238,13 @@ func ValidateCreds(ssn *session.Session, groups []PermissionGroup) error {
|
|||
return errors.Wrap(err, "initialize cloud-credentials client")
|
||||
}
|
||||
|
||||
sParams := &ccaws.SimulateParams{
|
||||
Region: region,
|
||||
}
|
||||
|
||||
// Check whether we can do an installation
|
||||
logger := logrus.StandardLogger()
|
||||
canInstall, err := ccaws.CheckPermissionsAgainstActions(client, requiredPermissions, logger)
|
||||
canInstall, err := ccaws.CheckPermissionsAgainstActions(client, requiredPermissions, sParams, logger)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "checking install permissions")
|
||||
}
|
||||
|
@ -259,7 +263,7 @@ func ValidateCreds(ssn *session.Session, groups []PermissionGroup) error {
|
|||
|
||||
// Check whether we can use the current credentials in passthrough mode to satisfy
|
||||
// cluster services needing to interact with the cloud
|
||||
canPassthrough, err := ccaws.CheckCloudCredPassthrough(client, logger)
|
||||
canPassthrough, err := ccaws.CheckCloudCredPassthrough(client, sParams, logger)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "passthrough credentials check")
|
||||
}
|
||||
|
|
23  vendor/github.com/openshift/installer/pkg/asset/installconfig/aws/validation.go  (generated, vendored)
|
@ -10,7 +10,6 @@ import (
|
|||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
|
||||
"github.com/openshift/installer/pkg/ipnet"
|
||||
"github.com/openshift/installer/pkg/types"
|
||||
awstypes "github.com/openshift/installer/pkg/types/aws"
|
||||
)
|
||||
|
@ -18,6 +17,7 @@ import (
|
|||
// Validate executes platform-specific validation.
|
||||
func Validate(ctx context.Context, meta *Metadata, config *types.InstallConfig) error {
|
||||
allErrs := field.ErrorList{}
|
||||
|
||||
if config.Platform.AWS == nil {
|
||||
return errors.New(field.Required(field.NewPath("platform", "aws"), "AWS validation requires an AWS platform configuration").Error())
|
||||
}
|
||||
|
@ -73,8 +73,8 @@ func validateSubnets(ctx context.Context, meta *Metadata, fldPath *field.Path, s
|
|||
}
|
||||
}
|
||||
|
||||
allErrs = append(allErrs, validateSubnetCIDR(fldPath, privateSubnets, privateSubnetsIdx, networking.MachineCIDR)...)
|
||||
allErrs = append(allErrs, validateSubnetCIDR(fldPath, publicSubnets, publicSubnetsIdx, networking.MachineCIDR)...)
|
||||
allErrs = append(allErrs, validateSubnetCIDR(fldPath, privateSubnets, privateSubnetsIdx, networking.MachineNetwork)...)
|
||||
allErrs = append(allErrs, validateSubnetCIDR(fldPath, publicSubnets, publicSubnetsIdx, networking.MachineNetwork)...)
|
||||
allErrs = append(allErrs, validateDuplicateSubnetZones(fldPath, privateSubnets, privateSubnetsIdx, "private")...)
|
||||
allErrs = append(allErrs, validateDuplicateSubnetZones(fldPath, publicSubnets, publicSubnetsIdx, "public")...)
|
||||
|
||||
|
@ -122,7 +122,7 @@ func validateMachinePool(ctx context.Context, meta *Metadata, fldPath *field.Pat
|
|||
return allErrs
|
||||
}
|
||||
|
||||
func validateSubnetCIDR(fldPath *field.Path, subnets map[string]Subnet, idxMap map[string]int, machineCIDR *ipnet.IPNet) field.ErrorList {
|
||||
func validateSubnetCIDR(fldPath *field.Path, subnets map[string]Subnet, idxMap map[string]int, networks []types.MachineNetworkEntry) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
for id, v := range subnets {
|
||||
fp := fldPath.Index(idxMap[id])
|
||||
|
@ -131,15 +131,20 @@ func validateSubnetCIDR(fldPath *field.Path, subnets map[string]Subnet, idxMap m
|
|||
allErrs = append(allErrs, field.Invalid(fp, id, err.Error()))
|
||||
continue
|
||||
}
|
||||
if !machineCIDR.Contains(cidr) {
|
||||
errMsg := fmt.Sprintf("CIDR range %s is outside of the MachineCIDR %s", v.CIDR, machineCIDR)
|
||||
allErrs = append(allErrs, field.Invalid(fp, id, errMsg))
|
||||
continue
|
||||
}
|
||||
allErrs = append(allErrs, validateMachineNetworksContainIP(fp, networks, id, cidr)...)
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateMachineNetworksContainIP(fldPath *field.Path, networks []types.MachineNetworkEntry, subnetName string, ip net.IP) field.ErrorList {
|
||||
for _, network := range networks {
|
||||
if network.CIDR.Contains(ip) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return field.ErrorList{field.Invalid(fldPath, subnetName, fmt.Sprintf("subnet's CIDR range start %s is outside of the specified machine networks", ip))}
|
||||
}
|
||||
|
||||
func validateDuplicateSubnetZones(fldPath *field.Path, subnets map[string]Subnet, idxMap map[string]int, typ string) field.ErrorList {
|
||||
var keys []string
|
||||
for id := range subnets {
|
||||
|
|
42  vendor/github.com/openshift/installer/pkg/asset/installconfig/azure/azure.go  (generated, vendored)
@ -15,6 +15,7 @@ import (
	"github.com/pkg/errors"
	survey "gopkg.in/AlecAivazis/survey.v1"

	azres "github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/resources"
	azsub "github.com/Azure/azure-sdk-for-go/profiles/latest/resources/mgmt/subscriptions"
)

@ -31,12 +32,24 @@ func Platform(credentials *Credentials) (*azure.Platform, error) {
	if err != nil {
		return nil, errors.Wrap(err, "failed to get list of regions")
	}

	resourceCapableRegions, err := getResourceCapableRegions(credentials)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get list of resources to check available regions")
	}

	longRegions := make([]string, 0, len(regions))
	shortRegions := make([]string, 0, len(regions))
	for id, location := range regions {
		longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location))
		shortRegions = append(shortRegions, id)
		for _, resourceCapableRegion := range resourceCapableRegions {
			// filter our regions not capable of having resources created (we check for resource groups)
			if resourceCapableRegion == location {
				longRegions = append(longRegions, fmt.Sprintf("%s (%s)", id, location))
				shortRegions = append(shortRegions, id)
			}
		}
	}

	regionTransform := survey.TransformString(func(s string) string {
		return strings.SplitN(s, " ", 2)[0]
	})

@ -119,3 +132,28 @@ func getRegions(credentials *Credentials) (map[string]string, error) {
	}
	return allLocations, nil
}

func getResourceCapableRegions(credentials *Credentials) ([]string, error) {
	session, err := GetSession(credentials)
	if err != nil {
		return nil, err
	}

	client := azres.NewProvidersClient(session.Credentials.SubscriptionID)
	client.Authorizer = session.Authorizer
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	defer cancel()

	provider, err := client.Get(ctx, "Microsoft.Resources", "")
	if err != nil {
		return nil, err
	}

	for _, resType := range *provider.ResourceTypes {
		if *resType.ResourceType == "resourceGroups" {
			return *resType.Locations, nil
		}
	}

	return []string{}, nil
}
29	vendor/github.com/openshift/installer/pkg/asset/installconfig/azure/validation.go (generated, vendored)
@ -6,7 +6,6 @@ import (
	"net"

	aznetwork "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2018-12-01/network"
	"github.com/openshift/installer/pkg/ipnet"
	aztypes "github.com/openshift/installer/pkg/types/azure"

	"github.com/openshift/installer/pkg/types"

@ -16,12 +15,13 @@ import (
// Validate executes platform-specific validation.
func Validate(client API, ic *types.InstallConfig) error {
	allErrs := field.ErrorList{}
	allErrs = append(allErrs, validateNetworks(client, ic.Azure, ic.Networking.MachineCIDR, field.NewPath("platform").Child("azure"))...)

	allErrs = append(allErrs, validateNetworks(client, ic.Azure, ic.Networking.MachineNetwork, field.NewPath("platform").Child("azure"))...)
	return allErrs.ToAggregate()
}

// validateNetworks checks that the user-provided VNet and subnets are valid.
func validateNetworks(client API, p *aztypes.Platform, machineCIDR *ipnet.IPNet, fieldPath *field.Path) field.ErrorList {
func validateNetworks(client API, p *aztypes.Platform, machineNetworks []types.MachineNetworkEntry, fieldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

	if p.VirtualNetwork != "" {

@ -35,21 +35,21 @@ func validateNetworks(client API, p *aztypes.Platform, machineCIDR *ipnet.IPNet,
			return append(allErrs, field.Invalid(fieldPath.Child("computeSubnet"), p.ComputeSubnet, "failed to retrieve compute subnet"))
		}

		allErrs = append(allErrs, validateSubnet(client, machineCIDR, fieldPath.Child("computeSubnet"), computeSubnet, p.ComputeSubnet)...)
		allErrs = append(allErrs, validateSubnet(client, fieldPath.Child("computeSubnet"), computeSubnet, p.ComputeSubnet, machineNetworks)...)

		controlPlaneSubnet, err := client.GetControlPlaneSubnet(context.TODO(), p.NetworkResourceGroupName, p.VirtualNetwork, p.ControlPlaneSubnet)
		if err != nil {
			return append(allErrs, field.Invalid(fieldPath.Child("controlPlaneSubnet"), p.ControlPlaneSubnet, "failed to retrieve control plane subnet"))
		}

		allErrs = append(allErrs, validateSubnet(client, machineCIDR, fieldPath.Child("controlPlaneSubnet"), controlPlaneSubnet, p.ControlPlaneSubnet)...)
		allErrs = append(allErrs, validateSubnet(client, fieldPath.Child("controlPlaneSubnet"), controlPlaneSubnet, p.ControlPlaneSubnet, machineNetworks)...)
	}

	return allErrs
}

// validateSubnet checks that the subnet is in the same network as the machine CIDR
func validateSubnet(client API, machineCIDR *ipnet.IPNet, fieldPath *field.Path, subnet *aznetwork.Subnet, subnetName string) field.ErrorList {
func validateSubnet(client API, fieldPath *field.Path, subnet *aznetwork.Subnet, subnetName string, networks []types.MachineNetworkEntry) field.ErrorList {
	allErrs := field.ErrorList{}

	subnetIP, _, err := net.ParseCIDR(*subnet.AddressPrefix)

@ -57,10 +57,15 @@ func validateSubnet(client API, machineCIDR *ipnet.IPNet, fieldPath *field.Path,
		return append(allErrs, field.Invalid(fieldPath, subnetName, "unable to parse subnet CIDR"))
	}

	if !machineCIDR.Contains(subnetIP) {
		errMsg := fmt.Sprintf("subnet %v has an IP address range %v outside of the MachineCIDR %v", subnetName, subnet.AddressPrefix, machineCIDR)
		return append(allErrs, field.Invalid(fieldPath, subnetName, errMsg))
	}

	return nil
	allErrs = append(allErrs, validateMachineNetworksContainIP(fieldPath, networks, *subnet.Name, subnetIP)...)
	return allErrs
}

func validateMachineNetworksContainIP(fldPath *field.Path, networks []types.MachineNetworkEntry, subnetName string, ip net.IP) field.ErrorList {
	for _, network := range networks {
		if network.CIDR.Contains(ip) {
			return nil
		}
	}
	return field.ErrorList{field.Invalid(fldPath, subnetName, fmt.Sprintf("subnet %s address prefix is outside of the specified machine networks", ip))}
}
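For orientation, the replacement check above boils down to "is the subnet's first address inside at least one configured machine network". A minimal, self-contained sketch of that containment test using only the Go standard library (the installer's field.ErrorList and types.MachineNetworkEntry are left out, and the CIDR values are made up):

package main

import (
	"fmt"
	"net"
)

// containedInAny reports whether ip falls inside at least one of the given
// CIDR ranges, mirroring the spirit of validateMachineNetworksContainIP.
func containedInAny(ip net.IP, cidrs []string) (bool, error) {
	for _, c := range cidrs {
		_, network, err := net.ParseCIDR(c)
		if err != nil {
			return false, err
		}
		if network.Contains(ip) {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	// The subnet's address prefix start, as validateSubnet obtains it via net.ParseCIDR.
	subnetIP, _, _ := net.ParseCIDR("10.0.1.0/24")

	ok, _ := containedInAny(subnetIP, []string{"10.0.0.0/16"})
	fmt.Println(ok) // true: 10.0.1.0 lies inside 10.0.0.0/16
}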
6	vendor/github.com/openshift/installer/pkg/asset/installconfig/clustername.go (generated, vendored)
@ -4,7 +4,6 @@ import (
	survey "gopkg.in/AlecAivazis/survey.v1"

	"github.com/openshift/installer/pkg/asset"
	gcpvalidation "github.com/openshift/installer/pkg/types/gcp/validation"
	"github.com/openshift/installer/pkg/types/validation"
	"github.com/openshift/installer/pkg/validate"
)

@ -32,8 +31,11 @@ func (a *clusterName) Generate(parents asset.Parents) error {
	validator := survey.ComposeValidators(survey.Required, func(ans interface{}) error {
		return validate.DomainName(validation.ClusterDomain(bd.BaseDomain, ans.(string)), false)
	})

	if platform.GCP != nil {
		validator = survey.ComposeValidators(validator, func(ans interface{}) error { return gcpvalidation.ValidateClusterName(ans.(string)) })
		validator = survey.ComposeValidators(validator, func(ans interface{}) error {
			return validate.ClusterName1035(ans.(string))
		})
	}

	return survey.Ask([]*survey.Question{
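As background on the validator chaining used here, composed validators from gopkg.in/AlecAivazis/survey.v1 run in order and return the first error. A hedged sketch of that behaviour; the maxLen rule below is a made-up stand-in for validate.ClusterName1035, not the installer's real check:

package main

import (
	"errors"
	"fmt"

	survey "gopkg.in/AlecAivazis/survey.v1"
)

func main() {
	// Hypothetical extra rule standing in for validate.ClusterName1035:
	// reject answers longer than ten characters.
	maxLen := func(ans interface{}) error {
		if s, ok := ans.(string); ok && len(s) > 10 {
			return errors.New("cluster name too long")
		}
		return nil
	}

	validator := survey.ComposeValidators(survey.Required, maxLen)

	fmt.Println(validator(""))                   // error from survey.Required
	fmt.Println(validator("short"))              // <nil>
	fmt.Println(validator("much-too-long-name")) // error from maxLen
}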
17	vendor/github.com/openshift/installer/pkg/asset/installconfig/gcp/validation.go (generated, vendored)
@ -44,7 +44,6 @@ func validateNetworks(client API, ic *types.InstallConfig, fieldPath *field.Path

func validateSubnet(client API, ic *types.InstallConfig, fieldPath *field.Path, subnets []*compute.Subnetwork, name string) field.ErrorList {
	allErrs := field.ErrorList{}
	machineCIDR := ic.Networking.MachineCIDR

	subnet, errMsg := findSubnet(subnets, name, ic.GCP.Network, ic.GCP.Region)
	if subnet == nil {

@ -56,11 +55,8 @@ func validateSubnet(client API, ic *types.InstallConfig, fieldPath *field.Path,
		return append(allErrs, field.Invalid(fieldPath, name, "unable to parse subnet CIDR"))
	}

	if !machineCIDR.Contains(subnetIP) {
		errMsg := fmt.Sprintf("subnet %v has an IP address range %v outside of the MachineCIDR %v", name, subnet.IpCidrRange, machineCIDR)
		return append(allErrs, field.Invalid(fieldPath, name, errMsg))
	}
	return nil
	allErrs = append(allErrs, validateMachineNetworksContainIP(fieldPath, ic.Networking.MachineNetwork, name, subnetIP)...)
	return allErrs
}

// findSubnet checks that the subnets are in the provided VPC and region.

@ -72,3 +68,12 @@ func findSubnet(subnets []*compute.Subnetwork, userSubnet, network, region strin
	}
	return nil, fmt.Sprintf("could not find subnet %s in network %s and region %s", userSubnet, network, region)
}

func validateMachineNetworksContainIP(fldPath *field.Path, networks []types.MachineNetworkEntry, subnetName string, ip net.IP) field.ErrorList {
	for _, network := range networks {
		if network.CIDR.Contains(ip) {
			return nil
		}
	}
	return field.ErrorList{field.Invalid(fldPath, subnetName, fmt.Sprintf("subnet CIDR range start %s is outside of the specified machine networks", ip))}
}
13	vendor/github.com/openshift/installer/pkg/asset/installconfig/platformcredscheck.go (generated, vendored)
@ -8,7 +8,6 @@ import (
	"github.com/pkg/errors"

	"github.com/openshift/installer/pkg/asset"
	awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws"
	azureconfig "github.com/openshift/installer/pkg/asset/installconfig/azure"
	gcpconfig "github.com/openshift/installer/pkg/asset/installconfig/gcp"
	"github.com/openshift/installer/pkg/types/aws"

@ -47,20 +46,10 @@ func (a *PlatformCredsCheck) Generate(dependencies asset.Parents) error {
	platform := ic.Config.Platform.Name()
	switch platform {
	case aws.Name:
		permissionGroups := []awsconfig.PermissionGroup{awsconfig.PermissionCreateBase, awsconfig.PermissionDeleteBase}
		// If subnets are not provided in install-config.yaml, include network permissions
		if len(ic.Config.AWS.Subnets) == 0 {
			permissionGroups = append(permissionGroups, awsconfig.PermissionCreateNetworking, awsconfig.PermissionDeleteNetworking)
		}

		ssn, err := ic.AWS.Session(ctx)
		_, err := ic.AWS.Session(ctx)
		if err != nil {
			return err
		}
		err = awsconfig.ValidateCreds(ssn, permissionGroups)
		if err != nil {
			return errors.Wrap(err, "validate AWS credentials")
		}
	case gcp.Name:
		_, err = gcpconfig.GetSession(ctx)
		if err != nil {
71	vendor/github.com/openshift/installer/pkg/asset/installconfig/platformpermscheck.go (generated, vendored, new file)
@ -0,0 +1,71 @@
package installconfig

import (
	"context"
	"fmt"

	"github.com/pkg/errors"

	"github.com/openshift/installer/pkg/asset"
	awsconfig "github.com/openshift/installer/pkg/asset/installconfig/aws"
	"github.com/openshift/installer/pkg/types/aws"
	"github.com/openshift/installer/pkg/types/azure"
	"github.com/openshift/installer/pkg/types/baremetal"
	"github.com/openshift/installer/pkg/types/gcp"
	"github.com/openshift/installer/pkg/types/libvirt"
	"github.com/openshift/installer/pkg/types/none"
	"github.com/openshift/installer/pkg/types/openstack"
	"github.com/openshift/installer/pkg/types/vsphere"
)

// PlatformPermsCheck is an asset that checks platform credentials for the necessary permissions
// to create a cluster.
type PlatformPermsCheck struct {
}

var _ asset.Asset = (*PlatformPermsCheck)(nil)

// Dependencies returns the dependencies for PlatformPermsCheck
func (a *PlatformPermsCheck) Dependencies() []asset.Asset {
	return []asset.Asset{
		&InstallConfig{},
	}
}

// Generate queries for input from the user.
func (a *PlatformPermsCheck) Generate(dependencies asset.Parents) error {
	ctx := context.TODO()
	ic := &InstallConfig{}
	dependencies.Get(ic)

	var err error
	platform := ic.Config.Platform.Name()
	switch platform {
	case aws.Name:
		permissionGroups := []awsconfig.PermissionGroup{awsconfig.PermissionCreateBase, awsconfig.PermissionDeleteBase}
		// If subnets are not provided in install-config.yaml, include network permissions
		if len(ic.Config.AWS.Subnets) == 0 {
			permissionGroups = append(permissionGroups, awsconfig.PermissionCreateNetworking, awsconfig.PermissionDeleteNetworking)
		}

		ssn, err := ic.AWS.Session(ctx)
		if err != nil {
			return err
		}

		err = awsconfig.ValidateCreds(ssn, permissionGroups, ic.Config.Platform.AWS.Region)
		if err != nil {
			return errors.Wrap(err, "validate AWS credentials")
		}
	case azure.Name, baremetal.Name, gcp.Name, libvirt.Name, none.Name, openstack.Name, vsphere.Name:
		// no permissions to check
	default:
		err = fmt.Errorf("unknown platform type %q", platform)
	}
	return err
}

// Name returns the human-friendly name of the asset.
func (a *PlatformPermsCheck) Name() string {
	return "Platform Permissions Check"
}
6	vendor/github.com/openshift/installer/pkg/asset/machines/baremetal/hosts.go (generated, vendored)
@ -2,6 +2,7 @@ package baremetal

import (
	"fmt"
	"github.com/metal3-io/baremetal-operator/pkg/hardware"

	machineapi "github.com/openshift/cluster-api/pkg/apis/machine/v1beta1"
	corev1 "k8s.io/api/core/v1"

@ -52,6 +53,11 @@ func Hosts(config *types.InstallConfig, machines []machineapi.Machine) (*HostSet
		}
		settings.Secrets = append(settings.Secrets, secret)

		// Map string 'default' to hardware.DefaultProfileName
		if host.HardwareProfile == "default" {
			host.HardwareProfile = hardware.DefaultProfileName
		}

		newHost := baremetalhost.BareMetalHost{
			TypeMeta: metav1.TypeMeta{
				APIVersion: baremetalhost.SchemeGroupVersion.String(),
3	vendor/github.com/openshift/installer/pkg/asset/machines/baremetal/machines.go (generated, vendored)
@ -3,6 +3,7 @@ package baremetal

import (
	"fmt"
	"net"
	"net/url"
	"path"
	"strings"

@ -86,7 +87,7 @@ func provider(clusterName string, platform *baremetal.Platform, osImage string,
	// ref https://github.com/openshift/ironic-rhcos-downloader/pull/12
	imageFilename := path.Base(strings.TrimSuffix(imageURL.String(), ".gz"))
	compressedImageFilename := strings.Replace(imageFilename, "openstack", "compressed", 1)
	cacheImageURL := fmt.Sprintf("http://%s:6180/images/%s/%s", platform.ClusterProvisioningIP, imageFilename, compressedImageFilename)
	cacheImageURL := fmt.Sprintf("http://%s/images/%s/%s", net.JoinHostPort(platform.ClusterProvisioningIP, "6180"), imageFilename, compressedImageFilename)
	cacheChecksumURL := fmt.Sprintf("%s.md5sum", cacheImageURL)
	config := &baremetalprovider.BareMetalMachineProviderSpec{
		Image: baremetalprovider.Image{
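The point of switching to net.JoinHostPort is that an IPv6 provisioning address must be bracketed when it is embedded in a URL; plain "%s:6180" formatting breaks in that case. A small standalone illustration (the addresses and file names below are made up):

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, ip := range []string{"172.22.0.3", "fd00:1101::3"} {
		// The old fmt.Sprintf("http://%s:6180/...", ip) form produces an invalid
		// URL for the IPv6 case; net.JoinHostPort adds the required brackets.
		url := fmt.Sprintf("http://%s/images/%s/%s",
			net.JoinHostPort(ip, "6180"), "rhcos-openstack.qcow2", "rhcos-compressed.qcow2")
		fmt.Println(url)
	}
	// http://172.22.0.3:6180/images/rhcos-openstack.qcow2/rhcos-compressed.qcow2
	// http://[fd00:1101::3]:6180/images/rhcos-openstack.qcow2/rhcos-compressed.qcow2
}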
2	vendor/github.com/openshift/installer/pkg/asset/machines/libvirt/machines.go (generated, vendored)
@ -27,7 +27,7 @@ func Machines(clusterID string, config *types.InstallConfig, pool *types.Machine
	if pool.Replicas != nil {
		total = *pool.Replicas
	}
	provider := provider(clusterID, config.Networking.MachineCIDR.String(), platform, userDataSecret)
	provider := provider(clusterID, config.Networking.MachineNetwork[0].CIDR.String(), platform, userDataSecret)
	var machines []machineapi.Machine
	for idx := int64(0); idx < total; idx++ {
		machine := machineapi.Machine{
2	vendor/github.com/openshift/installer/pkg/asset/machines/libvirt/machinesets.go (generated, vendored)
@ -31,7 +31,7 @@ func MachineSets(clusterID string, config *types.InstallConfig, pool *types.Mach
		total = *pool.Replicas
	}

	provider := provider(clusterID, config.Networking.MachineCIDR.String(), platform, userDataSecret)
	provider := provider(clusterID, config.Networking.MachineNetwork[0].CIDR.String(), platform, userDataSecret)
	name := fmt.Sprintf("%s-%s-%d", clusterID, pool.Name, 0)
	mset := &machineapi.MachineSet{
		TypeMeta: metav1.TypeMeta{
9	vendor/github.com/openshift/installer/pkg/asset/manifests/additionaltrustbundleconfig.go (generated, vendored)
@ -9,6 +9,7 @@ import (

	"github.com/ghodss/yaml"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@ -114,10 +115,14 @@ func parseCertificates(certificates string) (map[string]string, error) {
			return nil, err
		}

		if cert.IsCA {
			sb.WriteString(string(pem.EncodeToMemory(block)))
		if cert.Version < 3 {
			logrus.Warnf("Certificate %X from additionalTrustBundle is x509 v%d", cert.SerialNumber, cert.Version)
		} else if !cert.IsCA {
			logrus.Warnf("Certificate %X from additionalTrustBundle is x509 v%d but not a certificate authority", cert.SerialNumber, cert.Version)
		}

		sb.WriteString(string(pem.EncodeToMemory(block)))

		if len(rest) == 0 {
			break
		}
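To make the new behaviour concrete: every decoded certificate is now re-encoded into the bundle, and pre-v3 or non-CA certificates only produce a warning instead of being dropped. A self-contained sketch of the same decode, inspect, append loop; the throwaway self-signed certificate is generated here only so the example has input, and warnings go to stdout instead of logrus:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"strings"
	"time"
)

func main() {
	// Build a throwaway self-signed certificate so the loop below has input.
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "example"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().Add(time.Hour),
		IsCA:                  false, // deliberately not a CA, to hit the warning path
		BasicConstraintsValid: true,
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	bundle := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})

	// The same decode -> inspect -> append pattern as parseCertificates.
	var sb strings.Builder
	rest := bundle
	for {
		var block *pem.Block
		block, rest = pem.Decode(rest)
		if block == nil {
			break
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			panic(err)
		}
		if cert.Version < 3 {
			fmt.Printf("warning: certificate %X is x509 v%d\n", cert.SerialNumber, cert.Version)
		} else if !cert.IsCA {
			fmt.Printf("warning: certificate %X is x509 v%d but not a certificate authority\n", cert.SerialNumber, cert.Version)
		}
		sb.WriteString(string(pem.EncodeToMemory(block)))
		if len(rest) == 0 {
			break
		}
	}
	fmt.Println(len(sb.String()), "bytes kept in the bundle")
}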
4	vendor/github.com/openshift/installer/pkg/asset/manifests/azure/cloudproviderconfig.go (generated, vendored)
@ -16,8 +16,6 @@ type CloudProviderConfig struct {
	NetworkSecurityGroupName string
	VirtualNetworkName       string
	SubnetName               string
	AADClientID              string
	AADClientSecret          string
	ARO                      bool
}

@ -55,8 +53,6 @@ func (params CloudProviderConfig) JSON() (string, error) {

	if params.ARO {
		config.authConfig.UseManagedIdentityExtension = false
		config.authConfig.AADClientID = params.AADClientID
		config.authConfig.AADClientSecret = params.AADClientSecret
	}

	buff := &bytes.Buffer{}
149	vendor/github.com/openshift/installer/pkg/asset/manifests/cloudproviderconfig.go (generated, vendored)
@ -2,6 +2,7 @@ package manifests

import (
	"fmt"
	"io/ioutil"
	"path/filepath"

	"github.com/ghodss/yaml"

@ -9,6 +10,8 @@ import (
	"github.com/pkg/errors"

	corev1 "k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/installer/pkg/asset"

@ -29,7 +32,10 @@ import (
)

var (
	cloudProviderConfigFileName = filepath.Join(manifestDir, "cloud-provider-config.yaml")
	cloudProviderConfigFileName         = filepath.Join(manifestDir, "cloud-provider-config.yaml")
	aroCloudProviderRoleFileName        = filepath.Join(manifestDir, "aro-cloud-provider-secret-reader-role.yaml")
	aroCloudProviderRoleBindingFileName = filepath.Join(manifestDir, "aro-cloud-provider-secret-reader-rolebinding.yaml")
	aroCloudProviderSecretFileName      = filepath.Join(manifestDir, "aro-cloud-provider-secret.yaml")
)

const (

@ -39,7 +45,7 @@ const (
// CloudProviderConfig generates the cloud-provider-config.yaml files.
type CloudProviderConfig struct {
	ConfigMap *corev1.ConfigMap
	File      *asset.File
	FileList  []*asset.File
}

var _ asset.WritableAsset = (*CloudProviderConfig)(nil)

@ -94,6 +100,15 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error {
		}

		cm.Data[cloudProviderConfigDataKey] = openstackmanifests.CloudProviderConfig(cloud)

		// Get the ca-cert-bundle key if there is a value for cacert in clouds.yaml
		if caPath := cloud.CACertFile; caPath != "" {
			caFile, err := ioutil.ReadFile(caPath)
			if err != nil {
				return errors.Wrap(err, "failed to read clouds.yaml ca-cert from disk")
			}
			cm.Data["ca-bundle.pem"] = string(caFile)
		}
	case azuretypes.Name:
		session, err := icazure.GetSession(platformCreds.Azure)
		if err != nil {

@ -113,7 +128,7 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error {
		if installConfig.Config.Azure.ComputeSubnet != "" {
			subnet = installConfig.Config.Azure.ComputeSubnet
		}
		config := azure.CloudProviderConfig{
		azureConfig, err := azure.CloudProviderConfig{
			GroupLocation:  installConfig.Config.Azure.Region,
			ResourcePrefix: clusterID.InfraID,
			SubscriptionID: session.Credentials.SubscriptionID,

@ -124,12 +139,7 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error {
			VirtualNetworkName: vnet,
			SubnetName:         subnet,
			ARO:                installConfig.Config.Azure.ARO,
		}
		if platformCreds.Azure != nil {
			config.AADClientID = platformCreds.Azure.ClientID
			config.AADClientSecret = platformCreds.Azure.ClientSecret
		}
		azureConfig, err := config.JSON()
		}.JSON()
		if err != nil {
			return errors.Wrap(err, "could not create cloud provider config")
		}

@ -162,22 +172,129 @@ func (cpc *CloudProviderConfig) Generate(dependencies asset.Parents) error {
		return errors.Wrapf(err, "failed to create %s manifest", cpc.Name())
	}
	cpc.ConfigMap = cm
	cpc.File = &asset.File{
		Filename: cloudProviderConfigFileName,
		Data:     cmData,
	cpc.FileList = []*asset.File{
		{
			Filename: cloudProviderConfigFileName,
			Data:     cmData,
		},
	}
	if installConfig.Config.Azure.ARO {
		for _, f := range []struct {
			filename string
			data     func(*installconfig.PlatformCreds) ([]byte, error)
		}{
			{
				filename: aroCloudProviderRoleFileName,
				data:     aroRole,
			},
			{
				filename: aroCloudProviderRoleBindingFileName,
				data:     aroRoleBinding,
			},
			{
				filename: aroCloudProviderSecretFileName,
				data:     aroSecret,
			},
		} {
			b, err := f.data(platformCreds)
			if err != nil {
				return errors.Wrapf(err, "failed to create %s manifest", cpc.Name())
			}

			cpc.FileList = append(cpc.FileList, &asset.File{
				Filename: f.filename,
				Data:     b,
			})
		}
	}
	return nil
}

// Files returns the files generated by the asset.
func (cpc *CloudProviderConfig) Files() []*asset.File {
	if cpc.File != nil {
		return []*asset.File{cpc.File}
	}
	return []*asset.File{}
	return cpc.FileList
}

// Load loads the already-rendered files back from disk.
func (cpc *CloudProviderConfig) Load(f asset.FileFetcher) (bool, error) {
	return false, nil
}

func aroRole(*installconfig.PlatformCreds) ([]byte, error) {
	return yaml.Marshal(&rbacv1.Role{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Role",
			APIVersion: "rbac.authorization.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "aro-cloud-provider-secret-reader",
			Namespace: "kube-system",
		},
		Rules: []rbacv1.PolicyRule{
			{
				Verbs:         []string{"get"},
				APIGroups:     []string{""},
				Resources:     []string{"secrets"},
				ResourceNames: []string{"azure-cloud-provider"},
			},
		},
	})
}

func aroRoleBinding(*installconfig.PlatformCreds) ([]byte, error) {
	return yaml.Marshal(&rbacv1.RoleBinding{
		TypeMeta: metav1.TypeMeta{
			Kind:       "RoleBinding",
			APIVersion: "rbac.authorization.k8s.io/v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "aro-cloud-provider-secret-read",
			Namespace: "kube-system",
		},
		Subjects: []rbacv1.Subject{
			{
				Kind:      "ServiceAccount",
				Name:      "azure-cloud-provider",
				Namespace: "kube-system",
			},
		},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "Role",
			Name:     "aro-cloud-provider-secret-reader",
		},
	})
}

func aroSecret(platformCreds *installconfig.PlatformCreds) ([]byte, error) {
	// config is used to created compatible secret to trigger azure cloud
	// controller config merge behaviour
	// https://github.com/openshift/origin/blob/release-4.3/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_config.go#L82
	config := struct {
		AADClientID     string `json:"aadClientId" yaml:"aadClientId"`
		AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
	}{
		AADClientID:     platformCreds.Azure.ClientID,
		AADClientSecret: platformCreds.Azure.ClientSecret,
	}

	b, err := yaml.Marshal(config)
	if err != nil {
		return nil, err
	}

	return yaml.Marshal(&v1.Secret{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Secret",
			APIVersion: corev1.SchemeGroupVersion.String(),
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      "azure-cloud-provider",
			Namespace: "kube-system",
		},
		Data: map[string][]byte{
			"cloud-config": b,
		},
		Type: v1.SecretTypeOpaque,
	})
}
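For reference, the cloud-config payload that aroSecret embeds in the azure-cloud-provider Secret is a two-key YAML document that the Azure cloud provider merges over its file-based configuration. A hedged sketch of what that payload looks like when marshalled with github.com/ghodss/yaml (the credential values are placeholders, not real ones):

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// Same shape as the anonymous struct in aroSecret; values are placeholders.
	config := struct {
		AADClientID     string `json:"aadClientId" yaml:"aadClientId"`
		AADClientSecret string `json:"aadClientSecret" yaml:"aadClientSecret"`
	}{
		AADClientID:     "00000000-0000-0000-0000-000000000000",
		AADClientSecret: "placeholder-secret",
	}

	b, err := yaml.Marshal(config)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(b))
	// aadClientId: 00000000-0000-0000-0000-000000000000
	// aadClientSecret: placeholder-secret
}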
2	vendor/github.com/openshift/installer/pkg/asset/manifests/infrastructure.go (generated, vendored)
@ -149,7 +149,7 @@ func (i *Infrastructure) Generate(dependencies asset.Parents) error {
	if cloudproviderconfig.ConfigMap != nil {
		// set the configmap reference.
		config.Spec.CloudConfig = configv1.ConfigMapFileReference{Name: cloudproviderconfig.ConfigMap.Name, Key: cloudProviderConfigDataKey}
		i.FileList = append(i.FileList, cloudproviderconfig.File)
		i.FileList = append(i.FileList, cloudproviderconfig.Files()...)
	}

	if trustbundleconfig.ConfigMap != nil {
7	vendor/github.com/openshift/installer/pkg/asset/manifests/openshift.go (generated, vendored)
@ -15,6 +15,7 @@ import (
	"github.com/openshift/installer/pkg/asset/installconfig/gcp"
	"github.com/openshift/installer/pkg/asset/machines"
	openstackmanifests "github.com/openshift/installer/pkg/asset/manifests/openstack"
	"github.com/openshift/installer/pkg/asset/openshiftinstall"

	osmachine "github.com/openshift/installer/pkg/asset/machines/openstack"
	"github.com/openshift/installer/pkg/asset/password"

@ -53,6 +54,7 @@ func (o *Openshift) Dependencies() []asset.Asset {
		&installconfig.InstallConfig{},
		&installconfig.ClusterID{},
		&password.KubeadminPassword{},
		&openshiftinstall.Config{},

		&openshift.CloudCredsSecret{},
		&openshift.KubeadminPasswordSecret{},

@ -67,7 +69,8 @@ func (o *Openshift) Generate(dependencies asset.Parents) error {
	installConfig := &installconfig.InstallConfig{}
	clusterID := &installconfig.ClusterID{}
	kubeadminPassword := &password.KubeadminPassword{}
	dependencies.Get(platformCreds, installConfig, kubeadminPassword, clusterID)
	openshiftInstall := &openshiftinstall.Config{}
	dependencies.Get(platformCreds, installConfig, kubeadminPassword, clusterID, openshiftInstall)
	var cloudCreds cloudCredsSecretData
	platform := installConfig.Config.Platform.Name()
	switch platform {

@ -194,6 +197,8 @@ func (o *Openshift) Generate(dependencies asset.Parents) error {
		})
	}

	o.FileList = append(o.FileList, openshiftInstall.Files()...)

	asset.SortFiles(o.FileList)

	return nil
6	vendor/github.com/openshift/installer/pkg/asset/manifests/openstack/cloudproviderconfig.go (generated, vendored)
@ -34,6 +34,10 @@ secret-namespace = kube-system
		res += "region = " + cloud.RegionName + "\n"
	}

	if cloud.CACertFile != "" {
		res += "ca-file = /etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem\n"
	}

	return res
}

@ -83,7 +87,7 @@ func CloudProviderConfigSecret(cloud *clientconfig.Cloud) ([]byte, error) {
		res.WriteString("region = " + strconv.Quote(cloud.RegionName) + "\n")
	}
	if cloud.CACertFile != "" {
		res.WriteString("ca-file = " + strconv.Quote(cloud.CACertFile) + "\n")
		res.WriteString("ca-file = /etc/kubernetes/static-pod-resources/configmaps/cloud-config/ca-bundle.pem\n")
	}

	return []byte(res.String()), nil

@ -15,9 +15,9 @@ import (
	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/asset/installconfig"
	"github.com/openshift/installer/pkg/types/aws"
	"github.com/openshift/installer/pkg/types/azure"
	"github.com/openshift/installer/pkg/types/gcp"
	"github.com/openshift/installer/pkg/types/none"
	"github.com/openshift/installer/pkg/types/vsphere"
	"github.com/openshift/installer/pkg/types/openstack"
)

var proxyCfgFilename = filepath.Join(manifestDir, "cluster-proxy-01-config.yaml")

@ -105,7 +105,7 @@ func (p *Proxy) Generate(dependencies asset.Parents) error {
// createNoProxy combines user-provided & platform-specific values to create a comma-separated
// list of unique NO_PROXY values. Platform values are: serviceCIDR, podCIDR, machineCIDR,
// localhost, 127.0.0.1, api.clusterdomain, api-int.clusterdomain, etcd-idx.clusterdomain
// If platform is not vSphere or None add 169.254.169.254 to the list of NO_PROXY addresses.
// If platform is AWS, GCP, Azure, or OpenStack add 169.254.169.254 to the list of NO_PROXY addresses.
// If platform is AWS, add ".ec2.internal" for region us-east-1 or for all other regions add
// ".<aws_region>.compute.internal" to the list of NO_PROXY addresses. We should not proxy
// the instance metadata services:

@ -124,13 +124,15 @@ func createNoProxy(installConfig *installconfig.InstallConfig, network *Networki
		"localhost",
		".svc",
		".cluster.local",
		network.Config.Spec.ServiceNetwork[0],
		internalAPIServer.Hostname(),
		installConfig.Config.Networking.MachineCIDR.String(),
	)

	platform := installConfig.Config.Platform.Name()

	if platform != vsphere.Name && platform != none.Name {
	// FIXME: The cluster-network-operator duplicates this code in pkg/util/proxyconfig/no_proxy.go,
	// if altering this list of platforms, you must ALSO alter the code in cluster-network-operator.
	switch platform {
	case aws.Name, gcp.Name, azure.Name, openstack.Name:
		set.Insert("169.254.169.254")
	}

@ -155,6 +157,14 @@ func createNoProxy(installConfig *installconfig.InstallConfig, network *Networki
		set.Insert(etcdHost)
	}

	for _, network := range installConfig.Config.Networking.ServiceNetwork {
		set.Insert(network.String())
	}

	for _, network := range installConfig.Config.Networking.MachineNetwork {
		set.Insert(network.CIDR.String())
	}

	for _, clusterNetwork := range network.Config.Spec.ClusterNetwork {
		set.Insert(clusterNetwork.CIDR)
	}
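The net effect of the last two hunks is that every ServiceNetwork and MachineNetwork CIDR, not only the single legacy MachineCIDR, ends up in NO_PROXY. A reduced sketch of that set construction, using a plain map instead of the k8s.io string set and made-up CIDR values:

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	// Illustrative values standing in for installConfig.Config.Networking.
	machineNetworks := []string{"10.0.0.0/16", "10.1.0.0/16"}
	serviceNetworks := []string{"172.30.0.0/16"}

	set := map[string]struct{}{
		".cluster.local": {},
		".svc":           {},
		"localhost":      {},
	}
	for _, cidr := range machineNetworks {
		set[cidr] = struct{}{}
	}
	for _, cidr := range serviceNetworks {
		set[cidr] = struct{}{}
	}

	// Emit the sorted, comma-separated NO_PROXY value, as the proxy asset does.
	var entries []string
	for e := range set {
		entries = append(entries, e)
	}
	sort.Strings(entries)
	fmt.Println(strings.Join(entries, ","))
}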
105	vendor/github.com/openshift/installer/pkg/asset/openshiftinstall/openshiftinstall.go (generated, vendored, new file)
@ -0,0 +1,105 @@
package openshiftinstall

import (
	"os"
	"path/filepath"

	"github.com/ghodss/yaml"
	"github.com/pkg/errors"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/installer/pkg/asset"
	"github.com/openshift/installer/pkg/version"
)

var (
	configPath = filepath.Join("openshift", "openshift-install-manifests.yaml")
)

// Config generates the openshift-install ConfigMap.
type Config struct {
	File *asset.File
}

var _ asset.WritableAsset = (*Config)(nil)

// Name returns a human friendly name for the asset.
func (*Config) Name() string {
	return "OpenShift Install (Manifests)"
}

// Dependencies returns all of the dependencies directly needed to generate
// the asset.
func (*Config) Dependencies() []asset.Asset {
	return []asset.Asset{}
}

// Generate generates the openshift-install ConfigMap.
func (i *Config) Generate(dependencies asset.Parents) error {
	cm, err := CreateInstallConfigMap("openshift-install-manifests")
	if err != nil {
		return err
	}

	i.File = &asset.File{
		Filename: configPath,
		Data:     []byte(cm),
	}

	return nil
}

// Files returns the files generated by the asset.
func (i *Config) Files() []*asset.File {
	if i.File != nil {
		return []*asset.File{i.File}
	}
	return []*asset.File{}
}

// Load loads the already-rendered files back from disk.
func (i *Config) Load(f asset.FileFetcher) (bool, error) {
	file, err := f.FetchByName(configPath)
	if os.IsNotExist(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	i.File = file
	return true, nil
}

// CreateInstallConfigMap creates an openshift-install ConfigMap from the
// OPENSHIFT_INSTALL_INVOKER environment variable and the given name for the
// ConfigMap. This returns an error if marshalling to YAML fails.
func CreateInstallConfigMap(name string) (string, error) {
	var invoker string
	if env := os.Getenv("OPENSHIFT_INSTALL_INVOKER"); env != "" {
		invoker = env
	} else {
		invoker = "user"
	}

	cm := &corev1.ConfigMap{
		TypeMeta: metav1.TypeMeta{
			APIVersion: corev1.SchemeGroupVersion.String(),
			Kind:       "ConfigMap",
		},
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "openshift-config",
			Name:      name,
		},
		Data: map[string]string{
			"version": version.Raw,
			"invoker": invoker,
		},
	}

	cmData, err := yaml.Marshal(cm)
	if err != nil {
		return "", errors.Wrapf(err, "failed to create %q ConfigMap", name)
	}

	return string(cmData), nil
}
Some files were not shown because too many files changed in this diff.