This commit is contained in:
David Schott 2020-03-31 12:12:03 -07:00
Parent 34684821b2
Commit b48aa0d1e6
112 changed files with 11798 additions and 4 deletions

18
.gitignore vendored

@@ -1,15 +1,27 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~
# skip generated examples
examples/_out

14
CONTRIBUTING.md Normal file

@@ -0,0 +1,14 @@
# Contributing
This project welcomes contributions and suggestions. Most contributions require you to
agree to a Contributor License Agreement (CLA) declaring that you have the right to,
and actually do, grant us the rights to use your contribution. For details, visit
https://cla.microsoft.com.
When you submit a pull request, a CLA-bot will automatically determine whether you need
to provide a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the
instructions provided by the bot. You will only need to do this once across all repositories using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.

48
Dockerfile Normal file

@@ -0,0 +1,48 @@
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build the manager binary
#FROM golang:1.12.9 as builder
# WORKDIR /workspace
# Run this with docker build --build-arg goproxy=$(go env GOPROXY) to override the goproxy
#ARG goproxy=https://proxy.golang.org
#ENV GOPROXY=$goproxy
#ENV GOPRIVATE="github.com/microsoft"
#RUN go env GOPRIVATE=github.com/microsoft
# Copy the Go Modules manifests
#COPY go.mod go.mod
#COPY go.sum go.sum
# Cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
#RUN go mod download
# Copy the sources
#COPY ./ ./
#COPY ./bin/manager ./
# Build
#ARG ARCH
#RUN CGO_ENABLED=0 GOOS=linux GOARCH=${ARCH} GO111MODULE=on \
# go build -a -ldflags '-extldflags "-static"' \
# -o manager .
# Copy the controller-manager into a thin image
FROM gcr.io/distroless/static:latest
WORKDIR /
COPY bin/manager ./
USER nobody
ENTRYPOINT ["/manager"]

184
Makefile Normal file

@@ -0,0 +1,184 @@
# Ensure Make is run with bash shell as some syntax below is bash-specific
SHELL:=/usr/bin/env bash
.DEFAULT_GOAL:=help
# Use GOPROXY environment variable if set
GOPROXY := $(shell go env GOPROXY)
ifeq ($(GOPROXY),)
GOPROXY := https://proxy.golang.org
endif
export GOPROXY
# Activate module mode, as we use go modules to manage dependencies
export GO111MODULE=on
# Private repo workaround
export GOPRIVATE = github.com/microsoft
# Directories.
BIN_DIR := bin
# Assumed location for the tool binaries referenced below.
TOOLS_BIN_DIR := hack/tools/bin
# Binaries.
CLUSTERCTL := $(BIN_DIR)/clusterctl
CONTROLLER_GEN := $(TOOLS_BIN_DIR)/controller-gen
GOLANGCI_LINT := $(TOOLS_BIN_DIR)/golangci-lint
MOCKGEN := $(TOOLS_BIN_DIR)/mockgen
CONVERSION_GEN := $(TOOLS_BIN_DIR)/conversion-gen
# Image URL to use for all building/pushing image targets
IMG ?= nwoodmsft/controller:0.14
# Produce CRDs that work back to Kubernetes 1.11 (no version conversion)
CRD_OPTIONS ?= "crd:trivialVersions=true"
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
## --------------------------------------
## Help
## --------------------------------------
help: ## Display this help
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
## --------------------------------------
## Binaries
## --------------------------------------
all: manager
.PHONY: manager
manager: generate fmt vet ## Build manager binary.
# go build -o bin/manager main.go
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o bin/manager cmd/manager/main.go
## --------------------------------------
## Tooling Binaries
## --------------------------------------
$(CLUSTERCTL): go.mod ## Build clusterctl binary.
go build -o $(BIN_DIR)/clusterctl sigs.k8s.io/cluster-api/cmd/clusterctl
## --------------------------------------
## Generate
## --------------------------------------
.PHONY: generate-examples
generate-examples: clean-examples ## Generate examples configurations to run a cluster.
./examples/generate.sh
# Run against the configured Kubernetes cluster in ~/.kube/config
run: generate fmt vet manifests
go run ./cmd/manager/main.go
# Install CRDs into a cluster
install: manifests
kustomize build config/crd | kubectl apply -f -
# Deploy controller in the configured Kubernetes cluster in ~/.kube/config
deploy: manifests
cd config/manager && kustomize edit set image controller=${IMG}
kustomize build config/default | kubectl apply -f -
# Generate manifests, e.g., CRD, RBAC, etc.
manifests: controller-gen
$(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases
# Run go fmt against code
fmt:
go fmt ./...
# Run go vet against code
vet:
go vet ./...
# Generate code
generate: controller-gen
$(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..."
# Find controller-gen locally, or download it if necessary
controller-gen:
ifeq (, $(shell which controller-gen))
go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.0
CONTROLLER_GEN=$(GOBIN)/controller-gen
else
CONTROLLER_GEN=$(shell which controller-gen)
endif
## --------------------------------------
## Docker Image
## --------------------------------------
# Build the docker image
docker-build: manager ## Build docker image
docker build -t ${IMG} .
# Push the docker image
docker-push: ## Push docker image
docker push ${IMG}
## --------------------------------------
## Development
## --------------------------------------
.PHONY: kind-reset
kind-reset: ## Destroys the "clusterapi" kind cluster.
kind delete cluster --name=clusterapi || true
.PHONY: create-cluster
create-cluster: kind-reset $(CLUSTERCTL) ## Create a development Kubernetes cluster in a KIND management cluster.
# Create KIND cluster
kind create cluster --name=clusterapi
# Apply provider-components.
kubectl \
--kubeconfig=$$(kind get kubeconfig-path --name="clusterapi") \
create -f examples/_out/provider-components.yaml
# Create Cluster.
kubectl \
--kubeconfig=$$(kind get kubeconfig-path --name="clusterapi") \
create -f examples/_out/cluster.yaml
# Create control plane machine.
kubectl \
--kubeconfig=$$(kind get kubeconfig-path --name="clusterapi") \
create -f examples/_out/controlplane.yaml
# Get KubeConfig using clusterctl.
# $(CLUSTERCTL) \
# alpha phases get-kubeconfig -v=4 \
# --kubeconfig=$$(kind get kubeconfig-path --name="clusterapi") \
# --namespace=default \
# --cluster-name=$(CLUSTER_NAME)
# Create a worker node with MachineDeployment.
kubectl \
--kubeconfig=$$(kind get kubeconfig-path --name="clusterapi") \
create -f examples/_out/machinedeployment.yaml
## --------------------------------------
## Cleanup
## --------------------------------------
.PHONY: clean
clean: ## Remove all generated files
$(MAKE) clean-bin
$(MAKE) clean-temporary
$(MAKE) clean-examples
.PHONY: clean-bin
clean-bin:
rm -rf bin
.PHONY: clean-temporary
clean-temporary:
rm -f kubeconfig
.PHONY: clean-examples
clean-examples:
rm -rf examples/_out/
rm -f examples/provider-components/provider-components-*.yaml

10
PROJECT Normal file

@@ -0,0 +1,10 @@
version: "2"
domain: cluster.x-k8s.io
repo: github.com/microsoft/cluster-api-provider-azurestackhci
resources:
- group: infrastructure
version: v1alpha2
kind: AzureStackHCICluster
- group: infrastructure
version: v1alpha2
kind: AzureStackHCIMachine


@@ -1,5 +1,53 @@
# Kubernetes Cluster API Provider Azure Stack HCI
<img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100">
------
Kubernetes-native declarative infrastructure for Azure Stack HCI.
## What is the Cluster API Provider Azure Stack HCI
The [Cluster API][cluster_api] brings declarative, Kubernetes-style APIs to cluster creation, configuration, and management.
The API itself is shared across multiple cloud providers, allowing for true hybrid
deployments of Kubernetes on Azure Stack HCI.
## Quick Start
Check out the [Cluster API Quick Start][quickstart] to create your first Kubernetes cluster on Azure Stack HCI using Cluster API.
---
## Support Policy
This provider's versions are compatible with the following versions of Cluster API:
| | Cluster API `v1alpha1` (`v0.1.x`) | Cluster API `v1alpha2` (`v0.2.x`) | Cluster API `v1alpha3` (`v0.3.x`) |
|---|---|---|---|
|AzureStackHCI Provider `v0.2.x` | | ✓ | |
|AzureStackHCI Provider `v0.3.x` | | | ✓ |
This provider's versions are able to install and manage the following versions of Kubernetes:
| | AzureStackHCI Provider `v0.2.x` | AzureStackHCI Provider `v0.3.x` |
|---|---|---|
| Kubernetes 1.14 | ✓ | ✓ |
| Kubernetes 1.15 | ✓ | ✓ |
| Kubernetes 1.16 | ✓ | ✓ |
| Kubernetes 1.17 | | |
Each version of Cluster API for Azure Stack HCI will attempt to support at least two Kubernetes versions, e.g., Cluster API for Azure Stack HCI `v0.2` may support Kubernetes 1.14 and Kubernetes 1.15.
**NOTE:** As the versioning for this project is tied to the versioning of Cluster API, future modifications to this policy may be made to more closely align with other providers in the Cluster API ecosystem.
---
## Documentation
Documentation is in the `/docs` directory, and the [index is here](docs/README.md).
## Contributing
This project welcomes contributions and suggestions. Most contributions require you to agree to a
Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
@@ -12,3 +60,37 @@ provided by the bot. You will only need to do this once across all repos using our CLA.
This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
## GitHub issues
### Bugs
If you think you have found a bug please follow the instructions below.
- Please take a moment to search the issue tracker first; your issue might be a duplicate.
- Get the logs from the cluster controllers. Please paste this into your issue.
- Open a [bug report][bug_report].
- Remember users might be searching for your issue in the future, so please give it a meaningful title to help others.
- Feel free to reach out to the cluster-api community on [kubernetes slack][slack_info].
### Tracking new features
We also use the issue tracker to track features. If you have an idea for a feature, or think you can help cluster-api-provider-azurestackhci become even more awesome, follow the steps below.
- Open a [feature request][feature_request].
- Remember users might be searching for your issue in the future, so please
give it a meaningful title to help others.
- Clearly define the use case, using concrete examples, e.g., I type `this` and
cluster-api-provider-azurestackhci does `that`.
- Some of our larger features will require some design. If you would like to
include a technical design for your feature please include it in the issue.
- After the new feature is well understood and the design agreed upon, we can
start coding the feature. We would love for you to code it, so please open
up a **WIP** *(work in progress)* pull request. Happy coding!
<!-- References -->
[bug_report]: https://github.com/microsoft/cluster-api-provider-azurestackhci/issues/new?template=bug_report.md
[feature_request]: https://github.com/microsoft/cluster-api-provider-azurestackhci/issues/new?template=feature_request.md
[cluster_api]: https://github.com/kubernetes-sigs/cluster-api
[quickstart]: https://cluster-api.sigs.k8s.io/user/quick-start.html


@@ -0,0 +1,83 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// ClusterFinalizer allows ReconcileAzureStackHCICluster to clean up AzureStackHCI resources associated with AzureStackHCICluster before
// removing it from the apiserver.
ClusterFinalizer = "azurestackhcicluster.infrastructure.cluster.x-k8s.io"
)
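For context, this finalizer is what lets the controller block deletion of an AzureStackHCICluster until cleanup has run. A minimal sketch of managing it with plain apimachinery accessors (the helper name is illustrative and not part of this commit; metav1 is the alias imported above):

// ensureClusterFinalizer adds ClusterFinalizer if it is missing. While the
// finalizer is present, the apiserver keeps the object around after a delete
// request so the controller can release external resources first, then
// remove the finalizer to let deletion complete.
func ensureClusterFinalizer(obj metav1.Object) {
	for _, f := range obj.GetFinalizers() {
		if f == ClusterFinalizer {
			return // already present
		}
	}
	obj.SetFinalizers(append(obj.GetFinalizers(), ClusterFinalizer))
}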
// AzureStackHCIClusterSpec defines the desired state of AzureStackHCICluster
type AzureStackHCIClusterSpec struct {
// NetworkSpec encapsulates all things related to AzureStackHCI network.
NetworkSpec NetworkSpec `json:"networkSpec,omitempty"`
ResourceGroup string `json:"resourceGroup"`
Location string `json:"location"`
// LoadBalancerRef may be used to enable a control plane load balancer for this cluster.
LoadBalancerRef *corev1.ObjectReference `json:"loadBalancerRef,omitempty"`
}
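A rough sketch of populating this spec in Go (all values illustrative; corev1 is the alias imported above):

func exampleClusterSpec() AzureStackHCIClusterSpec {
	return AzureStackHCIClusterSpec{
		ResourceGroup: "example-rg",
		Location:      "westus",
		NetworkSpec: NetworkSpec{
			Vnet: VnetSpec{Name: "example-vnet", CidrBlock: "10.0.0.0/16"},
		},
		// Optional: point at a LoadBalancer object to enable a control
		// plane load balancer for this cluster.
		LoadBalancerRef: &corev1.ObjectReference{
			Kind:      "LoadBalancer",
			Name:      "example-lb",
			Namespace: "default",
		},
	}
}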
// AzureStackHCIClusterStatus defines the observed state of AzureStackHCICluster
type AzureStackHCIClusterStatus struct {
Network Network `json:"network,omitempty"`
Bastion VM `json:"bastion,omitempty"`
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// APIEndpoints represents the endpoints to communicate with the control plane.
// +optional
APIEndpoints []APIEndpoint `json:"apiEndpoints,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=azurestackhciclusters,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// AzureStackHCICluster is the Schema for the azurestackhciclusters API
type AzureStackHCICluster struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AzureStackHCIClusterSpec `json:"spec,omitempty"`
Status AzureStackHCIClusterStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AzureStackHCIClusterList contains a list of AzureStackHCICluster
type AzureStackHCIClusterList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AzureStackHCICluster `json:"items"`
}
func init() {
SchemeBuilder.Register(&AzureStackHCICluster{}, &AzureStackHCIClusterList{})
}


@@ -0,0 +1,126 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/errors"
)
const (
// MachineFinalizer allows ReconcileAzureStackHCIMachine to clean up AzureStackHCI resources associated with AzureStackHCIMachine before
// removing it from the apiserver.
MachineFinalizer = "azurestackhcimachine.infrastructure.cluster.x-k8s.io"
)
// AzureStackHCIMachineSpec defines the desired state of AzureStackHCIMachine
type AzureStackHCIMachineSpec struct {
// ProviderID is the unique identifier as specified by the cloud provider.
// +optional
ProviderID *string `json:"providerID,omitempty"`
VMSize string `json:"vmSize"`
AvailabilityZone AvailabilityZone `json:"availabilityZone,omitempty"`
Image Image `json:"image"`
OSDisk OSDisk `json:"osDisk"`
Location string `json:"location"`
SSHPublicKey string `json:"sshPublicKey"`
}
// AzureStackHCIMachineStatus defines the observed state of AzureStackHCIMachine
type AzureStackHCIMachineStatus struct {
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// Addresses contains the AzureStackHCI instance associated addresses.
Addresses []v1.NodeAddress `json:"addresses,omitempty"`
// VMState is the provisioning state of the AzureStackHCI virtual machine.
// +optional
VMState *VMState `json:"vmState,omitempty"`
// ErrorReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorReason *errors.MachineStatusError `json:"errorReason,omitempty"`
// ErrorMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=azurestackhcimachines,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// +kubebuilder:subresource:status
// AzureStackHCIMachine is the Schema for the azurestackhcimachines API
type AzureStackHCIMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AzureStackHCIMachineSpec `json:"spec,omitempty"`
Status AzureStackHCIMachineStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AzureStackHCIMachineList contains a list of AzureStackHCIMachine
type AzureStackHCIMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AzureStackHCIMachine `json:"items"`
}
func init() {
SchemeBuilder.Register(&AzureStackHCIMachine{}, &AzureStackHCIMachineList{})
}


@@ -0,0 +1,57 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// AzureStackHCIMachineTemplateSpec defines the desired state of AzureStackHCIMachineTemplate
type AzureStackHCIMachineTemplateSpec struct {
Template AzureStackHCIMachineTemplateResource `json:"template"`
}
// +kubebuilder:object:root=true
// +kubebuilder:resource:path=azurestackhcimachinetemplates,scope=Namespaced,categories=cluster-api
// +kubebuilder:storageversion
// AzureStackHCIMachineTemplate is the Schema for the azurestackhcimachinetemplates API
type AzureStackHCIMachineTemplate struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AzureStackHCIMachineTemplateSpec `json:"spec,omitempty"`
}
// +kubebuilder:object:root=true
// AzureStackHCIMachineTemplateList contains a list of AzureStackHCIMachineTemplate
type AzureStackHCIMachineTemplateList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AzureStackHCIMachineTemplate `json:"items"`
}
func init() {
SchemeBuilder.Register(&AzureStackHCIMachineTemplate{}, &AzureStackHCIMachineTemplateList{})
}
// AzureStackHCIMachineTemplateResource describes the data needed to create an AzureStackHCIMachine from a template
type AzureStackHCIMachineTemplateResource struct {
// Spec is the specification of the desired behavior of the machine.
Spec AzureStackHCIMachineSpec `json:"spec"`
}


@@ -0,0 +1,93 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
v1core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/errors"
)
const (
// VirtualMachineFinalizer allows ReconcileVirtualAzureStackHCIMachine to clean up AzureStackHCI resources associated with VirtualAzureStackHCIMachine before
// removing it from the apiserver.
VirtualMachineFinalizer = "azurestackhcivirtualmachine.infrastructure.cluster.x-k8s.io"
)
// AzureStackHCIVirtualMachineSpec defines the desired state of AzureStackHCIVirtualMachine
type AzureStackHCIVirtualMachineSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
VMSize string `json:"vmSize"`
AvailabilityZone AvailabilityZone `json:"availabilityZone,omitempty"`
Image Image `json:"image"`
OSDisk OSDisk `json:"osDisk"`
BootstrapData *string `json:"bootstrapData,omitempty"`
Identity VMIdentity `json:"identity,omitempty"`
Location string `json:"location"` // does location belong here?
SSHPublicKey string `json:"sshPublicKey"`
// come from the cluster scope for machine and lb controller creation path
ResourceGroup string `json:"resourceGroup"`
VnetName string `json:"vnetName"`
ClusterName string `json:"clusterName"`
SubnetName string `json:"subnetName"`
BackendPoolName string `json:"backendPoolName,omitempty"`
}
// AzureStackHCIVirtualMachineStatus defines the observed state of AzureStackHCIVirtualMachine
type AzureStackHCIVirtualMachineStatus struct {
// Ready is true when the provider resource is ready.
// +optional
Ready bool `json:"ready"`
// Addresses contains the AzureStackHCI instance associated addresses.
Addresses []v1core.NodeAddress `json:"addresses,omitempty"`
// VMState is the provisioning state of the AzureStackHCI virtual machine.
// +optional
VMState *VMState `json:"vmState,omitempty"`
// +optional
ErrorReason *errors.MachineStatusError `json:"errorReason,omitempty"`
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +kubebuilder:object:root=true
// AzureStackHCIVirtualMachine is the Schema for the azurestackhcivirtualmachines API
type AzureStackHCIVirtualMachine struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec AzureStackHCIVirtualMachineSpec `json:"spec,omitempty"`
Status AzureStackHCIVirtualMachineStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// AzureStackHCIVirtualMachineList contains a list of AzureStackHCIVirtualMachine
type AzureStackHCIVirtualMachineList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []AzureStackHCIVirtualMachine `json:"items"`
}
func init() {
SchemeBuilder.Register(&AzureStackHCIVirtualMachine{}, &AzureStackHCIVirtualMachineList{})
}


@@ -0,0 +1,36 @@
/*
Copyright 2019 Microsoft and contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha2 contains API Schema definitions for the infrastructure v1alpha2 API group
// +kubebuilder:object:generate=true
// +groupName=infrastructure.cluster.x-k8s.io
package v1alpha2
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "infrastructure.cluster.x-k8s.io", Version: "v1alpha2"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
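A minimal sketch of how a consumer registers this group-version into a runtime scheme (typical controller-runtime wiring; the import alias is illustrative, and the import path follows the repo path declared in the PROJECT file):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
)

func main() {
	scheme := runtime.NewScheme()
	// Registers AzureStackHCICluster, AzureStackHCIMachine, and the other
	// types this package adds via SchemeBuilder.Register.
	if err := infrav1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println("kinds registered:", len(scheme.AllKnownTypes()))
}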


@@ -0,0 +1,109 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/cluster-api/errors"
)
const (
// LoadBalancerFinalizer allows ReconcileLoadBalancer to clean up the load balancer resources before removing it from the apiserver.
LoadBalancerFinalizer = "loadbalancer.infrastructure.cluster.x-k8s.io"
)
type LoadBalancerSpec struct {
AvailabilityZone AvailabilityZone `json:"availabilityZone,omitempty"`
Location string `json:"location"` // does location belong here?
SSHPublicKey string `json:"sshPublicKey"`
BackendPoolName string `json:"backendPoolName"`
ImageReference string `json:"imageReference"`
}
type LoadBalancerStatus struct {
// +optional
Ready bool `json:"ready,omitempty"`
// VMState is the provisioning state of the AzureStackHCI virtual machine.
// +optional
VMState *VMState `json:"vmState,omitempty"`
// Address is the IP address of the load balancer.
// +optional
Address string `json:"address,omitempty"`
// ErrorReason will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a succinct value suitable
// for machine interpretation.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorReason *errors.MachineStatusError `json:"errorReason,omitempty"`
// ErrorMessage will be set in the event that there is a terminal problem
// reconciling the Machine and will contain a more verbose string suitable
// for logging and human consumption.
//
// This field should not be set for transitive errors that a controller
// faces that are expected to be fixed automatically over
// time (like service outages), but instead indicate that something is
// fundamentally wrong with the Machine's spec or the configuration of
// the controller, and that manual intervention is required. Examples
// of terminal errors would be invalid combinations of settings in the
// spec, values that are unsupported by the controller, or the
// responsible controller itself being critically misconfigured.
//
// Any transient errors that occur during the reconciliation of Machines
// can be added as events to the Machine object and/or logged in the
// controller's output.
// +optional
ErrorMessage *string `json:"errorMessage,omitempty"`
}
// +kubebuilder:object:root=true
// LoadBalancer is the Schema for the loadbalancers API
type LoadBalancer struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec LoadBalancerSpec `json:"spec,omitempty"`
Status LoadBalancerStatus `json:"status,omitempty"`
}
// +kubebuilder:object:root=true
// LoadBalancerList contains a list of LoadBalancers
type LoadBalancerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []LoadBalancer `json:"items"`
}
func init() {
SchemeBuilder.Register(&LoadBalancer{}, &LoadBalancerList{})
}

298
api/v1alpha2/types.go Normal file

@@ -0,0 +1,298 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha2
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// AzureStackHCIResourceReference is a reference to a specific AzureStackHCI resource by ID
type AzureStackHCIResourceReference struct {
// ID of resource
// +optional
ID *string `json:"id,omitempty"`
}
// AzureStackHCIMachineProviderConditionType is a valid value for AzureStackHCIMachineProviderCondition.Type
type AzureStackHCIMachineProviderConditionType string
// Valid conditions for an AzureStackHCI machine instance
const (
// MachineCreated indicates whether the machine has been created or not. If not,
// it should include a reason and message for the failure.
MachineCreated AzureStackHCIMachineProviderConditionType = "MachineCreated"
)
// AzureStackHCIMachineProviderCondition is a condition in a AzureStackHCIMachineProviderStatus
type AzureStackHCIMachineProviderCondition struct {
// Type is the type of the condition.
Type AzureStackHCIMachineProviderConditionType `json:"type"`
// Status is the status of the condition.
Status corev1.ConditionStatus `json:"status"`
// LastProbeTime is the last time we probed the condition.
// +optional
LastProbeTime metav1.Time `json:"lastProbeTime"`
// LastTransitionTime is the last time the condition transitioned from one status to another.
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime"`
// Reason is a unique, one-word, CamelCase reason for the condition's last transition.
// +optional
Reason string `json:"reason"`
// Message is a human-readable message indicating details about last transition.
// +optional
Message string `json:"message"`
}
const (
// ControlPlane machine label
ControlPlane string = "control-plane"
// Node machine label
Node string = "node"
)
// Network encapsulates AzureStackHCI networking resources.
type Network struct {
// SecurityGroups is a map from the role/kind of the security group to its unique name, if any.
SecurityGroups map[SecurityGroupRole]SecurityGroup `json:"securityGroups,omitempty"`
// APIServerIP is the Kubernetes API server public IP address.
APIServerIP PublicIP `json:"apiServerIp,omitempty"`
}
// NetworkSpec encapsulates all things related to AzureStackHCI network.
type NetworkSpec struct {
// Vnet configuration.
// +optional
Vnet VnetSpec `json:"vnet,omitempty"`
// Subnets configuration.
// +optional
Subnets Subnets `json:"subnets,omitempty"`
}
// VnetSpec configures an AzureStackHCI virtual network.
type VnetSpec struct {
// ID is the identifier of the virtual network this provider should use to create resources.
ID string `json:"id,omitempty"`
// Name defines a name for the virtual network resource.
Name string `json:"name"`
// CidrBlock is the CIDR block to be used when the provider creates a managed virtual network.
CidrBlock string `json:"cidrBlock,omitempty"`
}
// Subnets is a slice of Subnet.
type Subnets []*SubnetSpec
// ToMap returns a map from id to subnet.
func (s Subnets) ToMap() map[string]*SubnetSpec {
res := make(map[string]*SubnetSpec)
for _, x := range s {
res[x.ID] = x
}
return res
}
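A rough usage sketch for ToMap (illustrative helper, not part of this commit), showing lookup by ID instead of scanning the slice:

func findSubnet(subnets Subnets, id string) *SubnetSpec {
	return subnets.ToMap()[id] // nil if no subnet has that ID
}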
// SecurityGroupRole defines the unique role of a security group.
type SecurityGroupRole string
var (
// SecurityGroupBastion defines an SSH bastion role
SecurityGroupBastion = SecurityGroupRole("bastion")
// SecurityGroupNode defines a Kubernetes workload node role
SecurityGroupNode = SecurityGroupRole(Node)
// SecurityGroupControlPlane defines a Kubernetes control plane node role
SecurityGroupControlPlane = SecurityGroupRole(ControlPlane)
)
// SecurityGroup defines an AzureStackHCI security group.
type SecurityGroup struct {
ID string `json:"id"`
Name string `json:"name"`
IngressRules IngressRules `json:"ingressRule"`
}
// SecurityGroupProtocol defines the protocol type for a security group rule.
type SecurityGroupProtocol string
var (
// SecurityGroupProtocolAll is a wildcard for all IP protocols
SecurityGroupProtocolAll = SecurityGroupProtocol("*")
// SecurityGroupProtocolTCP represents the TCP protocol in ingress rules
SecurityGroupProtocolTCP = SecurityGroupProtocol("Tcp")
// SecurityGroupProtocolUDP represents the UDP protocol in ingress rules
SecurityGroupProtocolUDP = SecurityGroupProtocol("Udp")
)
// IngressRule defines an AzureStackHCI ingress rule for security groups.
type IngressRule struct {
Description string `json:"description"`
Protocol SecurityGroupProtocol `json:"protocol"`
// SourcePorts - The source port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
SourcePorts *string `json:"sourcePorts,omitempty"`
// DestinationPorts - The destination port or range. Integer or range between 0 and 65535. Asterisk '*' can also be used to match all ports.
DestinationPorts *string `json:"destinationPorts,omitempty"`
// Source - The CIDR or source IP range. Asterisk '*' can also be used to match all source IPs. Default tags such as 'VirtualNetwork', 'AzureStackHCILoadBalancer' and 'Internet' can also be used. If this is an ingress rule, specifies where network traffic originates from.
Source *string `json:"source,omitempty"`
// Destination - The destination address prefix. CIDR or destination IP range. Asterisk '*' can also be used to match all destination IPs. Default tags such as 'VirtualNetwork', 'AzureStackHCILoadBalancer' and 'Internet' can also be used.
Destination *string `json:"destination,omitempty"`
}
// IngressRules is a slice of AzureStackHCI ingress rules for security groups.
type IngressRules []*IngressRule
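To make the rule shape concrete, a minimal sketch of an SSH ingress rule built from these types (all values illustrative):

func sshIngressRule() *IngressRule {
	port := "22"
	anySource := "*"
	return &IngressRule{
		Description:      "allow SSH to the bastion",
		Protocol:         SecurityGroupProtocolTCP,
		DestinationPorts: &port,
		Source:           &anySource,
	}
}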
// PublicIP defines an AzureStackHCI public IP address.
// TODO: Remove once load balancer is implemented.
type PublicIP struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
IPAddress string `json:"ipAddress,omitempty"`
DNSName string `json:"dnsName,omitempty"`
}
// VMState describes the state of an AzureStackHCI virtual machine.
type VMState string
var (
// VMStateCreating ...
VMStateCreating = VMState("Creating")
// VMStateDeleting ...
VMStateDeleting = VMState("Deleting")
// VMStateFailed ...
VMStateFailed = VMState("Failed")
// VMStateMigrating ...
VMStateMigrating = VMState("Migrating")
// VMStateSucceeded ...
VMStateSucceeded = VMState("Succeeded")
// VMStateUpdating ...
VMStateUpdating = VMState("Updating")
)
// VM describes an AzureStackHCI virtual machine.
type VM struct {
ID string `json:"id,omitempty"`
Name string `json:"name,omitempty"`
AvailabilityZone string `json:"availabilityZone,omitempty"`
// Hardware profile
VMSize string `json:"vmSize,omitempty"`
// Storage profile
Image Image `json:"image,omitempty"`
OSDisk OSDisk `json:"osDisk,omitempty"`
BootstrapData string `json:"bootstrapData,omitempty"`
// State - The provisioning state, which only appears in the response.
State VMState `json:"vmState,omitempty"`
Identity VMIdentity `json:"identity,omitempty"`
}
type AvailabilityZone struct {
ID *string `json:"id,omitempty"`
Enabled *bool `json:"enabled,omitempty"`
}
// Image defines information about the image to use for VM creation.
// There are three ways to specify an image: by ID, by publisher, or by Shared Image Gallery.
// If specifying an image by ID, only the ID field needs to be set.
// If specifying an image by publisher, the Publisher, Offer, SKU, and Version fields must be set.
// If specifying an image from a Shared Image Gallery, the SubscriptionID, ResourceGroup,
// Gallery, Name, and Version fields must be set.
type Image struct {
Publisher *string `json:"publisher,omitempty"`
Offer *string `json:"offer,omitempty"`
SKU *string `json:"sku,omitempty"`
ID *string `json:"id,omitempty"`
SubscriptionID *string `json:"subscriptionID,omitempty"`
ResourceGroup *string `json:"resourceGroup,omitempty"`
Gallery *string `json:"gallery,omitempty"`
Name *string `json:"name,omitempty"`
Version *string `json:"version,omitempty"`
}
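A minimal sketch of the three documented ways to populate Image (all values illustrative; strPtr is a hypothetical helper):

func strPtr(s string) *string { return &s }

var (
	// By ID: only the ID field is set.
	imageByID = Image{ID: strPtr("example-image-id")}

	// By publisher: Publisher, Offer, SKU, and Version are set.
	imageByPublisher = Image{
		Publisher: strPtr("example-publisher"),
		Offer:     strPtr("example-offer"),
		SKU:       strPtr("example-sku"),
		Version:   strPtr("latest"),
	}

	// From a Shared Image Gallery: SubscriptionID, ResourceGroup, Gallery,
	// Name, and Version are set.
	imageFromGallery = Image{
		SubscriptionID: strPtr("00000000-0000-0000-0000-000000000000"),
		ResourceGroup:  strPtr("example-rg"),
		Gallery:        strPtr("example-gallery"),
		Name:           strPtr("example-image"),
		Version:        strPtr("1.0.0"),
	}
)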
// APIEndpoint represents a reachable Kubernetes API endpoint.
type APIEndpoint struct {
// The hostname on which the API server is serving.
Host string `json:"host"`
// The port on which the API server is serving.
Port int `json:"port"`
}
// VMIdentity defines the identity of the virtual machine, if configured.
type VMIdentity string
// TEMP: OSType describes the OS type of a disk.
type OSType string
var (
// OSTypeLinux
OSTypeLinux = OSType("Linux")
// OSTypeWindows
OSTypeWindows = OSType("Windows")
)
type OSDisk struct {
Name string `json:"name"`
Source string `json:"source"`
OSType OSType `json:"osType"`
DiskSizeGB int32 `json:"diskSizeGB"`
ManagedDisk ManagedDisk `json:"managedDisk"`
}
type ManagedDisk struct {
StorageAccountType string `json:"storageAccountType"`
}
// SubnetSpec configures an AzureStackHCI subnet.
type SubnetSpec struct {
// ID defines a unique identifier to reference this resource.
ID string `json:"id,omitempty"`
// Name defines a name for the subnet resource.
Name string `json:"name"`
// VnetID defines the ID of the virtual network this subnet should be built in.
VnetID string `json:"vnetId"`
// CidrBlock is the CIDR block to be used when the provider creates a managed Vnet.
CidrBlock string `json:"cidrBlock,omitempty"`
// SecurityGroup defines the NSG (network security group) that should be attached to this subnet.
SecurityGroup SecurityGroup `json:"securityGroup"`
}
const (
AnnotationClusterInfrastructureReady = "azurestackhci.cluster.sigs.k8s.io/infrastructure-ready"
ValueReady = "true"
AnnotationControlPlaneReady = "azurestackhci.cluster.sigs.k8s.io/control-plane-ready"
)
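A rough sketch of how a controller might consume these annotations (illustrative helper; metav1 is the alias imported above):

// infrastructureReady reports whether the cluster object has been annotated
// as having its infrastructure provisioned.
func infrastructureReady(obj metav1.Object) bool {
	return obj.GetAnnotations()[AnnotationClusterInfrastructureReady] == ValueReady
}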


@@ -0,0 +1,950 @@
// +build !ignore_autogenerated
/*
Copyright 2019 Microsoft and contributors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha2
import (
"k8s.io/api/core/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/cluster-api/errors"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *APIEndpoint) DeepCopyInto(out *APIEndpoint) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIEndpoint.
func (in *APIEndpoint) DeepCopy() *APIEndpoint {
if in == nil {
return nil
}
out := new(APIEndpoint)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AvailabilityZone) DeepCopyInto(out *AvailabilityZone) {
*out = *in
if in.ID != nil {
in, out := &in.ID, &out.ID
*out = new(string)
**out = **in
}
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvailabilityZone.
func (in *AvailabilityZone) DeepCopy() *AvailabilityZone {
if in == nil {
return nil
}
out := new(AvailabilityZone)
in.DeepCopyInto(out)
return out
}
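One idiom in this generated code is easy to misread: `in, out := &in.ID, &out.ID` shadows the receivers with pointers to the corresponding fields, so that `*out = new(string); **out = **in` allocates fresh storage and copies the pointed-to value rather than aliasing the original pointer. A hand-written equivalent for a single field, for illustration only:

func copyStringPtr(src *string) *string {
	if src == nil {
		return nil
	}
	dst := new(string) // fresh allocation, no aliasing
	*dst = *src        // copy the value the pointer refers to
	return dst
}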
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCICluster) DeepCopyInto(out *AzureStackHCICluster) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCICluster.
func (in *AzureStackHCICluster) DeepCopy() *AzureStackHCICluster {
if in == nil {
return nil
}
out := new(AzureStackHCICluster)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCICluster) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIClusterList) DeepCopyInto(out *AzureStackHCIClusterList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AzureStackHCICluster, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIClusterList.
func (in *AzureStackHCIClusterList) DeepCopy() *AzureStackHCIClusterList {
if in == nil {
return nil
}
out := new(AzureStackHCIClusterList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIClusterList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIClusterSpec) DeepCopyInto(out *AzureStackHCIClusterSpec) {
*out = *in
in.NetworkSpec.DeepCopyInto(&out.NetworkSpec)
if in.LoadBalancerRef != nil {
in, out := &in.LoadBalancerRef, &out.LoadBalancerRef
*out = new(v1.ObjectReference)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIClusterSpec.
func (in *AzureStackHCIClusterSpec) DeepCopy() *AzureStackHCIClusterSpec {
if in == nil {
return nil
}
out := new(AzureStackHCIClusterSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIClusterStatus) DeepCopyInto(out *AzureStackHCIClusterStatus) {
*out = *in
in.Network.DeepCopyInto(&out.Network)
in.Bastion.DeepCopyInto(&out.Bastion)
if in.APIEndpoints != nil {
in, out := &in.APIEndpoints, &out.APIEndpoints
*out = make([]APIEndpoint, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIClusterStatus.
func (in *AzureStackHCIClusterStatus) DeepCopy() *AzureStackHCIClusterStatus {
if in == nil {
return nil
}
out := new(AzureStackHCIClusterStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachine) DeepCopyInto(out *AzureStackHCIMachine) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachine.
func (in *AzureStackHCIMachine) DeepCopy() *AzureStackHCIMachine {
if in == nil {
return nil
}
out := new(AzureStackHCIMachine)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIMachine) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineList) DeepCopyInto(out *AzureStackHCIMachineList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AzureStackHCIMachine, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineList.
func (in *AzureStackHCIMachineList) DeepCopy() *AzureStackHCIMachineList {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIMachineList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineProviderCondition) DeepCopyInto(out *AzureStackHCIMachineProviderCondition) {
*out = *in
in.LastProbeTime.DeepCopyInto(&out.LastProbeTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineProviderCondition.
func (in *AzureStackHCIMachineProviderCondition) DeepCopy() *AzureStackHCIMachineProviderCondition {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineProviderCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineSpec) DeepCopyInto(out *AzureStackHCIMachineSpec) {
*out = *in
if in.ProviderID != nil {
in, out := &in.ProviderID, &out.ProviderID
*out = new(string)
**out = **in
}
in.AvailabilityZone.DeepCopyInto(&out.AvailabilityZone)
in.Image.DeepCopyInto(&out.Image)
out.OSDisk = in.OSDisk
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineSpec.
func (in *AzureStackHCIMachineSpec) DeepCopy() *AzureStackHCIMachineSpec {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineStatus) DeepCopyInto(out *AzureStackHCIMachineStatus) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]v1.NodeAddress, len(*in))
copy(*out, *in)
}
if in.VMState != nil {
in, out := &in.VMState, &out.VMState
*out = new(VMState)
**out = **in
}
if in.ErrorReason != nil {
in, out := &in.ErrorReason, &out.ErrorReason
*out = new(errors.MachineStatusError)
**out = **in
}
if in.ErrorMessage != nil {
in, out := &in.ErrorMessage, &out.ErrorMessage
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineStatus.
func (in *AzureStackHCIMachineStatus) DeepCopy() *AzureStackHCIMachineStatus {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineTemplate) DeepCopyInto(out *AzureStackHCIMachineTemplate) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineTemplate.
func (in *AzureStackHCIMachineTemplate) DeepCopy() *AzureStackHCIMachineTemplate {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineTemplate)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIMachineTemplate) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineTemplateList) DeepCopyInto(out *AzureStackHCIMachineTemplateList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AzureStackHCIMachineTemplate, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineTemplateList.
func (in *AzureStackHCIMachineTemplateList) DeepCopy() *AzureStackHCIMachineTemplateList {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineTemplateList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIMachineTemplateList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineTemplateResource) DeepCopyInto(out *AzureStackHCIMachineTemplateResource) {
*out = *in
in.Spec.DeepCopyInto(&out.Spec)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineTemplateResource.
func (in *AzureStackHCIMachineTemplateResource) DeepCopy() *AzureStackHCIMachineTemplateResource {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineTemplateResource)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIMachineTemplateSpec) DeepCopyInto(out *AzureStackHCIMachineTemplateSpec) {
*out = *in
in.Template.DeepCopyInto(&out.Template)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIMachineTemplateSpec.
func (in *AzureStackHCIMachineTemplateSpec) DeepCopy() *AzureStackHCIMachineTemplateSpec {
if in == nil {
return nil
}
out := new(AzureStackHCIMachineTemplateSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIResourceReference) DeepCopyInto(out *AzureStackHCIResourceReference) {
*out = *in
if in.ID != nil {
in, out := &in.ID, &out.ID
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIResourceReference.
func (in *AzureStackHCIResourceReference) DeepCopy() *AzureStackHCIResourceReference {
if in == nil {
return nil
}
out := new(AzureStackHCIResourceReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIVirtualMachine) DeepCopyInto(out *AzureStackHCIVirtualMachine) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIVirtualMachine.
func (in *AzureStackHCIVirtualMachine) DeepCopy() *AzureStackHCIVirtualMachine {
if in == nil {
return nil
}
out := new(AzureStackHCIVirtualMachine)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIVirtualMachine) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIVirtualMachineList) DeepCopyInto(out *AzureStackHCIVirtualMachineList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]AzureStackHCIVirtualMachine, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIVirtualMachineList.
func (in *AzureStackHCIVirtualMachineList) DeepCopy() *AzureStackHCIVirtualMachineList {
if in == nil {
return nil
}
out := new(AzureStackHCIVirtualMachineList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AzureStackHCIVirtualMachineList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIVirtualMachineSpec) DeepCopyInto(out *AzureStackHCIVirtualMachineSpec) {
*out = *in
in.AvailabilityZone.DeepCopyInto(&out.AvailabilityZone)
in.Image.DeepCopyInto(&out.Image)
out.OSDisk = in.OSDisk
if in.BootstrapData != nil {
in, out := &in.BootstrapData, &out.BootstrapData
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIVirtualMachineSpec.
func (in *AzureStackHCIVirtualMachineSpec) DeepCopy() *AzureStackHCIVirtualMachineSpec {
if in == nil {
return nil
}
out := new(AzureStackHCIVirtualMachineSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AzureStackHCIVirtualMachineStatus) DeepCopyInto(out *AzureStackHCIVirtualMachineStatus) {
*out = *in
if in.Addresses != nil {
in, out := &in.Addresses, &out.Addresses
*out = make([]v1.NodeAddress, len(*in))
copy(*out, *in)
}
if in.VMState != nil {
in, out := &in.VMState, &out.VMState
*out = new(VMState)
**out = **in
}
if in.ErrorReason != nil {
in, out := &in.ErrorReason, &out.ErrorReason
*out = new(errors.MachineStatusError)
**out = **in
}
if in.ErrorMessage != nil {
in, out := &in.ErrorMessage, &out.ErrorMessage
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureStackHCIVirtualMachineStatus.
func (in *AzureStackHCIVirtualMachineStatus) DeepCopy() *AzureStackHCIVirtualMachineStatus {
if in == nil {
return nil
}
out := new(AzureStackHCIVirtualMachineStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Image) DeepCopyInto(out *Image) {
*out = *in
if in.Publisher != nil {
in, out := &in.Publisher, &out.Publisher
*out = new(string)
**out = **in
}
if in.Offer != nil {
in, out := &in.Offer, &out.Offer
*out = new(string)
**out = **in
}
if in.SKU != nil {
in, out := &in.SKU, &out.SKU
*out = new(string)
**out = **in
}
if in.ID != nil {
in, out := &in.ID, &out.ID
*out = new(string)
**out = **in
}
if in.SubscriptionID != nil {
in, out := &in.SubscriptionID, &out.SubscriptionID
*out = new(string)
**out = **in
}
if in.ResourceGroup != nil {
in, out := &in.ResourceGroup, &out.ResourceGroup
*out = new(string)
**out = **in
}
if in.Gallery != nil {
in, out := &in.Gallery, &out.Gallery
*out = new(string)
**out = **in
}
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.Version != nil {
in, out := &in.Version, &out.Version
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image.
func (in *Image) DeepCopy() *Image {
if in == nil {
return nil
}
out := new(Image)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressRule) DeepCopyInto(out *IngressRule) {
*out = *in
if in.SourcePorts != nil {
in, out := &in.SourcePorts, &out.SourcePorts
*out = new(string)
**out = **in
}
if in.DestinationPorts != nil {
in, out := &in.DestinationPorts, &out.DestinationPorts
*out = new(string)
**out = **in
}
if in.Source != nil {
in, out := &in.Source, &out.Source
*out = new(string)
**out = **in
}
if in.Destination != nil {
in, out := &in.Destination, &out.Destination
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule.
func (in *IngressRule) DeepCopy() *IngressRule {
if in == nil {
return nil
}
out := new(IngressRule)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in IngressRules) DeepCopyInto(out *IngressRules) {
{
in := &in
*out = make(IngressRules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(IngressRule)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRules.
func (in IngressRules) DeepCopy() IngressRules {
if in == nil {
return nil
}
out := new(IngressRules)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancer) DeepCopyInto(out *LoadBalancer) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancer.
func (in *LoadBalancer) DeepCopy() *LoadBalancer {
if in == nil {
return nil
}
out := new(LoadBalancer)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LoadBalancer) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerList) DeepCopyInto(out *LoadBalancerList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]LoadBalancer, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerList.
func (in *LoadBalancerList) DeepCopy() *LoadBalancerList {
if in == nil {
return nil
}
out := new(LoadBalancerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *LoadBalancerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerSpec) DeepCopyInto(out *LoadBalancerSpec) {
*out = *in
in.AvailabilityZone.DeepCopyInto(&out.AvailabilityZone)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerSpec.
func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec {
if in == nil {
return nil
}
out := new(LoadBalancerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) {
*out = *in
if in.VMState != nil {
in, out := &in.VMState, &out.VMState
*out = new(VMState)
**out = **in
}
if in.ErrorReason != nil {
in, out := &in.ErrorReason, &out.ErrorReason
*out = new(errors.MachineStatusError)
**out = **in
}
if in.ErrorMessage != nil {
in, out := &in.ErrorMessage, &out.ErrorMessage
*out = new(string)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus.
func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus {
if in == nil {
return nil
}
out := new(LoadBalancerStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagedDisk) DeepCopyInto(out *ManagedDisk) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDisk.
func (in *ManagedDisk) DeepCopy() *ManagedDisk {
if in == nil {
return nil
}
out := new(ManagedDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Network) DeepCopyInto(out *Network) {
*out = *in
if in.SecurityGroups != nil {
in, out := &in.SecurityGroups, &out.SecurityGroups
*out = make(map[SecurityGroupRole]SecurityGroup, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
out.APIServerIP = in.APIServerIP
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Network.
func (in *Network) DeepCopy() *Network {
if in == nil {
return nil
}
out := new(Network)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
*out = *in
out.Vnet = in.Vnet
if in.Subnets != nil {
in, out := &in.Subnets, &out.Subnets
*out = make(Subnets, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(SubnetSpec)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
func (in *NetworkSpec) DeepCopy() *NetworkSpec {
if in == nil {
return nil
}
out := new(NetworkSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OSDisk) DeepCopyInto(out *OSDisk) {
*out = *in
out.ManagedDisk = in.ManagedDisk
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
func (in *OSDisk) DeepCopy() *OSDisk {
if in == nil {
return nil
}
out := new(OSDisk)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PublicIP) DeepCopyInto(out *PublicIP) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublicIP.
func (in *PublicIP) DeepCopy() *PublicIP {
if in == nil {
return nil
}
out := new(PublicIP)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) {
*out = *in
if in.IngressRules != nil {
in, out := &in.IngressRules, &out.IngressRules
*out = make(IngressRules, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(IngressRule)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup.
func (in *SecurityGroup) DeepCopy() *SecurityGroup {
if in == nil {
return nil
}
out := new(SecurityGroup)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SubnetSpec) DeepCopyInto(out *SubnetSpec) {
*out = *in
in.SecurityGroup.DeepCopyInto(&out.SecurityGroup)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetSpec.
func (in *SubnetSpec) DeepCopy() *SubnetSpec {
if in == nil {
return nil
}
out := new(SubnetSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in Subnets) DeepCopyInto(out *Subnets) {
{
in := &in
*out = make(Subnets, len(*in))
for i := range *in {
if (*in)[i] != nil {
in, out := &(*in)[i], &(*out)[i]
*out = new(SubnetSpec)
(*in).DeepCopyInto(*out)
}
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subnets.
func (in Subnets) DeepCopy() Subnets {
if in == nil {
return nil
}
out := new(Subnets)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VM) DeepCopyInto(out *VM) {
*out = *in
in.Image.DeepCopyInto(&out.Image)
out.OSDisk = in.OSDisk
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VM.
func (in *VM) DeepCopy() *VM {
if in == nil {
return nil
}
out := new(VM)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VnetSpec) DeepCopyInto(out *VnetSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VnetSpec.
func (in *VnetSpec) DeepCopy() *VnetSpec {
if in == nil {
return nil
}
out := new(VnetSpec)
in.DeepCopyInto(out)
return out
}
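The generated helpers above exist so a controller can mutate a copy of an API object without corrupting the shared informer cache. A minimal sketch (not part of this commit) of the pointer-field semantics, using only the generated api/v1alpha2 package:

package main

import (
	"fmt"

	infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
	"k8s.io/utils/pointer"
)

func main() {
	in := &infrav1.Image{Name: pointer.StringPtr("linux_k8s_1-16-2")}
	out := in.DeepCopy()
	*out.Name = "mutated"
	// DeepCopyInto allocated a fresh string for Name, so the original is untouched.
	fmt.Println(*in.Name) // prints "linux_k8s_1-16-2"
}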

@ -0,0 +1,65 @@
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
package windows
type Images struct {
Pause string
Nanoserver string
ServerCore string
}
type Cri struct {
Name string
Images Images
}
type CniSource struct {
Name string
Url string
}
type Plugin struct {
Name string
}
type Cni struct {
Name string
Source CniSource
Plugin Plugin
InterfaceName string
}
type KubernetesSource struct {
Release string
Url string
}
type ControlPlane struct {
IpAddress string
Username string
KubeadmToken string
KubeadmCAHash string
}
type KubeProxy struct {
Gates string
}
type Network struct {
ServiceCidr string
ClusterCidr string
}
type Kubernetes struct {
Source KubernetesSource
ControlPlane ControlPlane
KubeProxy KubeProxy
Network Network
}
type Install struct {
Destination string
}
type KubeCluster struct {
Cri Cri
Cni Cni
Kubernetes Kubernetes
Install Install
}
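These structs describe the full Windows node bootstrap configuration: CRI, CNI, Kubernetes sources, and the install destination. A hedged sketch of constructing one; the import path and every literal value below are illustrative assumptions, not values taken from this commit:

package main

// The import path is an assumption; the commit does not show this file's location.
import windows "github.com/microsoft/cluster-api-provider-azurestackhci/pkg/windows"

func main() {
	cfg := windows.KubeCluster{
		Cri: windows.Cri{
			Name:   "containerd", // illustrative
			Images: windows.Images{Pause: "mcr.microsoft.com/oss/kubernetes/pause:1.3.0"},
		},
		Cni: windows.Cni{
			Name:          "flannel", // illustrative
			Source:        windows.CniSource{Name: "flannel", Url: "https://example.invalid/cni.tgz"},
			Plugin:        windows.Plugin{Name: "vxlan"},
			InterfaceName: "Ethernet",
		},
		Kubernetes: windows.Kubernetes{
			Source:       windows.KubernetesSource{Release: "1.16.2", Url: "https://example.invalid/k8s.tgz"},
			ControlPlane: windows.ControlPlane{IpAddress: "10.0.0.4", Username: "clouduser"},
			KubeProxy:    windows.KubeProxy{Gates: "WinDSR=true"}, // illustrative feature gates
			Network:      windows.Network{ServiceCidr: "10.96.0.0/12", ClusterCidr: "10.244.0.0/16"},
		},
		Install: windows.Install{Destination: `C:\k`},
	}
	_ = cfg
}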

58
azure-pipelines.yml Normal file
@ -0,0 +1,58 @@
# Starter pipeline
# Start with a minimal pipeline that you can customize to build and deploy your code.
# Add steps that build, run tests, deploy, and more:
# https://aka.ms/yaml
trigger:
- master
variables:
GOPATH: '$(Agent.BuildDirectory)/gopath' # Go workspace path
GOROOT: '$(Agent.BuildDirectory)/go' # Go installation path
GOBIN: '$(GOPATH)/bin' # Go binaries path
GO111MODULE: 'on'
modulePath: '$(system.defaultWorkingDirectory)'
pool:
vmImage: 'ubuntu-latest'
steps:
- task: InstallSSHKey@0
inputs:
knownHostsEntry: '$(KNOWN_HOST)'
sshPublicKey: '$(SSH_PUBLIC_KEY)'
sshKeySecureFile: 'azure-pipelines-ssh-key'
- task: DockerInstaller@0
inputs:
dockerVersion: '17.09.0-ce'
- task: Docker@2
displayName: Login to ACR
inputs:
command: login
containerRegistry: mocimages-connection
- task: GoTool@0
inputs:
version: '1.13.5'
- script: |
mkdir -p '$(GOBIN)'
mkdir -p '$(GOPATH)/pkg'
mkdir -p '$(modulePath)'
mkdir -p '$(GOROOT)'
shopt -s extglob
shopt -s dotglob
mv !(gopath) '$(modulePath)'
echo '##vso[task.prependpath]$(GOBIN)'
echo '##vso[task.prependpath]$(GOROOT)/bin'
displayName: 'Set up the Go workspace'
- script: |
git config --global url.ssh://git@github.com/.insteadOf https://github.com/
make IMG=mocimages.azurecr.io/caphcontroller:latest docker-build docker-push
chmod +x hack/for-pipeline.sh
hack/for-pipeline.sh
workingDirectory: '$(modulePath)'
displayName: 'Build'
- publish: $(modulePath)/bin
artifact: binaries

33
cloud/converters/vm.go Normal file
@ -0,0 +1,33 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package converters
import (
"github.com/Azure/go-autorest/autorest/to"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/microsoft/moc-sdk-for-go/services/compute"
)
// SDKToVM converts an SDK VirtualMachine to the provider VM type.
func SDKToVM(v compute.VirtualMachine) (*infrav1.VM, error) {
vm := &infrav1.VM{
ID: to.String(v.ID),
Name: to.String(v.Name),
State: infrav1.VMStateSucceeded, // Hard-coded for now until we expose provisioning state
}
return vm, nil
}
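A short sketch (not part of this commit) of the converter in use, with the SDK object built inline; the field values are illustrative:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/to"
	"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/converters"
	"github.com/microsoft/moc-sdk-for-go/services/compute"
)

func main() {
	sdkVM := compute.VirtualMachine{
		ID:   to.StringPtr("vm-0000"), // illustrative
		Name: to.StringPtr("demo-machine-0"),
	}
	vm, err := converters.SDKToVM(sdkVM)
	if err != nil {
		panic(err)
	}
	// State is always VMStateSucceeded until provisioning state is exposed.
	fmt.Println(vm.ID, vm.Name, vm.State)
}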

151
cloud/defaults.go Normal file
@ -0,0 +1,151 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azurestackhci
import (
"fmt"
"github.com/blang/semver"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/pkg/errors"
"k8s.io/utils/pointer"
)
const (
// DefaultUserName is the default username for a created VM
DefaultUserName = "clouduser"
// DefaultVnetCIDR is the default Vnet CIDR
DefaultVnetCIDR = "10.0.0.0/8"
// DefaultVnetRouteDestinationPrefix is the destination prefix of the default Vnet route
DefaultVnetRouteDestinationPrefix = "0.0.0.0/0"
// DefaultVnetRouteNextHop is the next hop of the default Vnet route
DefaultVnetRouteNextHop = "10.0.0.1"
// DefaultControlPlaneSubnetCIDR is the default Control Plane Subnet CIDR
DefaultControlPlaneSubnetCIDR = "10.0.0.0/16"
// DefaultNodeSubnetCIDR is the default Node Subnet CIDR
DefaultNodeSubnetCIDR = "10.1.0.0/16"
// DefaultInternalLBIPAddress is the default internal load balancer IP address
DefaultInternalLBIPAddress = "10.0.0.100"
// DefaultAzureStackHCIDNSZone is the default provided azurestackhci DNS zone
DefaultAzureStackHCIDNSZone = "cloudapp.azurestackhci.com"
// UserAgent used for communicating with azurestackhci
UserAgent = "cluster-api-azurestackhci-services"
)
const (
// DefaultImageOfferID is the default image offer ID
DefaultImageOfferID = "linux"
// DefaultImageSKU is the default image SKU
DefaultImageSKU = "linux"
// DefaultImagePublisherID is the default publisher ID
DefaultImagePublisherID = "na"
// LatestVersion is the image version latest
LatestVersion = "latest"
)
// SupportedAvailabilityZoneLocations is a slice of the locations where Availability Zones are supported.
// This is used to validate whether a virtual machine should leverage an Availability Zone.
// Based on the Availability Zones listed in https://docs.microsoft.com/en-us/azure/availability-zones/az-overview
var SupportedAvailabilityZoneLocations = []string{
// Americas
"centralus",
"eastus",
"eastus2",
"westus2",
// Europe
"francecentral",
"northeurope",
"uksouth",
"westeurope",
// Asia Pacific
"japaneast",
"southeastasia",
}
// GenerateVnetName generates a virtual network name, based on the cluster name.
func GenerateVnetName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "vnet")
}
// GenerateControlPlaneSecurityGroupName generates a control plane security group name, based on the cluster name.
func GenerateControlPlaneSecurityGroupName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "controlplane-nsg")
}
// GenerateNodeSecurityGroupName generates a node security group name, based on the cluster name.
func GenerateNodeSecurityGroupName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "node-nsg")
}
// GenerateNodeRouteTableName generates a node route table name, based on the cluster name.
func GenerateNodeRouteTableName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "node-routetable")
}
// GenerateControlPlaneSubnetName generates a control plane subnet name, based on the cluster name.
func GenerateControlPlaneSubnetName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "controlplane-subnet")
}
// GenerateNodeSubnetName generates a node subnet name, based on the cluster name.
func GenerateNodeSubnetName(clusterName string) string {
return fmt.Sprintf("%s-%s", clusterName, "node-subnet")
}
// GenerateFQDN generates a fully qualified domain name, based on the public IP name and cluster location.
func GenerateFQDN(publicIPName, location string) string {
return fmt.Sprintf("%s.%s.%s", publicIPName, location, DefaultAzureStackHCIDNSZone)
}
// GenerateNICName generates the name of a network interface based on the name of a VM.
func GenerateNICName(machineName string) string {
return fmt.Sprintf("%s-nic", machineName)
}
// GenerateOSDiskName generates the name of an OS disk based on the name of a VM.
func GenerateOSDiskName(machineName string) string {
return fmt.Sprintf("%s_OSDisk", machineName)
}
// getDefaultImageName gets the name of the image to use for the provided version of Kubernetes.
func getDefaultImageName(k8sVersion string) (string, error) {
version, err := semver.ParseTolerant(k8sVersion)
if err != nil {
return "", errors.Wrapf(err, "unable to parse Kubernetes version \"%s\" in spec, expected valid SemVer string", k8sVersion)
}
return fmt.Sprintf("linux_k8s_%d-%d-%d", version.Major, version.Minor, version.Patch), nil
}
// GetDefaultLinuxImage returns the default image spec for Linux.
func GetDefaultLinuxImage(k8sVersion string) (*infrav1.Image, error) {
imageName, err := getDefaultImageName(k8sVersion)
if err != nil {
return nil, errors.Wrapf(err, "failed to get default image name")
}
defaultImage := &infrav1.Image{
Name: &imageName,
Publisher: pointer.StringPtr(DefaultImagePublisherID),
Offer: pointer.StringPtr(DefaultImageOfferID),
SKU: pointer.StringPtr(DefaultImageSKU),
Version: pointer.StringPtr(LatestVersion),
}
return defaultImage, nil
}
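The naming helpers derive every per-cluster resource name from the cluster name, which keeps reconciliation deterministic. A small sketch (not part of this commit) of the expected outputs:

package main

import (
	"fmt"

	azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
)

func main() {
	fmt.Println(azurestackhci.GenerateVnetName("demo"))          // demo-vnet
	fmt.Println(azurestackhci.GenerateNICName("demo-master-0"))  // demo-master-0-nic
	fmt.Println(azurestackhci.GenerateFQDN("demo-ip", "eastus")) // demo-ip.eastus.cloudapp.azurestackhci.com

	img, err := azurestackhci.GetDefaultLinuxImage("v1.16.2")
	if err != nil {
		panic(err)
	}
	fmt.Println(*img.Name) // linux_k8s_1-16-2
}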

38
cloud/errors.go Normal file
@ -0,0 +1,38 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azurestackhci
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ResourceNotFound parses the error to check if it is a resource not found error
func ResourceNotFound(err error) bool {
if e, ok := status.FromError(err); ok && e.Code() == codes.NotFound {
return true
}
return false
}
// ResourceAlreadyExists parses the error to check if it is a resource already exists error
func ResourceAlreadyExists(err error) bool {
if e, ok := status.FromError(err); ok && e.Code() == codes.AlreadyExists {
return true
}
return false
}
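Both helpers match on gRPC status codes, which makes them easy to exercise without a live cloud agent. A minimal sketch (not part of this commit):

package main

import (
	"fmt"

	azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	err := status.Error(codes.NotFound, "vnet not found")
	fmt.Println(azurestackhci.ResourceNotFound(err))      // true
	fmt.Println(azurestackhci.ResourceAlreadyExists(err)) // false
}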

37
cloud/interfaces.go Normal file
@ -0,0 +1,37 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azurestackhci
import (
"context"
)
// Service is a generic interface used by components offering a type of service.
// Example: virtualnetworks service would offer Reconcile/Delete methods.
type Service interface {
Reconcile(ctx context.Context, spec interface{}) error
Delete(ctx context.Context, spec interface{}) error
}
// GetterService is a temporary interface used by components which still require Get methods.
// Once all components move to storing provider information within the relevant
// Cluster/Machine specs, this interface should be removed.
type GetterService interface {
Get(ctx context.Context, spec interface{}) (interface{}, error)
Reconcile(ctx context.Context, spec interface{}) error
Delete(ctx context.Context, spec interface{}) error
}
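Because every cloud service shares this shape, a caller can drive heterogeneous services uniformly while each spec stays opaque. A hedged sketch (the helper name is an assumption, not part of this commit):

package main

import (
	"context"

	azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
)

// reconcileAll is a hypothetical helper: it runs Reconcile on each service
// with its matching spec, stopping at the first error.
func reconcileAll(ctx context.Context, services []azurestackhci.Service, specs []interface{}) error {
	for i, svc := range services {
		if err := svc.Reconcile(ctx, specs[i]); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = reconcileAll(context.Background(), nil, nil)
}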

27
cloud/scope/clients.go Normal file
@ -0,0 +1,27 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
import (
"github.com/microsoft/moc/pkg/auth"
)
// AzureStackHCIClients contains all the AzureStackHCI clients used by the scopes.
type AzureStackHCIClients struct {
CloudAgentFqdn string
Authorizer auth.Authorizer
}

326
cloud/scope/cluster.go Normal file
@ -0,0 +1,326 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
import (
"context"
"os"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/security"
"github.com/microsoft/moc-sdk-for-go/services/security/authentication"
"github.com/microsoft/moc/pkg/auth"
"github.com/microsoft/moc/pkg/config"
"github.com/microsoft/moc/pkg/marshal"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/klogr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
AzureStackHCILoginCreds = "azurestackhcilogintoken"
AzureStackHCICreds = "cloudconfig"
AzureStackHCIAccessTokenFieldName = "value"
)
// ClusterScopeParams defines the input parameters used to create a new Scope.
type ClusterScopeParams struct {
AzureStackHCIClients
Client client.Client
Logger logr.Logger
Cluster *clusterv1.Cluster
AzureStackHCICluster *infrav1.AzureStackHCICluster
Context context.Context
}
// NewClusterScope creates a new Scope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
if params.Cluster == nil {
return nil, errors.New("failed to generate new scope from nil Cluster")
}
if params.AzureStackHCICluster == nil {
return nil, errors.New("failed to generate new scope from nil AzureStackHCICluster")
}
if params.Logger == nil {
params.Logger = klogr.New()
}
agentFqdn := os.Getenv("CLOUDAGENT_FQDN")
if agentFqdn == "" {
return nil, errors.New("error creating azurestackhci services. Environment variable CLOUDAGENT_FQDN is not set")
}
params.AzureStackHCIClients.CloudAgentFqdn = agentFqdn
helper, err := patch.NewHelper(params.AzureStackHCICluster, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}
scope := &ClusterScope{
Logger: params.Logger,
Client: params.Client,
AzureStackHCIClients: params.AzureStackHCIClients,
Cluster: params.Cluster,
AzureStackHCICluster: params.AzureStackHCICluster,
patchHelper: helper,
Context: context.Background(),
}
// This is temp. Will be moved to the CloudController in the future
err = scope.ReconcileAzureStackHCIAccess()
if err != nil {
return nil, errors.Wrap(err, "error creating azurestackhci services. cannot authenticate to azurestackhci")
}
return scope, nil
}
// ClusterScope defines the basic context for an actuator to operate upon.
type ClusterScope struct {
logr.Logger
Client client.Client
patchHelper *patch.Helper
AzureStackHCIClients
Cluster *clusterv1.Cluster
AzureStackHCICluster *infrav1.AzureStackHCICluster
Context context.Context
}
// GetResourceGroup allows ClusterScope to fulfill ScopeInterface and thus to be used by the cloud services.
func (s *ClusterScope) GetResourceGroup() string {
return s.AzureStackHCICluster.Spec.ResourceGroup
}
// GetCloudAgentFqdn returns the cloud agent fqdn string.
func (s *ClusterScope) GetCloudAgentFqdn() string {
return s.CloudAgentFqdn
}
// GetAuthorizer is a getter for the environment generated authorizer.
func (s *ClusterScope) GetAuthorizer() auth.Authorizer {
return s.Authorizer
}
// Network returns the cluster network object.
func (s *ClusterScope) Network() *infrav1.Network {
return &s.AzureStackHCICluster.Status.Network
}
// Vnet returns the cluster Vnet.
func (s *ClusterScope) Vnet() *infrav1.VnetSpec {
return &s.AzureStackHCICluster.Spec.NetworkSpec.Vnet
}
// Subnets returns the cluster subnets.
func (s *ClusterScope) Subnets() infrav1.Subnets {
return s.AzureStackHCICluster.Spec.NetworkSpec.Subnets
}
// SecurityGroups returns the cluster security groups as a map, it creates the map if empty.
func (s *ClusterScope) SecurityGroups() map[infrav1.SecurityGroupRole]infrav1.SecurityGroup {
return s.AzureStackHCICluster.Status.Network.SecurityGroups
}
// Name returns the cluster name.
func (s *ClusterScope) Name() string {
return s.Cluster.Name
}
// Namespace returns the cluster namespace.
func (s *ClusterScope) Namespace() string {
return s.Cluster.Namespace
}
// APIVersion returns the APIVersion of the cluster.
func (s *ClusterScope) APIVersion() string {
return s.Cluster.APIVersion
}
// Kind returns the Kind of the cluster.
func (s *ClusterScope) Kind() string {
return s.Cluster.Kind
}
// UID returns the UID of the cluster.
func (s *ClusterScope) UID() types.UID {
return s.Cluster.UID
}
// Location returns the cluster location.
func (s *ClusterScope) Location() string {
return s.AzureStackHCICluster.Spec.Location
}
// ListOptionsLabelSelector returns a ListOptions with a label selector for clusterName.
func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption {
return client.MatchingLabels(map[string]string{
clusterv1.MachineClusterLabelName: s.Cluster.Name,
})
}
// Close closes the current scope persisting the cluster configuration and status.
func (s *ClusterScope) Close() error {
return s.patchHelper.Patch(context.TODO(), s.AzureStackHCICluster)
}
// APIServerPort returns the APIServerPort to use when creating the load balancer.
func (s *ClusterScope) APIServerPort() int32 {
if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil {
return *s.Cluster.Spec.ClusterNetwork.APIServerPort
}
return 6443
}
// LoadBalancerRef returns the load balancer reference from the AzureStackHCICluster spec.
func (s *ClusterScope) LoadBalancerRef() *corev1.ObjectReference {
return s.AzureStackHCICluster.Spec.LoadBalancerRef
}
// This is temp. Will be moved to the CloudController in the future
func (s *ClusterScope) ReconcileAzureStackHCIAccess() error {
s.Logger.Info("reconciling azurestackhci access")
secretAccess, err := s.GetSecret(AzureStackHCICreds)
if err == nil {
// Already have the AccessFile.
data, ok := secretAccess.Data[AzureStackHCIAccessTokenFieldName]
if !ok {
return errors.New("error: could not parse kubernetes secret")
}
azurestackhciObject := auth.WssdConfig{}
err := marshal.FromJSON(string(data), &azurestackhciObject)
if err != nil {
return errors.Wrap(err, "error: could not parse kubernetes secret JSON")
}
serverPem, tlsCert, err := auth.AccessFileToTls(azurestackhciObject)
if err != nil {
return errors.Wrap(err, "error: could not parse accessfile")
}
authorizer, err := auth.NewAuthorizerFromInput(tlsCert, serverPem, s.AzureStackHCIClients.CloudAgentFqdn)
if err != nil {
return errors.Wrap(err, "error: new authorizer failed")
}
s.AzureStackHCIClients.Authorizer = authorizer
return nil
}
secret, err := s.GetSecret(AzureStackHCILoginCreds)
if err != nil {
authorizer, err := auth.NewAuthorizerFromEnvironment(s.AzureStackHCIClients.CloudAgentFqdn)
if err != nil {
return errors.Wrap(err, "failed to create azurestackhci session")
}
s.AzureStackHCIClients.Authorizer = authorizer
return nil
}
s.Logger.Info("received azurestackhcilogintoken from the cluster")
data, ok := secret.Data[AzureStackHCIAccessTokenFieldName]
if !ok {
return errors.New("error: could not parse kubernetes secret")
}
loginconfig := auth.LoginConfig{}
err = config.LoadYAMLConfig(string(data), &loginconfig)
if err != nil {
return errors.Wrap(err, "failed to create azurestackhci session: parse yaml login config failed")
}
authForAuth, err := auth.NewAuthorizerForAuth(loginconfig.Token, loginconfig.Certificate, s.AzureStackHCIClients.CloudAgentFqdn)
if err != nil {
return err
}
authenticationClient, err := authentication.NewAuthenticationClient(s.AzureStackHCIClients.CloudAgentFqdn, authForAuth)
if err != nil {
return err
}
clientCert, accessFile, err := auth.GenerateClientKey(loginconfig)
if err != nil {
return err
}
id := security.Identity{
Name: &loginconfig.Name,
Certificate: &clientCert,
}
_, err = authenticationClient.Login(s.Context, "", &id)
if err != nil && !azurestackhci.ResourceAlreadyExists(err) {
return errors.Wrap(err, "failed to create azurestackhci session: login failed")
}
if !azurestackhci.ResourceAlreadyExists(err) {
str, err := marshal.ToJSON(accessFile)
if err != nil {
return err
}
if _, err := s.CreateSecret(AzureStackHCICreds, []byte(str)); err != nil {
return errors.Wrap(err, "failed to store azurestackhci access secret")
}
}
serverPem, tlsCert, err := auth.AccessFileToTls(accessFile)
if err != nil {
return err
}
authorizer, err := auth.NewAuthorizerFromInput(tlsCert, serverPem, s.AzureStackHCIClients.CloudAgentFqdn)
if err != nil {
return err
}
s.AzureStackHCIClients.Authorizer = authorizer
return nil
}
// GetSecret retrieves a kubernetes secret from the cluster namespace by name.
func (s *ClusterScope) GetSecret(name string) (*corev1.Secret, error) {
secret := &corev1.Secret{}
secretKey := client.ObjectKey{
Namespace: util.GetNamespaceOrDefault(s.Cluster.Namespace),
Name: name,
}
if err := s.Client.Get(s.Context, secretKey, secret); err != nil {
return nil, errors.Wrapf(err, "kubernetes secret query for azurestackhci access token failed")
}
return secret, nil
}
// CreateSecret creates a kubernetes secret holding the azurestackhci access token data.
func (s *ClusterScope) CreateSecret(name string, data []byte) (*corev1.Secret, error) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: util.GetNamespaceOrDefault(s.Cluster.Namespace),
Name: name,
},
Data: map[string][]byte{
AzureStackHCIAccessTokenFieldName: data,
},
}
if err := s.Client.Create(s.Context, secret); err != nil {
return nil, errors.Wrapf(err, "kubernetes secret creation for azurestackhci access token failed")
}
return secret, nil
}
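The intended call pattern, sketched below, is one scope per reconcile with a deferred Close so spec and status changes are always patched back; NewClusterScope additionally requires CLOUDAGENT_FQDN to be set and performs the access reconciliation above. This is a hedged sketch, not part of this commit; the function and variable names are assumptions:

package main

import (
	infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
	"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func reconcileCluster(c client.Client, cluster *clusterv1.Cluster, hciCluster *infrav1.AzureStackHCICluster) (reterr error) {
	clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
		Client:               c,
		Cluster:              cluster,
		AzureStackHCICluster: hciCluster,
	})
	if err != nil {
		return err
	}
	// Always close the scope so any mutation is patched back to the API server.
	defer func() {
		if err := clusterScope.Close(); err != nil && reterr == nil {
			reterr = err
		}
	}()
	// ... reconcile vnet, security groups, load balancer, etc. here ...
	return nil
}

func main() {}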

42
cloud/scope/getters.go Normal file
@ -0,0 +1,42 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
var (
DefaultClusterScopeGetter ClusterScopeGetter = ClusterScopeGetterFunc(NewClusterScope)
DefaultMachineScopeGetter MachineScopeGetter = MachineScopeGetterFunc(NewMachineScope)
)
type ClusterScopeGetter interface {
ClusterScope(params ClusterScopeParams) (*ClusterScope, error)
}
type ClusterScopeGetterFunc func(params ClusterScopeParams) (*ClusterScope, error)
func (f ClusterScopeGetterFunc) ClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
return f(params)
}
type MachineScopeGetter interface {
MachineScope(params MachineScopeParams) (*MachineScope, error)
}
type MachineScopeGetterFunc func(params MachineScopeParams) (*MachineScope, error)
func (f MachineScopeGetterFunc) MachineScope(params MachineScopeParams) (*MachineScope, error) {
return f(params)
}

136
cloud/scope/loadbalancer.go Normal file
@ -0,0 +1,136 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
import (
"context"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/pkg/errors"
"k8s.io/klog/klogr"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// LoadBalancerScopeParams defines the input parameters used to create a new LoadBalancerScope.
type LoadBalancerScopeParams struct {
Client client.Client
Logger logr.Logger
LoadBalancer *infrav1.LoadBalancer
Cluster *clusterv1.Cluster
AzureStackHCICluster *infrav1.AzureStackHCICluster
}
// NewLoadBalancerScope creates a new LoadBalancerScope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewLoadBalancerScope(params LoadBalancerScopeParams) (*LoadBalancerScope, error) {
if params.Client == nil {
return nil, errors.New("client is required when creating a LoadBalancerScope")
}
if params.LoadBalancer == nil {
return nil, errors.New("load balancer is required when creating a LoadBalancerScope")
}
if params.Logger == nil {
params.Logger = klogr.New()
}
helper, err := patch.NewHelper(params.LoadBalancer, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}
return &LoadBalancerScope{
client: params.Client,
LoadBalancer: params.LoadBalancer,
Cluster: params.Cluster,
AzureStackHCICluster: params.AzureStackHCICluster,
Logger: params.Logger,
patchHelper: helper,
Context: context.Background(),
}, nil
}
// LoadBalancerScope defines a scope defined around a load balancer.
type LoadBalancerScope struct {
logr.Logger
client client.Client
patchHelper *patch.Helper
Context context.Context
LoadBalancer *infrav1.LoadBalancer
Cluster *clusterv1.Cluster
AzureStackHCICluster *infrav1.AzureStackHCICluster
}
// Name returns the Name of the load balancer.
func (l *LoadBalancerScope) Name() string {
return l.LoadBalancer.Name
}
// Address returns the address of the load balancer, if it exists.
func (l *LoadBalancerScope) Address() string {
return l.LoadBalancer.Status.Address
}
// SetAnnotation sets a key value annotation on the LoadBalancer.
func (l *LoadBalancerScope) SetAnnotation(key, value string) {
if l.LoadBalancer.Annotations == nil {
l.LoadBalancer.Annotations = map[string]string{}
}
l.LoadBalancer.Annotations[key] = value
}
// Close the LoadBalancerScope by updating the loadBalancer spec and status.
func (l *LoadBalancerScope) Close() error {
return l.patchHelper.Patch(l.Context, l.LoadBalancer)
}
// SetReady sets the LoadBalancer Ready Status
func (l *LoadBalancerScope) SetReady() {
l.LoadBalancer.Status.Ready = true
}
// GetVMState returns the LoadBalancer VM state.
func (l *LoadBalancerScope) GetVMState() *infrav1.VMState {
return l.LoadBalancer.Status.VMState
}
// SetVMState sets the LoadBalancer VM state.
func (l *LoadBalancerScope) SetVMState(v *infrav1.VMState) {
l.LoadBalancer.Status.VMState = new(infrav1.VMState)
*l.LoadBalancer.Status.VMState = *v
}
// SetErrorMessage sets the LoadBalancer status error message.
func (l *LoadBalancerScope) SetErrorMessage(v error) {
l.LoadBalancer.Status.ErrorMessage = pointer.StringPtr(v.Error())
}
// SetErrorReason sets the LoadBalancer status error reason.
func (l *LoadBalancerScope) SetErrorReason(v capierrors.MachineStatusError) {
l.LoadBalancer.Status.ErrorReason = &v
}
// SetAddress sets the Address field of the LoadBalancer status.
func (l *LoadBalancerScope) SetAddress(address string) {
l.LoadBalancer.Status.Address = address
}

188
cloud/scope/machine.go Normal file
@ -0,0 +1,188 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
import (
"context"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/pkg/errors"
"k8s.io/klog/klogr"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
"sigs.k8s.io/cluster-api/controllers/noderefutil"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// MachineScopeParams defines the input parameters used to create a new MachineScope.
type MachineScopeParams struct {
AzureStackHCIClients
Client client.Client
Logger logr.Logger
Cluster *clusterv1.Cluster
Machine *clusterv1.Machine
AzureStackHCICluster *infrav1.AzureStackHCICluster
AzureStackHCIMachine *infrav1.AzureStackHCIMachine
}
// NewMachineScope creates a new MachineScope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewMachineScope(params MachineScopeParams) (*MachineScope, error) {
if params.Client == nil {
return nil, errors.New("client is required when creating a MachineScope")
}
if params.Machine == nil {
return nil, errors.New("machine is required when creating a MachineScope")
}
if params.Cluster == nil {
return nil, errors.New("cluster is required when creating a MachineScope")
}
if params.AzureStackHCICluster == nil {
return nil, errors.New("azurestackhci cluster is required when creating a MachineScope")
}
if params.AzureStackHCIMachine == nil {
return nil, errors.New("azurestackhci machine is required when creating a MachineScope")
}
if params.Logger == nil {
params.Logger = klogr.New()
}
helper, err := patch.NewHelper(params.AzureStackHCIMachine, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}
return &MachineScope{
client: params.Client,
Cluster: params.Cluster,
Machine: params.Machine,
AzureStackHCICluster: params.AzureStackHCICluster,
AzureStackHCIMachine: params.AzureStackHCIMachine,
Logger: params.Logger,
patchHelper: helper,
}, nil
}
// MachineScope defines a scope defined around a machine and its cluster.
type MachineScope struct {
logr.Logger
client client.Client
patchHelper *patch.Helper
Cluster *clusterv1.Cluster
Machine *clusterv1.Machine
AzureStackHCICluster *infrav1.AzureStackHCICluster
AzureStackHCIMachine *infrav1.AzureStackHCIMachine
}
// Location returns the AzureStackHCIMachine location.
func (m *MachineScope) Location() string {
return m.AzureStackHCICluster.Spec.Location
}
// AvailabilityZone returns the AzureStackHCIMachine Availability Zone.
func (m *MachineScope) AvailabilityZone() string {
return *m.AzureStackHCIMachine.Spec.AvailabilityZone.ID
}
// Name returns the AzureStackHCIMachine name.
func (m *MachineScope) Name() string {
return m.AzureStackHCIMachine.Name
}
// Namespace returns the namespace name.
func (m *MachineScope) Namespace() string {
return m.AzureStackHCIMachine.Namespace
}
// IsControlPlane returns true if the machine is a control plane.
func (m *MachineScope) IsControlPlane() bool {
return util.IsControlPlaneMachine(m.Machine)
}
// Role returns the machine role from the labels.
func (m *MachineScope) Role() string {
if util.IsControlPlaneMachine(m.Machine) {
return infrav1.ControlPlane
}
return infrav1.Node
}
// GetVMID returns the AzureStackHCIMachine instance id by parsing Spec.ProviderID.
func (m *MachineScope) GetVMID() *string {
parsed, err := noderefutil.NewProviderID(m.GetProviderID())
if err != nil {
return nil
}
return pointer.StringPtr(parsed.ID())
}
// GetProviderID returns the AzureStackHCIMachine providerID from the spec.
func (m *MachineScope) GetProviderID() string {
if m.AzureStackHCIMachine.Spec.ProviderID != nil {
return *m.AzureStackHCIMachine.Spec.ProviderID
}
return ""
}
// SetProviderID sets the AzureStackHCIMachine providerID in spec.
func (m *MachineScope) SetProviderID(v string) {
m.AzureStackHCIMachine.Spec.ProviderID = pointer.StringPtr(v)
}
// GetVMState returns the AzureStackHCIMachine VM state.
func (m *MachineScope) GetVMState() *infrav1.VMState {
return m.AzureStackHCIMachine.Status.VMState
}
// SetVMState sets the AzureStackHCIMachine VM state.
func (m *MachineScope) SetVMState(v *infrav1.VMState) {
m.AzureStackHCIMachine.Status.VMState = new(infrav1.VMState)
*m.AzureStackHCIMachine.Status.VMState = *v
}
// SetReady sets the AzureStackHCIMachine Ready Status
func (m *MachineScope) SetReady() {
m.AzureStackHCIMachine.Status.Ready = true
}
// SetErrorMessage sets the AzureStackHCIMachine status error message.
func (m *MachineScope) SetErrorMessage(v error) {
m.AzureStackHCIMachine.Status.ErrorMessage = pointer.StringPtr(v.Error())
}
// SetErrorReason sets the AzureStackHCIMachine status error reason.
func (m *MachineScope) SetErrorReason(v capierrors.MachineStatusError) {
m.AzureStackHCIMachine.Status.ErrorReason = &v
}
// SetAnnotation sets a key value annotation on the AzureStackHCIMachine.
func (m *MachineScope) SetAnnotation(key, value string) {
if m.AzureStackHCIMachine.Annotations == nil {
m.AzureStackHCIMachine.Annotations = map[string]string{}
}
m.AzureStackHCIMachine.Annotations[key] = value
}
// Close the MachineScope by updating the machine spec, machine status.
func (m *MachineScope) Close() error {
return m.patchHelper.Patch(context.TODO(), m.AzureStackHCIMachine)
}

@ -0,0 +1,12 @@
package scope
import (
"github.com/microsoft/moc/pkg/auth"
)
// ScopeInterface allows multiple scope types to be used for cloud services
type ScopeInterface interface {
GetResourceGroup() string
GetCloudAgentFqdn() string
GetAuthorizer() auth.Authorizer
}

@ -0,0 +1,197 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scope
import (
"context"
"os"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/microsoft/moc/pkg/auth"
"github.com/pkg/errors"
"k8s.io/klog/klogr"
"k8s.io/utils/pointer"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/client"
)
// VirtualMachineScopeParams defines the input parameters used to create a new VirtualMachineScope.
type VirtualMachineScopeParams struct {
AzureStackHCIClients
Client client.Client
Logger logr.Logger
AzureStackHCIVirtualMachine *infrav1.AzureStackHCIVirtualMachine
}
// NewVirtualMachineScope creates a new VirtualMachineScope from the supplied parameters.
// This is meant to be called for each reconcile iteration.
func NewVirtualMachineScope(params VirtualMachineScopeParams) (*VirtualMachineScope, error) {
if params.Client == nil {
return nil, errors.New("client is required when creating a VirtualMachineScope")
}
if params.AzureStackHCIVirtualMachine == nil {
return nil, errors.New("azurestackhci virtual machine is required when creating a VirtualMachineScope")
}
if params.Logger == nil {
params.Logger = klogr.New()
}
agentFqdn := os.Getenv("CLOUDAGENT_FQDN")
if agentFqdn == "" {
return nil, errors.New("error creating azurestackhci services. Environment variable CLOUDAGENT_FQDN is not set")
}
params.AzureStackHCIClients.CloudAgentFqdn = agentFqdn
authorizer, err := auth.NewAuthorizerFromEnvironment(agentFqdn)
if err != nil {
return nil, errors.Wrap(err, "failed to create azurestackhci session")
}
params.AzureStackHCIClients.Authorizer = authorizer
helper, err := patch.NewHelper(params.AzureStackHCIVirtualMachine, params.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to init patch helper")
}
return &VirtualMachineScope{
client: params.Client,
AzureStackHCIVirtualMachine: params.AzureStackHCIVirtualMachine,
AzureStackHCIClients: params.AzureStackHCIClients,
Logger: params.Logger,
patchHelper: helper,
Context: context.Background(),
}, nil
}
// VirtualMachineScope defines a scope defined around a virtual machine.
type VirtualMachineScope struct {
logr.Logger
client client.Client
patchHelper *patch.Helper
Context context.Context
AzureStackHCIClients
AzureStackHCIVirtualMachine *infrav1.AzureStackHCIVirtualMachine
}
// GetResourceGroup allows VirtualMachineScope to fulfill ScopeInterface and thus to be used by the cloud services.
func (m *VirtualMachineScope) GetResourceGroup() string {
return m.AzureStackHCIVirtualMachine.Spec.ResourceGroup
}
// GetCloudAgentFqdn returns the cloud agent fqdn string.
func (m *VirtualMachineScope) GetCloudAgentFqdn() string {
return m.CloudAgentFqdn
}
// GetAuthorizer is a getter for the environment generated authorizer.
func (m *VirtualMachineScope) GetAuthorizer() auth.Authorizer {
return m.Authorizer
}
// VnetName returns the vnet name given in the vm spec.
func (m *VirtualMachineScope) VnetName() string {
return m.AzureStackHCIVirtualMachine.Spec.VnetName
}
// SubnetName returns the subnet name given in the vm spec.
func (m *VirtualMachineScope) SubnetName() string {
return m.AzureStackHCIVirtualMachine.Spec.SubnetName
}
// ClusterName returns the cluster name in the vm spec.
func (m *VirtualMachineScope) ClusterName() string {
return m.AzureStackHCIVirtualMachine.Spec.ClusterName
}
// Location returns the AzureStackHCIVirtualMachine location.
func (m *VirtualMachineScope) Location() string {
return m.AzureStackHCIVirtualMachine.Spec.Location
}
// AvailabilityZone returns the AzureStackHCIVirtualMachine Availability Zone.
func (m *VirtualMachineScope) AvailabilityZone() string {
return *m.AzureStackHCIVirtualMachine.Spec.AvailabilityZone.ID
}
// Name returns the AzureStackHCIVirtualMachine name.
func (m *VirtualMachineScope) Name() string {
return m.AzureStackHCIVirtualMachine.Name
}
// Namespace returns the namespace name.
func (m *VirtualMachineScope) Namespace() string {
return m.AzureStackHCIVirtualMachine.Namespace
}
// GetVMState returns the AzureStackHCIVirtualMachine VM state.
func (m *VirtualMachineScope) GetVMState() *infrav1.VMState {
return m.AzureStackHCIVirtualMachine.Status.VMState
}
// SetVMState sets the AzureStackHCIVirtualMachine VM state.
func (m *VirtualMachineScope) SetVMState(v infrav1.VMState) {
m.AzureStackHCIVirtualMachine.Status.VMState = new(infrav1.VMState)
*m.AzureStackHCIVirtualMachine.Status.VMState = v
}
// SetReady sets the AzureStackHCIVirtualMachine Ready Status
func (m *VirtualMachineScope) SetReady() {
m.AzureStackHCIVirtualMachine.Status.Ready = true
}
// SetErrorMessage sets the AzureStackHCIVirtualMachine status error message.
func (m *VirtualMachineScope) SetErrorMessage(v error) {
m.AzureStackHCIVirtualMachine.Status.ErrorMessage = pointer.StringPtr(v.Error())
}
// SetErrorReason sets the AzureStackHCIVirtualMachine status error reason.
func (m *VirtualMachineScope) SetErrorReason(v capierrors.MachineStatusError) {
m.AzureStackHCIVirtualMachine.Status.ErrorReason = &v
}
// SetAnnotation sets a key value annotation on the AzureStackHCIVirtualMachine.
func (m *VirtualMachineScope) SetAnnotation(key, value string) {
if m.AzureStackHCIVirtualMachine.Annotations == nil {
m.AzureStackHCIVirtualMachine.Annotations = map[string]string{}
}
m.AzureStackHCIVirtualMachine.Annotations[key] = value
}
// Close the VirtualMachineScope by updating the machine spec, machine status.
func (m *VirtualMachineScope) Close() error {
return m.patchHelper.Patch(context.TODO(), m.AzureStackHCIVirtualMachine)
}
// LoadBalancerVM returns true if the AzureStackHCIVirtualMachine is owned by a LoadBalancer resource and false otherwise (Tenant).
func (m *VirtualMachineScope) LoadBalancerVM() bool {
for _, ref := range m.AzureStackHCIVirtualMachine.ObjectMeta.GetOwnerReferences() {
m.Info("owner references", "type", ref.Kind)
if ref.Kind == "LoadBalancer" && ref.APIVersion == m.AzureStackHCIVirtualMachine.APIVersion {
return true
}
}
return false
}
// BackendPoolName returns the backend pool name for the virtual machine
func (m *VirtualMachineScope) BackendPoolName() string {
return m.AzureStackHCIVirtualMachine.Spec.BackendPoolName
}

@ -0,0 +1,93 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disks
import (
"context"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/storage"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec defines the specification for a disk.
type Spec struct {
Name string
Source string
}
// Get provides information about a disk.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
diskSpec, ok := spec.(*Spec)
if !ok {
return storage.VirtualHardDisk{}, errors.New("invalid disk specification")
}
disk, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), "", diskSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "disk %s not found", diskSpec.Name)
} else if err != nil {
return nil, err
}
return (*disk)[0], nil
}
// Reconcile gets/creates/updates a disk.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
diskSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid disk specification")
}
if _, err := s.Get(ctx, diskSpec); err == nil {
// disk already exists; cannot update since it is immutable
return nil
}
klog.V(2).Infof("creating disk %s ", diskSpec.Name)
_, err := s.Client.CreateOrUpdate(ctx, s.Scope.GetResourceGroup(), "", diskSpec.Name,
&storage.VirtualHardDisk{
Name: &diskSpec.Name,
VirtualHardDiskProperties: &storage.VirtualHardDiskProperties{},
})
if err != nil {
return err
}
klog.V(2).Infof("successfully created disk %s ", diskSpec.Name)
return err
}
// Delete deletes the disk associated with a VM.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
diskSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid disk specification")
}
klog.V(2).Infof("deleting disk %s", diskSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), "", diskSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete disk %s in resource group %s", diskSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted disk %s", diskSpec.Name)
return err
}
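The groups service later in this commit follows the same Get/Reconcile/Delete shape, so one usage sketch covers both. Hedged: the disks import path below is an assumption based on the package name, and the helper is illustrative, not part of this commit:

package main

import (
	"context"

	"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
	disks "github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/disks" // path assumed
)

// reconcileOSDisk creates the disk if it is missing; Reconcile is a no-op when
// the disk already exists, because disks are immutable.
func reconcileOSDisk(ctx context.Context, s scope.ScopeInterface, name string) error {
	return disks.NewService(s).Reconcile(ctx, &disks.Spec{Name: name})
}

func main() {}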

@ -0,0 +1,47 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package disks
import (
//"github.com/Azure/go-autorest/autorest"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/storage/virtualharddisk"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on virtual hard disks
type Service struct {
Client virtualharddisk.VirtualHardDiskClient
Scope scope.ScopeInterface
}
// getDisksClient creates a new disks client.
func getDisksClient(cloudAgentFqdn string, authorizer auth.Authorizer) virtualharddisk.VirtualHardDiskClient {
disksClient, _ := virtualharddisk.NewVirtualHardDiskClient(cloudAgentFqdn, authorizer)
return *disksClient
}
// NewService creates a new disks service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getDisksClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,93 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groups
import (
"context"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/cloud"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the specification for a group.
type Spec struct {
Name string
Location string
}
// Get provides information about a group.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
groupSpec, ok := spec.(*Spec)
if !ok {
return cloud.Group{}, errors.New("invalid group specification")
}
group, err := s.Client.Get(ctx, groupSpec.Location, groupSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "group %s not found in location %s", groupSpec.Name, groupSpec.Location)
} else if err != nil {
return nil, err
}
return (*group)[0], nil
}
// Reconcile gets/creates/updates a group.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
groupSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid group specification")
}
if _, err := s.Get(ctx, groupSpec); err == nil {
// group already exists, cannot update since its immutable
return nil
}
klog.V(2).Infof("creating group %s in location %s", groupSpec.Name, groupSpec.Location)
_, err := s.Client.CreateOrUpdate(ctx, groupSpec.Location, groupSpec.Name,
&cloud.Group{
Name: &groupSpec.Name,
Location: &groupSpec.Location,
})
if err != nil {
return err
}
klog.V(2).Infof("successfully created group %s", groupSpec.Name)
return err
}
// Delete deletes a group.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
groupSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid group specification")
}
klog.V(2).Infof("deleting group %s in location %s", groupSpec.Name, groupSpec.Location)
err := s.Client.Delete(ctx, groupSpec.Location, groupSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete group %s", groupSpec.Name)
}
klog.V(2).Infof("successfully deleted group %s", groupSpec.Name)
return err
}

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groups
import (
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/cloud/group"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on groups
type Service struct {
Client group.GroupClient
Scope *scope.ClusterScope
}
// getGroupClient creates a new group client.
func getGroupClient(cloudAgentFqdn string, authorizer auth.Authorizer) group.GroupClient {
groupClient, _ := group.NewGroupClient(cloudAgentFqdn, authorizer)
return *groupClient
}
// NewService creates a new group service.
func NewService(scope *scope.ClusterScope) *Service {
return &Service{
Client: getGroupClient(scope.CloudAgentFqdn, scope.Authorizer),
Scope: scope,
}
}
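Note that groups differ from the resource-group-scoped services in two ways: they are addressed by location plus name, and NewService takes a concrete *scope.ClusterScope rather than the ScopeInterface. Extending the hypothetical example package above:

// ensureGroup creates the cluster's group if it does not already
// exist; groups are immutable, so Reconcile is a no-op once the
// group is present.
func ensureGroup(ctx context.Context, cs *scope.ClusterScope, name, location string) error {
	return groups.NewService(cs).Reconcile(ctx, &groups.Spec{
		Name:     name,
		Location: location,
	})
}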

@ -0,0 +1,92 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package keyvaults
import (
"context"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/security"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the specification for a keyvault.
type Spec struct {
Name string
}
// Get provides information about a keyvault.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
vaultSpec, ok := spec.(*Spec)
if !ok {
return security.KeyVault{}, errors.New("invalid keyvault specification")
}
vault, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), vaultSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "keyvault %s not found", vaultSpec.Name)
} else if err != nil {
return nil, err
}
return (*vault)[0], nil
}
// Reconcile gets/creates/updates a keyvault.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
vaultSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid keyvault specification")
}
if _, err := s.Get(ctx, vaultSpec); err == nil {
// vault already exists, cannot update since its immutable
return nil
}
klog.V(2).Infof("creating keyvault %s ", vaultSpec.Name)
_, err := s.Client.CreateOrUpdate(ctx, s.Scope.GetResourceGroup(), vaultSpec.Name,
&security.KeyVault{
Name: &vaultSpec.Name,
Properties: &security.KeyVaultProperties{},
})
if err != nil {
return err
}
klog.V(2).Infof("successfully created keyvault %s ", vaultSpec.Name)
return err
}
// Delete deletes a keyvault.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
vaultSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid keyvault specification")
}
klog.V(2).Infof("deleting keyvault %s", vaultSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), vaultSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete keyvault %s in resource group %s", vaultSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted keyvault %s", vaultSpec.Name)
return err
}

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package keyvaults
import (
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/security/keyvault"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on keyvaults
type Service struct {
Client keyvault.KeyVaultClient
Scope scope.ScopeInterface
}
// getKeyVaultClient creates a new keyvault client.
func getKeyVaultClient(cloudAgentFqdn string, authorizer auth.Authorizer) keyvault.KeyVaultClient {
vaultClient, _ := keyvault.NewKeyVaultClient(cloudAgentFqdn, authorizer)
return *vaultClient
}
// NewService creates a new keyvault service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getKeyVaultClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,113 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loadbalancers
import (
"context"
"github.com/Azure/go-autorest/autorest/to"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the input specification for Get/CreateOrUpdate/Delete calls.
type Spec struct {
Name string
BackendPoolName string
VnetName string
}
// Get provides information about a load balancer.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
lbSpec, ok := spec.(*Spec)
if !ok {
return network.LoadBalancer{}, errors.New("invalid loadbalancer specification")
}
lb, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), lbSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "loadbalancer %s not found", lbSpec.Name)
} else if err != nil {
return nil, err
}
return (*lb)[0], nil
}
// Reconcile gets/creates/updates a load balancer.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
lbSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid loadbalancer specification")
}
if _, err := s.Get(ctx, lbSpec); err == nil {
// loadbalancer already exists, no update supported for now
return nil
}
networkLB := network.LoadBalancer{
Name: to.StringPtr(lbSpec.Name),
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{
BackendAddressPools: &[]network.BackendAddressPool{
{
Name: to.StringPtr(lbSpec.BackendPoolName),
},
},
FrontendIPConfigurations: &[]network.FrontendIPConfiguration{
{
FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{
Subnet: &network.Subnet{
ID: to.StringPtr(lbSpec.VnetName),
},
},
},
},
},
}
// create the load balancer
klog.V(2).Infof("creating loadbalancer %s ", lbSpec.Name)
_, err := s.Client.CreateOrUpdate(ctx, s.Scope.GetResourceGroup(), lbSpec.Name, &networkLB)
if err != nil {
return err
}
klog.V(2).Infof("successfully created loadbalancer %s ", lbSpec.Name)
return err
}
// Delete deletes the load balancer with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
lbSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid loadbalancer specification")
}
klog.V(2).Infof("deleting loadbalancer %s ", lbSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), lbSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete loadbalancer %s in resource group %s", lbSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted loadbalancer %s ", lbSpec.Name)
return err
}

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package loadbalancers
import (
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/network/loadbalancer"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on load balancers.
type Service struct {
Client loadbalancer.LoadBalancerClient
Scope scope.ScopeInterface
}
// getLoadBalancersClient creates a new load balancers client.
func getLoadBalancersClient(cloudAgentFqdn string, authorizer auth.Authorizer) loadbalancer.LoadBalancerClient {
lbClient, _ := loadbalancer.NewLoadBalancerClient(cloudAgentFqdn, authorizer)
return *lbClient
}
// NewService creates a new load balancers service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getLoadBalancersClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,124 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networkinterfaces
import (
"context"
"github.com/Azure/go-autorest/autorest/to"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the specification for a network interface.
type Spec struct {
Name string
SubnetName string
VnetName string
StaticIPAddress string
MacAddress string
BackendPoolName string
}
// Get provides information about a network interface.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
nicSpec, ok := spec.(*Spec)
if !ok {
return network.Interface{}, errors.New("invalid network interface specification")
}
nic, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), nicSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "network interface %s not found", nicSpec.Name)
} else if err != nil {
return nil, err
}
return (*nic)[0], nil
}
// Reconcile gets/creates/updates a network interface.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
nicSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid network interface specification")
}
if _, err := s.Get(ctx, nicSpec); err == nil {
// Nic already exists, no update supported for now
return nil
}
nicConfig := &network.InterfaceIPConfigurationPropertiesFormat{}
nicConfig.Subnet = &network.APIEntityReference{
ID: to.StringPtr(nicSpec.VnetName),
}
nicConfig.LoadBalancerBackendAddressPools = &[]network.BackendAddressPool{
{
Name: &nicSpec.BackendPoolName,
},
}
if nicSpec.StaticIPAddress != "" {
nicConfig.PrivateIPAddress = to.StringPtr(nicSpec.StaticIPAddress)
}
_, err := s.Client.CreateOrUpdate(ctx,
s.Scope.GetResourceGroup(),
nicSpec.Name,
&network.Interface{
Name: &nicSpec.Name,
InterfacePropertiesFormat: &network.InterfacePropertiesFormat{
EnableIPForwarding: to.BoolPtr(true),
EnableMACSpoofing: to.BoolPtr(true),
MacAddress: &nicSpec.MacAddress,
IPConfigurations: &[]network.InterfaceIPConfiguration{
{
Name: to.StringPtr("pipConfig"),
InterfaceIPConfigurationPropertiesFormat: nicConfig,
},
},
},
})
if err != nil {
return errors.Wrapf(err, "failed to create network interface %s in resource group %s", nicSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully created network interface %s", nicSpec.Name)
return err
}
// Delete deletes the network interface with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
nicSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid network interface Specification")
}
klog.V(2).Infof("deleting nic %s", nicSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), nicSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete network interface %s in resource group %s", nicSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted nic %s", nicSpec.Name)
return err
}

@ -0,0 +1,47 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package networkinterfaces
import (
//"github.com/Azure/go-autorest/autorest"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/network/networkinterface"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on network interfaces
type Service struct {
Client networkinterface.InterfaceClient
Scope scope.ScopeInterface
}
// getNetworkInterfacesClient creates a new network interfaces client.
func getNetworkInterfacesClient(cloudAgentFqdn string, authorizer auth.Authorizer) networkinterface.InterfaceClient {
nicClient, _ := networkinterface.NewInterfaceClient(cloudAgentFqdn, authorizer)
return *nicClient
}
// NewService creates a new network interfaces service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getNetworkInterfacesClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}
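The NIC spec is the one with meaningfully optional fields: StaticIPAddress is applied only when non-empty, and the backend pool is referenced by name. A sketch in the same hypothetical example package (all names below are placeholders):

// ensureNIC creates a NIC on the cluster vnet. Leaving StaticIPAddress
// unset lets the cloud agent pick the private IP; setting it pins the
// address.
func ensureNIC(ctx context.Context, s scope.ScopeInterface, vnetName, poolName string) error {
	return networkinterfaces.NewService(s).Reconcile(ctx, &networkinterfaces.Spec{
		Name:            "controlplane-0-nic", // placeholder
		VnetName:        vnetName,
		BackendPoolName: poolName,
	})
}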

@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secrets
import (
"context"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/security/keyvault"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the specification for a secret.
type Spec struct {
Name string
VaultName string
FileName string
Value string
}
// Get provides information about a secret.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
secretSpec, ok := spec.(*Spec)
if !ok {
return keyvault.Secret{}, errors.New("invalid secret specification")
}
secret, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), secretSpec.Name, secretSpec.VaultName)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "secret %s not found", secretSpec.Name)
} else if err != nil {
return nil, err
}
if secret == nil || len(*secret) == 0 {
return nil, errors.Errorf("secret %s not found", secretSpec.Name)
}
return (*secret)[0], nil
}
// Reconcile gets/creates/updates a secret.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
secretSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid secret specification")
}
if _, err := s.Get(ctx, secretSpec); err == nil {
// secret already exists, cannot update since its immutable
return nil
}
klog.V(2).Infof("creating secret %s ", secretSpec.Name)
_, err := s.Client.CreateOrUpdate(ctx, s.Scope.GetResourceGroup(), secretSpec.Name,
&keyvault.Secret{
Name: &secretSpec.Name,
Value: &secretSpec.Value,
SecretProperties: &keyvault.SecretProperties{
VaultName: &secretSpec.VaultName,
FileName: &secretSpec.FileName,
},
})
if err != nil {
return err
}
klog.V(2).Infof("successfully created secret %s ", secretSpec.Name)
return err
}
// Delete deletes a secret.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
secretSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid secret specification")
}
klog.V(2).Infof("deleting secret %s", secretSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), secretSpec.Name, secretSpec.VaultName)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete secret %s in resource group %s", secretSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted secret %s", secretSpec.Name)
return err
}

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package secrets
import (
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/security/keyvault/secret"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on secrets.
type Service struct {
Client secret.SecretClient
Scope scope.ScopeInterface
}
// getSecretClient creates a new secret client.
func getSecretClient(cloudAgentFqdn string, authorizer auth.Authorizer) secret.SecretClient {
secretClient, _ := secret.NewSecretClient(cloudAgentFqdn, authorizer)
return *secretClient
}
// NewService creates a new secret service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getSecretClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vippools
import (
azhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/network/vippool"
"github.com/microsoft/moc/pkg/auth"
)
var _ azhci.Service = (*Service)(nil)
// Service provides operations on vip pools.
type Service struct {
Client vippool.VipPoolClient
Scope scope.ScopeInterface
}
// getVipPoolsClient creates a new vip pools client.
func getVipPoolsClient(cloudAgentFqdn string, authorizer auth.Authorizer) vippool.VipPoolClient {
vpClient, _ := vippool.NewVipPoolClient(cloudAgentFqdn, authorizer)
return *vpClient
}
// NewService creates a new vip pools service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getVipPoolsClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,75 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package vippools
import (
"context"
"fmt"
azhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the input specification for Get/CreateOrUpdate/Delete calls.
type Spec struct {
Name string
Location string
}
// Get provides information about a vip pool.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
vpSpec, ok := spec.(*Spec)
if !ok {
return network.VipPool{}, errors.New("invalid vippool specification")
}
vp, err := s.Client.Get(ctx, vpSpec.Location, vpSpec.Name)
if err != nil && azhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "vippool %s not found", vpSpec.Name)
} else if err != nil {
return nil, err
}
// If the user wants to get all the vippools, but none exist, the cloud
// agent returns a zero-length array.
if vp == nil || len(*vp) == 0 {
return nil, nil
}
return (*vp)[0], nil
}
// Reconcile gets/creates/updates a vip pool.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
vpSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid vippool specification")
}
if _, err := s.Get(ctx, vpSpec); err == nil {
// vippool already exists, no update supported for now
return nil
}
klog.V(2).Infof("creating a vippool is not supported")
return fmt.Errorf("creating a vippool is not supported")
}
// Delete deletes the vip pool with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
klog.V(2).Infof("deleting a vippool is not supported")
return fmt.Errorf("deleting a vippool is not supported")
}
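Vip pools are effectively read-only from this provider: Reconcile succeeds only when the pool already exists, and Get returns nil, nil for an empty result rather than an error, so callers must nil-check. A sketch under the same assumptions as the earlier examples (hypothetical names, moc network package imported):

// lookupVipPool fetches a pool by location and name. A nil pool with a
// nil error means the cloud agent reported no pools at all.
func lookupVipPool(ctx context.Context, s scope.ScopeInterface, location, name string) (*network.VipPool, error) {
	res, err := vippools.NewService(s).Get(ctx, &vippools.Spec{Name: name, Location: location})
	if err != nil || res == nil {
		return nil, err
	}
	vp := res.(network.VipPool)
	return &vp, nil
}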

@ -0,0 +1,46 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package virtualmachines
import (
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/compute/virtualmachine"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on virtual machines.
type Service struct {
Client virtualmachine.VirtualMachineClient
Scope scope.ScopeInterface
}
// getVirtualMachinesClient creates a new virtual machines client.
func getVirtualMachinesClient(cloudAgentFqdn string, authorizer auth.Authorizer) virtualmachine.VirtualMachineClient {
vmClient, _ := virtualmachine.NewVirtualMachineClient(cloudAgentFqdn, authorizer)
return *vmClient
}
// NewService creates a new virtual machines service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getVirtualMachinesClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,293 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package virtualmachines
import (
"context"
"crypto/rand"
"crypto/rsa"
"encoding/base64"
"fmt"
"github.com/Azure/go-autorest/autorest/to"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/converters"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/networkinterfaces"
"github.com/microsoft/moc-sdk-for-go/services/compute"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
"k8s.io/klog"
)
// Spec is the input specification for Get/CreateOrUpdate/Delete calls.
type Spec struct {
Name string
NICName string
SSHKeyData string
Size string
Zone string
Image infrav1.Image
OSDisk infrav1.OSDisk
CustomData string
VMType compute.VMType
}
// Get provides information about a virtual machine.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
vmSpec, ok := spec.(*Spec)
if !ok {
return compute.VirtualMachine{}, errors.New("invalid vm specification")
}
vm, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), vmSpec.Name)
if err != nil {
return nil, err
}
if vm == nil || len(*vm) == 0 {
return nil, errors.Wrapf(err, "vm %s not found", vmSpec.Name)
}
return converters.SDKToVM((*vm)[0])
}
// Reconcile gets/creates/updates a virtual machine.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
vmSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid vm specification")
}
storageProfile, err := generateStorageProfile(*vmSpec)
if err != nil {
return err
}
klog.V(2).Infof("getting nic %s", vmSpec.NICName)
nicInterface, err := networkinterfaces.NewService(s.Scope).Get(ctx, &networkinterfaces.Spec{Name: vmSpec.NICName})
if err != nil {
return err
}
nic, ok := nicInterface.(network.Interface)
if !ok {
return errors.New("error getting network interface")
}
klog.V(2).Infof("got nic %s", vmSpec.NICName)
klog.V(2).Infof("creating vm %s ", vmSpec.Name)
sshKeyData := vmSpec.SSHKeyData
if sshKeyData == "" {
privateKey, perr := rsa.GenerateKey(rand.Reader, 2048)
if perr != nil {
return errors.Wrap(perr, "Failed to generate private key")
}
publicRsaKey, perr := ssh.NewPublicKey(&privateKey.PublicKey)
if perr != nil {
return errors.Wrap(perr, "Failed to generate public key")
}
sshKeyData = string(ssh.MarshalAuthorizedKey(publicRsaKey))
}
randomPassword, err := GenerateRandomString(32)
if err != nil {
return errors.Wrapf(err, "failed to generate random string")
}
virtualMachine := compute.VirtualMachine{
Name: to.StringPtr(vmSpec.Name),
VirtualMachineProperties: &compute.VirtualMachineProperties{
StorageProfile: storageProfile,
OsProfile: &compute.OSProfile{
ComputerName: to.StringPtr(vmSpec.Name),
AdminUsername: to.StringPtr(azurestackhci.DefaultUserName),
AdminPassword: to.StringPtr(randomPassword),
CustomData: to.StringPtr(vmSpec.CustomData),
LinuxConfiguration: &compute.LinuxConfiguration{
SSH: &compute.SSHConfiguration{
PublicKeys: &[]compute.SSHPublicKey{
{
Path: to.StringPtr(fmt.Sprintf("/home/%s/.ssh/authorized_keys", azurestackhci.DefaultUserName)),
KeyData: to.StringPtr(sshKeyData),
},
},
},
DisablePasswordAuthentication: to.BoolPtr(false),
},
},
NetworkProfile: &compute.NetworkProfile{
NetworkInterfaces: &[]compute.NetworkInterfaceReference{
{
ID: nic.Name,
},
},
},
VmType: vmSpec.VMType,
HardwareProfile: &compute.HardwareProfile{
VMSize: compute.VirtualMachineSizeTypes(vmSpec.Size),
},
},
}
if vmSpec.OSDisk.OSType == "Windows" {
virtualMachine.OsProfile.LinuxConfiguration = nil
windowsComputerName := "AzureStackHCITestVM"
virtualMachine.OsProfile.ComputerName = &windowsComputerName
pass := "p@ssw0rd!"
virtualMachine.OsProfile.AdminPassword = &pass
username := "TestVMAdmin"
virtualMachine.OsProfile.AdminUsername = &username
}
_, err = s.Client.CreateOrUpdate(
ctx,
s.Scope.GetResourceGroup(),
vmSpec.Name,
&virtualMachine)
if err != nil {
return errors.Wrapf(err, "cannot create vm")
}
klog.V(2).Infof("successfully created vm %s ", vmSpec.Name)
return err
}
// Delete deletes the virtual machine with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
vmSpec, ok := spec.(*Spec)
if !ok {
return errors.New("invalid vm Specification")
}
klog.V(2).Infof("deleting vm %s ", vmSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), vmSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete vm %s in resource group %s", vmSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted vm %s ", vmSpec.Name)
return err
}
// generateStorageProfile generates a pointer to a compute.StorageProfile which can be utilized for VM creation.
func generateStorageProfile(vmSpec Spec) (*compute.StorageProfile, error) {
osDisk := &compute.OSDisk{
OsType: compute.OperatingSystemTypes(vmSpec.OSDisk.OSType),
Vhd: &compute.VirtualHardDisk{
URI: to.StringPtr(azurestackhci.GenerateOSDiskName(vmSpec.Name)),
},
}
dataDisks := make([]compute.DataDisk, 0)
imageRef, err := generateImageReference(vmSpec.Image)
if err != nil {
return nil, errors.Wrapf(err, "error getting image reference")
}
storageProfile := &compute.StorageProfile{
OsDisk: osDisk,
DataDisks: &dataDisks,
ImageReference: imageRef,
}
return storageProfile, nil
}
// generateImageReference generates a pointer to a compute.ImageReference which can be utilized for VM creation.
func generateImageReference(image infrav1.Image) (*compute.ImageReference, error) {
imageRef := &compute.ImageReference{}
if image.Name == nil {
return nil, errors.New("Missing ImageReference")
}
imageRef.Name = to.StringPtr(*image.Name)
if image.ID != nil {
imageRef.ID = to.StringPtr(*image.ID)
// return early since we should only need the image ID
return imageRef, nil
} else if image.SubscriptionID != nil && image.ResourceGroup != nil && image.Gallery != nil && image.Name != nil && image.Version != nil {
imageID, err := generateImageID(image)
if err != nil {
return nil, err
}
imageRef.ID = to.StringPtr(imageID)
// return early since we're referencing an image that may not be published
return imageRef, nil
}
if image.Publisher != nil {
imageRef.Publisher = image.Publisher
}
if image.Offer != nil {
imageRef.Offer = image.Offer
}
if image.SKU != nil {
imageRef.Sku = image.SKU
}
if image.Version != nil {
imageRef.Version = image.Version
return imageRef, nil
}
return nil, errors.Errorf("Image reference cannot be generated, as fields are missing: %+v", *imageRef)
}
// generateImageID generates the resource ID for an image stored in an AzureStackHCI Shared Image Gallery.
func generateImageID(image infrav1.Image) (string, error) {
if image.SubscriptionID == nil {
return "", errors.New("Image subscription ID cannot be nil when specifying an image from an AzureStackHCI Shared Image Gallery")
}
if image.ResourceGroup == nil {
return "", errors.New("Image resource group cannot be nil when specifying an image from an AzureStackHCI Shared Image Gallery")
}
if image.Gallery == nil {
return "", errors.New("Image gallery cannot be nil when specifying an image from an AzureStackHCI Shared Image Gallery")
}
if image.Name == nil {
return "", errors.New("Image name cannot be nil when specifying an image from an AzureStackHCI Shared Image Gallery")
}
if image.Version == nil {
return "", errors.New("Image version cannot be nil when specifying an image from an AzureStackHCI Shared Image Gallery")
}
return fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/galleries/%s/images/%s/versions/%s", *image.SubscriptionID, *image.ResourceGroup, *image.Gallery, *image.Name, *image.Version), nil
}
// GenerateRandomString returns a URL-safe, base64 encoded
// securely generated random string.
// It will return an error if the system's secure random
// number generator fails to function correctly, in which
// case the caller should not continue.
func GenerateRandomString(n int) (string, error) {
b := make([]byte, n)
_, err := rand.Read(b)
// Note that err == nil only if we read len(b) bytes.
if err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(b), err
}
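For reference, the gallery image ID assembled by generateImageID follows the ARM-style path in the Sprintf above. A small in-package sketch with placeholder values (the helper is unexported, so this only works inside package virtualmachines):

img := infrav1.Image{
	SubscriptionID: to.StringPtr("sub0"),
	ResourceGroup:  to.StringPtr("rg0"),
	Gallery:        to.StringPtr("gallery0"),
	Name:           to.StringPtr("linux"),
	Version:        to.StringPtr("1.0.0"),
}
id, _ := generateImageID(img)
// id == "/subscriptions/sub0/resourceGroups/rg0/providers/Microsoft.Compute/galleries/gallery0/images/linux/versions/1.0.0"

Note also that GenerateRandomString's n counts random bytes, not output characters: GenerateRandomString(32) returns a 44-character base64 string.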

@ -0,0 +1,47 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package virtualnetworks
import (
//"github.com/Azure/go-autorest/autorest"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/moc-sdk-for-go/services/network/virtualnetwork"
"github.com/microsoft/moc/pkg/auth"
)
var _ azurestackhci.Service = (*Service)(nil)
// Service provides operations on virtual networks.
type Service struct {
Client virtualnetwork.VirtualNetworkClient
Scope scope.ScopeInterface
}
// getVirtualNetworksClient creates a new virtual networks client.
func getVirtualNetworksClient(cloudAgentFqdn string, authorizer auth.Authorizer) virtualnetwork.VirtualNetworkClient {
vnetsClient, _ := virtualnetwork.NewVirtualNetworkClient(cloudAgentFqdn, authorizer)
return *vnetsClient
}
// NewService creates a new virtual networks service.
func NewService(scope scope.ScopeInterface) *Service {
return &Service{
Client: getVirtualNetworksClient(scope.GetCloudAgentFqdn(), scope.GetAuthorizer()),
Scope: scope,
}
}

@ -0,0 +1,109 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package virtualnetworks
import (
"context"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
"k8s.io/klog"
)
// Spec is the input specification for Get/CreateOrUpdate/Delete calls.
type Spec struct {
Name string
CIDR string
}
// Get provides information about a virtual network.
func (s *Service) Get(ctx context.Context, spec interface{}) (interface{}, error) {
vnetSpec, ok := spec.(*Spec)
if !ok {
return network.VirtualNetwork{}, errors.New("invalid vnet specification")
}
vnet, err := s.Client.Get(ctx, s.Scope.GetResourceGroup(), vnetSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
return nil, errors.Wrapf(err, "vnet %s not found", vnetSpec.Name)
} else if err != nil {
return nil, err
}
return vnet, nil
}
// Reconcile gets/creates/updates a virtual network.
func (s *Service) Reconcile(ctx context.Context, spec interface{}) error {
// Following should be created upstream and provided as an input to NewService
// A vnet has following dependencies
// * Vnet Cidr
// * Control Plane Subnet Cidr
// * Node Subnet Cidr
// * Control Plane NSG
// * Node NSG
// * Node Routetable
vnetSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid VNET Specification")
}
if _, err := s.Get(ctx, vnetSpec); err == nil {
// vnet already exists, cannot update since its immutable
return nil
}
networkType := "Transparent"
//networkType := ""
klog.V(2).Infof("creating vnet %s ", vnetSpec.Name)
_, err := s.Client.CreateOrUpdate(ctx, s.Scope.GetResourceGroup(), vnetSpec.Name,
&network.VirtualNetwork{
Name: &vnetSpec.Name,
Type: &networkType,
VirtualNetworkPropertiesFormat: &network.VirtualNetworkPropertiesFormat{
AddressSpace: &network.AddressSpace{
AddressPrefixes: &[]string{vnetSpec.CIDR},
},
},
})
if err != nil {
return err
}
klog.V(2).Infof("successfully created vnet %s ", vnetSpec.Name)
return err
}
// Delete deletes the virtual network with the provided name.
func (s *Service) Delete(ctx context.Context, spec interface{}) error {
vnetSpec, ok := spec.(*Spec)
if !ok {
return errors.New("Invalid VNET Specification")
}
klog.V(2).Infof("deleting vnet %s ", vnetSpec.Name)
err := s.Client.Delete(ctx, s.Scope.GetResourceGroup(), vnetSpec.Name)
if err != nil && azurestackhci.ResourceNotFound(err) {
// already deleted
return nil
}
if err != nil {
return errors.Wrapf(err, "failed to delete vnet %s in resource group %s", vnetSpec.Name, s.Scope.GetResourceGroup())
}
klog.V(2).Infof("successfully deleted vnet %s ", vnetSpec.Name)
return err
}

192
cmd/manager/main.go Normal file
@ -0,0 +1,192 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"net/http"
_ "net/http/pprof"
"os"
"time"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/microsoft/cluster-api-provider-azurestackhci/controllers"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"k8s.io/klog"
"k8s.io/klog/klogr"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
"sigs.k8s.io/cluster-api/util/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
_ = clientgoscheme.AddToScheme(scheme)
_ = infrav1.AddToScheme(scheme)
_ = clusterv1.AddToScheme(scheme)
// +kubebuilder:scaffold:scheme
}
func main() {
klog.InitFlags(nil)
var (
metricsAddr string
enableLeaderElection bool
watchNamespace string
profilerAddress string
azureStackHCIClusterConcurrency int
azureStackHCIMachineConcurrency int
loadBalancerConcurrency int
azureStackHCIVirtualMachineConcurrency int
syncPeriod time.Duration
)
flag.StringVar(
&metricsAddr,
"metrics-addr",
":8080",
"The address the metric endpoint binds to.",
)
flag.BoolVar(
&enableLeaderElection,
"enable-leader-election",
false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.",
)
flag.StringVar(
&watchNamespace,
"namespace",
"",
"Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.",
)
flag.StringVar(
&profilerAddress,
"profiler-address",
"",
"Bind address to expose the pprof profiler (e.g. localhost:6060)",
)
flag.IntVar(&azureStackHCIClusterConcurrency,
"azurestackhcicluster-concurrency",
10,
"Number of AzureStackHCIClusters to process simultaneously",
)
flag.IntVar(&azureStackHCIMachineConcurrency,
"azurestackhcimachine-concurrency",
10,
"Number of AzureStackHCIMachines to process simultaneously",
)
flag.IntVar(&loadBalancerConcurrency,
"load-balancer-concurrency",
10,
"Number of LoadBalancers to process simultaneously",
)
flag.IntVar(&azureStackHCIVirtualMachineConcurrency,
"azurestackhci-virtual-machine-concurrency",
10,
"Number of AzureStackHCIVirtualMachines to process simultaneously",
)
flag.DurationVar(&syncPeriod,
"sync-period",
10*time.Minute,
"The minimum interval at which watched resources are reconciled (e.g. 15m)",
)
flag.Parse()
if watchNamespace != "" {
setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
}
if profilerAddress != "" {
setupLog.Info("Profiler listening for requests", "profiler-address", profilerAddress)
go func() {
setupLog.Error(http.ListenAndServe(profilerAddress, nil), "listen and serve error")
}()
}
ctrl.SetLogger(klogr.New())
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "controller-leader-election-caph",
SyncPeriod: &syncPeriod,
Namespace: watchNamespace,
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
// Initialize event recorder.
record.InitFromRecorder(mgr.GetEventRecorderFor("azurestackhci-controller"))
if err = (&controllers.AzureStackHCIMachineReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("AzureStackHCIMachine"),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureStackHCIMachineConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AzureStackHCIMachine")
os.Exit(1)
}
if err = (&controllers.AzureStackHCIClusterReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("AzureStackHCICluster"),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureStackHCIClusterConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AzureStackHCICluster")
os.Exit(1)
}
if err = (&controllers.LoadBalancerReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("LoadBalancer"),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: loadBalancerConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "LoadBalancer")
os.Exit(1)
}
if err = (&controllers.AzureStackHCIVirtualMachineReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("AzureStackHCIVirtualMachine"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: azureStackHCIVirtualMachineConcurrency}); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "AzureStackHCIVirtualMachine")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

@ -0,0 +1,24 @@
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
apiVersion: certmanager.k8s.io/v1alpha1
kind: Issuer
metadata:
name: selfsigned-issuer
namespace: system
spec:
selfSigned: {}
---
apiVersion: certmanager.k8s.io/v1alpha1
kind: Certificate
metadata:
name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
namespace: system
spec:
# $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
commonName: $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
dnsNames:
- $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
issuerRef:
kind: Issuer
name: selfsigned-issuer
secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize

@ -0,0 +1,26 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- certificate.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
- name: NAMESPACE # namespace of the service and the certificate CR
objref:
kind: Service
version: v1
name: webhook-service
fieldref:
fieldpath: metadata.namespace
- name: CERTIFICATENAME
objref:
kind: Certificate
group: certmanager.k8s.io
version: v1alpha2
name: serving-cert # this name should match the one in certificate.yaml
- name: SERVICENAME
objref:
kind: Service
version: v1
name: webhook-service
configurations:
- kustomizeconfig.yaml

@ -0,0 +1,16 @@
# This configuration is for teaching kustomize how to update name ref and var substitution
nameReference:
- kind: Issuer
group: certmanager.k8s.io
fieldSpecs:
- kind: Certificate
group: certmanager.k8s.io
path: spec/issuerRef/name
varReference:
- kind: Certificate
group: certmanager.k8s.io
path: spec/commonName
- kind: Certificate
group: certmanager.k8s.io
path: spec/dnsNames

@ -0,0 +1,365 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: azurestackhciclusters.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
names:
categories:
- cluster-api
kind: AzureStackHCICluster
plural: azurestackhciclusters
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: AzureStackHCICluster is the Schema for the azurestackhciclusters API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AzureStackHCIClusterSpec defines the desired state of AzureStackHCICluster
properties:
location:
type: string
networkSpec:
description: NetworkSpec encapsulates all things related to AzureStackHCI network.
properties:
subnets:
description: Subnets configuration.
items:
description: SubnetSpec configures an AzureStackHCI subnet.
properties:
cidrBlock:
description: CidrBlock is the CIDR block to be used when the
provider creates a managed Vnet.
type: string
id:
description: ID defines a unique identifier to reference this
resource.
type: string
name:
description: Name defines a name for the subnet resource.
type: string
securityGroup:
description: SecurityGroup defines the NSG (network security
group) that should be attached to this subnet.
properties:
id:
type: string
ingressRule:
description: IngressRules is a slice of AzureStackHCI ingress rules
for security groups.
items:
description: IngressRule defines an AzureStackHCI ingress rule
for security groups.
properties:
description:
type: string
destination:
description: Destination - The destination address
prefix. CIDR or destination IP range. Asterisk
'*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureStackHCILoadBalancer'
and 'Internet' can also be used.
type: string
destinationPorts:
description: DestinationPorts - The destination
port or range. Integer or range between 0 and
65535. Asterisk '*' can also be used to match all
ports.
type: string
protocol:
description: SecurityGroupProtocol defines the protocol
type for a security group rule.
type: string
source:
description: Source - The CIDR or source IP range.
Asterisk '*' can also be used to match all source
IPs. Default tags such as 'VirtualNetwork', 'AzureStackHCILoadBalancer'
and 'Internet' can also be used. If this is an
ingress rule, specifies where network traffic
originates from.
type: string
sourcePorts:
description: SourcePorts - The source port or range.
Integer or range between 0 and 65535. Asterisk
'*' can also be used to match all ports.
type: string
required:
- description
- protocol
type: object
type: array
name:
type: string
required:
- id
- ingressRule
- name
type: object
vnetId:
description: VnetID defines the ID of the virtual network
this subnet should be built in.
type: string
required:
- name
- securityGroup
- vnetId
type: object
type: array
vnet:
description: Vnet configuration.
properties:
cidrBlock:
description: CidrBlock is the CIDR block to be used when the
provider creates a managed virtual network.
type: string
id:
description: ID is the identifier of the virtual network this
provider should use to create resources.
type: string
name:
description: Name defines a name for the virtual network resource.
type: string
required:
- name
type: object
type: object
resourceGroup:
type: string
loadBalancerRef:
description: loadBalancerRef allows a load balancer, which must be created
separately, to be specified as the cluster's control plane endpoint. The
AzureStackHCICluster controller waits for the referenced load balancer's IP to be available
in its Address status field in order for the cluster to set its APIEndpoint and
be marked as ready.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of
an entire object, this string should contain a valid JSON/Go
field access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen
only to have some well-defined way of referencing a part of
an object. TODO: this design is not final and this field is
subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference
is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- location
- resourceGroup
type: object
status:
description: AzureStackHCIClusterStatus defines the observed state of AzureStackHCICluster
properties:
apiEndpoints:
description: APIEndpoints represents the endpoints to communicate with
the control plane.
items:
description: APIEndpoint represents a reachable Kubernetes API endpoint.
properties:
host:
description: The hostname on which the API server is serving.
type: string
port:
description: The port on which the API server is serving.
type: integer
required:
- host
- port
type: object
type: array
bastion:
description: VM describes an AzureStackHCI virtual machine.
properties:
availabilityZone:
type: string
id:
type: string
identity:
description: VMIdentity defines the identity of the virtual machine,
if configured.
type: string
image:
description: Storage profile
properties:
gallery:
type: string
id:
type: string
name:
type: string
offer:
type: string
publisher:
type: string
resourceGroup:
type: string
sku:
type: string
subscriptionID:
type: string
version:
type: string
type: object
name:
type: string
startupScript:
type: string
vmSize:
description: Hardware profile
type: string
vmState:
description: State - The provisioning state, which only appears
in the response.
type: string
type: object
network:
description: Network encapsulates AzureStackHCI networking resources.
properties:
apiServerIp:
description: APIServerIP is the Kubernetes API server public IP
address.
properties:
dnsName:
type: string
id:
type: string
ipAddress:
type: string
name:
type: string
type: object
apiServerLb:
description: APIServerLB is the Kubernetes API server load balancer.
properties:
backendPool:
properties:
id:
type: string
name:
type: string
type: object
frontendIpConfig:
type: object
id:
type: string
name:
type: string
sku:
description: LoadBalancerSKU enumerates the values for load
balancer sku name.
type: string
type: object
securityGroups:
additionalProperties:
description: SecurityGroup defines an AzureStackHCI security group.
properties:
id:
type: string
ingressRule:
description: IngressRules is a slice of AzureStackHCI ingress rules
for security groups.
items:
description: IngressRule defines an AzureStackHCI ingress rule for
security groups.
properties:
description:
type: string
destination:
description: Destination - The destination address prefix.
CIDR or destination IP range. An asterisk '*' can also
be used to match all destination IPs. Default tags such
as 'VirtualNetwork', 'AzureStackHCILoadBalancer' and 'Internet'
can also be used.
type: string
destinationPorts:
description: DestinationPorts - The destination port
or range. Integer or range between 0 and 65535. An asterisk
'*' can also be used to match all ports.
type: string
protocol:
description: SecurityGroupProtocol defines the protocol
type for a security group rule.
type: string
source:
description: Source - The CIDR or source IP range. An asterisk
'*' can also be used to match all source IPs. Default
tags such as 'VirtualNetwork', 'AzureStackHCILoadBalancer'
and 'Internet' can also be used. If this is an ingress
rule, specifies where network traffic originates from.
type: string
sourcePorts:
description: SourcePorts - The source port or range.
Integer or range between 0 and 65535. An asterisk '*'
can also be used to match all ports.
type: string
required:
- description
- protocol
type: object
type: array
name:
type: string
required:
- id
- ingressRule
- name
type: object
description: SecurityGroups is a map from the role/kind of the security
group to its unique name, if any.
type: object
type: object
ready:
description: Ready is true when the provider resource is ready.
type: boolean
type: object
type: object
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
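
For reference, a minimal AzureStackHCICluster manifest exercising the loadBalancerRef field described above could look like the sketch below. All names and values are illustrative assumptions, not part of this commit: the referenced LoadBalancer is created separately, and the controller waits for its Address status field before populating the cluster's APIEndpoints.

# example-azurestackhcicluster.yaml (illustrative sketch, values assumed)
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCICluster
metadata:
  name: my-cluster
  namespace: default
spec:
  resourceGroup: my-cluster-rg   # required
  location: westus               # required
  loadBalancerRef:               # optional; points at a separately created LoadBalancer
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
    kind: LoadBalancer
    name: my-cluster-lb
    namespace: default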


@ -0,0 +1,156 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: azurestackhcimachines.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
names:
categories:
- cluster-api
kind: AzureStackHCIMachine
plural: azurestackhcimachines
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: AzureStackHCIMachine is the Schema for the azurestackhcimachines API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AzureStackHCIMachineSpec defines the desired state of AzureStackHCIMachine
properties:
availabilityZone:
properties:
enabled:
type: boolean
id:
type: string
type: object
image:
description: 'Image defines information about the image to use for VM
creation. There are three ways to specify an image: by ID, by publisher,
or by Shared Image Gallery. If specifying an image by ID, only the
ID field needs to be set. If specifying an image by publisher, the
Publisher, Offer, SKU, and Version fields must be set. If specifying
an image from a Shared Image Gallery, the SubscriptionID, ResourceGroup,
Gallery, Name, and Version fields must be set.'
properties:
gallery:
type: string
id:
type: string
name:
type: string
offer:
type: string
publisher:
type: string
resourceGroup:
type: string
sku:
type: string
subscriptionID:
type: string
version:
type: string
type: object
location:
type: string
providerID:
description: ProviderID is the unique identifier as specified by the
cloud provider.
type: string
sshPublicKey:
type: string
vmSize:
type: string
required:
- image
- location
- sshPublicKey
- vmSize
type: object
status:
description: AzureStackHCIMachineStatus defines the observed state of AzureStackHCIMachine
properties:
addresses:
description: Addresses contains the addresses associated with the AzureStackHCI instance.
items:
description: NodeAddress contains information for the node's address.
properties:
address:
description: The node address.
type: string
type:
description: Node address type, one of Hostname, ExternalIP or
InternalIP.
type: string
required:
- address
- type
type: object
type: array
errorMessage:
description: "ErrorMessage will be set in the event that there is a
terminal problem reconciling the Machine and will contain a more verbose
string suitable for logging and human consumption. \n This field should
not be set for transitive errors that a controller faces that are
expected to be fixed automatically over time (like service outages),
but instead indicate that something is fundamentally wrong with the
Machine's spec or the configuration of the controller, and that manual
intervention is required. Examples of terminal errors would be invalid
combinations of settings in the spec, values that are unsupported
by the controller, or the responsible controller itself being critically
misconfigured. \n Any transient errors that occur during the reconciliation
of Machines can be added as events to the Machine object and/or logged
in the controller's output."
type: string
errorReason:
description: "ErrorReason will be set in the event that there is a terminal
problem reconciling the Machine and will contain a succinct value
suitable for machine interpretation. \n This field should not be set
for transitive errors that a controller faces that are expected to
be fixed automatically over time (like service outages), but instead
indicate that something is fundamentally wrong with the Machine's
spec or the configuration of the controller, and that manual intervention
is required. Examples of terminal errors would be invalid combinations
of settings in the spec, values that are unsupported by the controller,
or the responsible controller itself being critically misconfigured.
\n Any transient errors that occur during the reconciliation of Machines
can be added as events to the Machine object and/or logged in the
controller's output."
type: string
ready:
description: Ready is true when the provider resource is ready.
type: boolean
vmState:
description: VMState is the provisioning state of the AzureStackHCI virtual machine.
type: string
type: object
type: object
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
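
To make the image selection rules above concrete, the sketch below specifies an image from a Shared Image Gallery, which requires the SubscriptionID, ResourceGroup, Gallery, Name, and Version fields; all values are hypothetical.

# example-azurestackhcimachine.yaml (illustrative sketch, values assumed)
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
metadata:
  name: my-machine
spec:
  location: westus                 # required
  vmSize: Standard_A4_v2           # required; size name is an assumption
  sshPublicKey: ssh-rsa AAAA...    # required
  image:                           # required; Shared Image Gallery variant
    subscriptionID: 00000000-0000-0000-0000-000000000000
    resourceGroup: gallery-rg
    gallery: my-gallery
    name: linux-k8s
    version: "1.0.0"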


@ -0,0 +1,113 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: azurestackhcimachinetemplates.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
names:
categories:
- cluster-api
kind: AzureStackHCIMachineTemplate
plural: azurestackhcimachinetemplates
scope: Namespaced
validation:
openAPIV3Schema:
description: AzureStackHCIMachineTemplate is the Schema for the azurestackhcimachinetemplates
API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AzureStackHCIMachineTemplateSpec defines the desired state of AzureStackHCIMachineTemplate
properties:
template:
description: AzureStackHCIMachineTemplateResource describes the data needed to
create an AzureStackHCIMachine from a template
properties:
spec:
description: Spec is the specification of the desired behavior of
the machine.
properties:
availabilityZone:
properties:
enabled:
type: boolean
id:
type: string
type: object
image:
description: 'Image defines information about the image to use
for VM creation. There are three ways to specify an image:
by ID, by publisher, or by Shared Image Gallery. If specifying
an image by ID, only the ID field needs to be set. If specifying
an image by publisher, the Publisher, Offer, SKU, and Version
fields must be set. If specifying an image from a Shared Image
Gallery, the SubscriptionID, ResourceGroup, Gallery, Name,
and Version fields must be set.'
properties:
gallery:
type: string
id:
type: string
name:
type: string
offer:
type: string
publisher:
type: string
resourceGroup:
type: string
sku:
type: string
subscriptionID:
type: string
version:
type: string
type: object
location:
type: string
providerID:
description: ProviderID is the unique identifier as specified
by the cloud provider.
type: string
sshPublicKey:
type: string
vmSize:
type: string
required:
- image
- location
- sshPublicKey
- vmSize
type: object
required:
- spec
type: object
required:
- template
type: object
type: object
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
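
The template wraps the same machine spec one level deeper; this sketch uses the image-by-ID variant, where only the ID field needs to be set. Names and values are assumptions.

# example-azurestackhcimachinetemplate.yaml (illustrative sketch, values assumed)
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachineTemplate
metadata:
  name: my-machine-template
spec:
  template:
    spec:
      location: westus
      vmSize: Standard_A4_v2
      sshPublicKey: ssh-rsa AAAA...
      image:
        id: my-image-id   # image by ID: only the ID field needs to be set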


@ -0,0 +1,171 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: azurestackhcivirtualmachines.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
names:
kind: AzureStackHCIVirtualMachine
plural: azurestackhcivirtualmachines
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: AzureStackHCIVirtualMachine is the Schema for the azurestackhcivirtualmachines API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: AzureStackHCIVirtualMachineSpec defines the desired state of AzureStackHCIVirtualMachine
properties:
availabilityZone:
properties:
enabled:
type: boolean
id:
type: string
type: object
image:
description: 'Image defines information about the image to use for VM
creation. There are three ways to specify an image: by ID, by publisher,
or by Shared Image Gallery. If specifying an image by ID, only the
ID field needs to be set. If specifying an image by publisher, the
Publisher, Offer, SKU, and Version fields must be set. If specifying
an image from a Shared Image Gallery, the SubscriptionID, ResourceGroup,
Gallery, Name, and Version fields must be set.'
properties:
gallery:
type: string
id:
type: string
name:
type: string
offer:
type: string
publisher:
type: string
resourceGroup:
type: string
sku:
type: string
subscriptionID:
type: string
version:
type: string
type: object
location:
type: string
sshPublicKey:
type: string
vmSize:
type: string
identity:
type: string
bootstrapData:
type: string
resourceGroup:
type: string
vnetName:
type: string
clusterName:
type: string
subnetName:
type: string
macAddress:
type: string
backendPoolName:
type: string
required:
- vmSize
- image
- location
- sshPublicKey
- resourceGroup
- vnetName
- clusterName
- subnetName
type: object
status:
description: AzureStackHCIVirtualMachineStatus defines the observed state of AzureStackHCIVirtualMachine
properties:
addresses:
description: Addresses contains the addresses associated with the AzureStackHCI instance.
items:
description: NodeAddress contains information for the node's address.
properties:
address:
description: The node address.
type: string
type:
description: Node address type, one of Hostname, ExternalIP or
InternalIP.
type: string
required:
- address
- type
type: object
type: array
errorMessage:
description: "ErrorMessage will be set in the event that there is a
terminal problem reconciling the Machine and will contain a more verbose
string suitable for logging and human consumption. \n This field should
not be set for transitive errors that a controller faces that are
expected to be fixed automatically over time (like service outages),
but instead indicate that something is fundamentally wrong with the
Machine's spec or the configuration of the controller, and that manual
intervention is required. Examples of terminal errors would be invalid
combinations of settings in the spec, values that are unsupported
by the controller, or the responsible controller itself being critically
misconfigured. \n Any transient errors that occur during the reconciliation
of Machines can be added as events to the Machine object and/or logged
in the controller's output."
type: string
errorReason:
description: "ErrorReason will be set in the event that there is a terminal
problem reconciling the Machine and will contain a succinct value
suitable for machine interpretation. \n This field should not be set
for transitive errors that a controller faces that are expected to
be fixed automatically over time (like service outages), but instead
indicate that something is fundamentally wrong with the Machine's
spec or the configuration of the controller, and that manual intervention
is required. Examples of terminal errors would be invalid combinations
of settings in the spec, values that are unsupported by the controller,
or the responsible controller itself being critically misconfigured.
\n Any transient errors that occur during the reconciliation of Machines
can be added as events to the Machine object and/or logged in the
controller's output."
type: string
ready:
description: Ready is true when the provider resource is ready.
type: boolean
vmState:
description: VMState is the provisioning state of the AzureStackHCI virtual
machine.
type: string
type: object
type: object
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
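
AzureStackHCIVirtualMachine needs more wiring than the types above: per the required list, a manifest must set vmSize, image, location, sshPublicKey, resourceGroup, vnetName, clusterName, and subnetName. The sketch below uses hypothetical values throughout.

# example-azurestackhcivirtualmachine.yaml (illustrative sketch, values assumed)
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIVirtualMachine
metadata:
  name: my-vm
spec:
  vmSize: Standard_A4_v2
  location: westus
  sshPublicKey: ssh-rsa AAAA...
  resourceGroup: my-cluster-rg
  vnetName: my-cluster-vnet
  clusterName: my-cluster
  subnetName: my-cluster-controlplane-subnet
  image:
    id: my-image-id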


@ -0,0 +1,109 @@
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
creationTimestamp: null
name: loadbalancers.infrastructure.cluster.x-k8s.io
spec:
group: infrastructure.cluster.x-k8s.io
names:
kind: LoadBalancer
plural: loadbalancers
scope: Namespaced
subresources:
status: {}
validation:
openAPIV3Schema:
description: LoadBalancer is the Schema for the loadbalancers API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: LoadBalancerSpec defines the desired state of LoadBalancer
properties:
availabilityZone:
properties:
enabled:
type: boolean
id:
type: string
type: object
location:
type: string
sshPublicKey:
type: string
backendPoolName:
type: string
imageReference:
type: string
required:
- location
- sshPublicKey
- backendPoolName
- imageReference
type: object
status:
description: LoadBalancerStatus defines the observed state of LoadBalancer
properties:
errorMessage:
description: "ErrorMessage will be set in the event that there is a
terminal problem reconciling the Machine and will contain a more verbose
string suitable for logging and human consumption. \n This field should
not be set for transitive errors that a controller faces that are
expected to be fixed automatically over time (like service outages),
but instead indicate that something is fundamentally wrong with the
Machine's spec or the configuration of the controller, and that manual
intervention is required. Examples of terminal errors would be invalid
combinations of settings in the spec, values that are unsupported
by the controller, or the responsible controller itself being critically
misconfigured. \n Any transient errors that occur during the reconciliation
of Machines can be added as events to the Machine object and/or logged
in the controller's output."
type: string
errorReason:
description: "ErrorReason will be set in the event that there is a terminal
problem reconciling the Machine and will contain a succinct value
suitable for machine interpretation. \n This field should not be set
for transitive errors that a controller faces that are expected to
be fixed automatically over time (like service outages), but instead
indicate that something is fundamentally wrong with the Machine's
spec or the configuration of the controller, and that manual intervention
is required. Examples of terminal errors would be invalid combinations
of settings in the spec, values that are unsupported by the controller,
or the responsible controller itself being critically misconfigured.
\n Any transient errors that occur during the reconciliation of Machines
can be added as events to the Machine object and/or logged in the
controller's output."
type: string
ready:
description: Ready is true when the provider resource is ready.
type: boolean
vmState:
description: VMState is the provisioning state of the AzureStackHCI virtual machine.
type: string
address:
description: Address is the IP address of the load balancer.
type: string
type: object
type: object
version: v1alpha2
versions:
- name: v1alpha2
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
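
Tying this back to AzureStackHCICluster's loadBalancerRef: a LoadBalancer is created on its own with the four required fields, and once its status.address is populated the cluster controller copies that address into the cluster's API endpoints. Values below are illustrative.

# example-loadbalancer.yaml (illustrative sketch, values assumed)
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LoadBalancer
metadata:
  name: my-cluster-lb
spec:
  location: westus
  sshPublicKey: ssh-rsa AAAA...
  backendPoolName: my-cluster-backend-pool
  imageReference: my-lb-image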


@ -0,0 +1,27 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on a service name and namespace that live outside this kustomize package.
# It should be run via config/default.
resources:
- bases/infrastructure.cluster.x-k8s.io_azurestackhciclusters.yaml
- bases/infrastructure.cluster.x-k8s.io_azurestackhcimachines.yaml
- bases/infrastructure.cluster.x-k8s.io_azurestackhcimachinetemplates.yaml
- bases/infrastructure.cluster.x-k8s.io_azurestackhcivirtualmachines.yaml
- bases/infrastructure.cluster.x-k8s.io_loadbalancer.yaml
# +kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_azurestackhciclusters.yaml
#- patches/webhook_in_azurestackhcimachines.yaml
# +kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_azurestackhciclusters.yaml
#- patches/cainjection_in_azurestackhcimachines.yaml
# +kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml
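
If the conversion webhooks and CA injection were enabled, the commented entries above would be uncommented; the patch section would then read roughly as follows (a sketch of the scaffolded intent, not a change made in this commit):

patchesStrategicMerge:
- patches/webhook_in_azurestackhciclusters.yaml
- patches/webhook_in_azurestackhcimachines.yaml
- patches/cainjection_in_azurestackhciclusters.yaml
- patches/cainjection_in_azurestackhcimachines.yaml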


@ -0,0 +1,17 @@
# This file is for teaching kustomize how to substitute name and namespace references in CRDs
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/name
namespace:
- kind: CustomResourceDefinition
group: apiextensions.k8s.io
path: spec/conversion/webhookClientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations


@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: azurestackhciclusters.infrastructure.cluster.x-k8s.io


@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: azurestackhcimachines.infrastructure.cluster.x-k8s.io


@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: azurestackhcimachinetemplates.infrastructure.cluster.x-k8s.io


@ -0,0 +1,8 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: azurestackhcivirtualmachines.infrastructure.cluster.x-k8s.io


@ -0,0 +1,17 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: azurestackhciclusters.infrastructure.cluster.x-k8s.io
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert


@ -0,0 +1,17 @@
# The following patch enables conversion webhook for CRD
# CRD conversion requires k8s 1.13 or later.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: azurestackhcimachines.infrastructure.cluster.x-k8s.io
spec:
conversion:
strategy: Webhook
webhookClientConfig:
# this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank,
# but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager)
caBundle: Cg==
service:
namespace: system
name: webhook-service
path: /convert



@ -0,0 +1,75 @@
# Adds namespace to all resources.
namespace: caph-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match the prefix (text before '-') of the namespace
# field above.
namePrefix: caph-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
patchesStrategicMerge:
- manager_image_patch.yaml
- manager_pull_policy.yaml
- manager_credentials_patch.yaml
# Protect the /metrics endpoint by putting it behind auth.
# Only one of manager_auth_proxy_patch.yaml and
# manager_prometheus_metrics_patch.yaml should be enabled.
#- manager_auth_proxy_patch.yaml
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, uncomment the following line and
# comment manager_auth_proxy_patch.yaml.
# Only one of manager_auth_proxy_patch.yaml and
# manager_prometheus_metrics_patch.yaml should be enabled.
#- manager_prometheus_metrics_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: certmanager.k8s.io
# version: v1alpha1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: certmanager.k8s.io
# version: v1alpha1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service


@ -0,0 +1,25 @@
# This patch injects a sidecar container, an HTTP proxy for the controller manager,
# which performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=10"
ports:
- containerPort: 8443
name: https
- name: manager
args:
- "--metrics-addr=127.0.0.1:8080"
- "--enable-leader-election"


@ -0,0 +1,21 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
env:
- name: CLOUDAGENT_FQDN
valueFrom:
secretKeyRef:
name: manager-bootstrap-credentials
key: cloudagent-fqdn
- name: WSSD_DEBUG_MODE
valueFrom:
secretKeyRef:
name: manager-bootstrap-credentials
key: wssd-debug-mode


@ -0,0 +1,13 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- image: nwoodmsft/controller:0.14
name: manager
args:
- "--v=6"


@ -0,0 +1,19 @@
# This patch enables Prometheus scraping for the manager pod.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
metadata:
annotations:
prometheus.io/scrape: 'true'
spec:
containers:
# Expose the prometheus metrics on default port
- name: manager
ports:
- containerPort: 8080
name: metrics
protocol: TCP


@ -0,0 +1,11 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
imagePullPolicy: Always


@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
ports:
- containerPort: 443
name: webhook-server
protocol: TCP
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert


@ -0,0 +1,15 @@
# This patch adds annotations to the admission webhook configurations; the
# variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: mutating-webhook-configuration
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: validating-webhook-configuration
annotations:
certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)


@ -0,0 +1,9 @@
apiVersion: v1
kind: Secret
metadata:
name: manager-bootstrap-credentials
namespace: system
type: Opaque
data:
cloudagent-fqdn: ${CLOUDAGENT_FQDN_B64}
wssd-debug-mode: ${WSSD_DEBUG_MODE_B64}


@ -0,0 +1,5 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- manager.yaml
- credentials.yaml


@ -0,0 +1,31 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: caph-controller-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: caph-controller-manager
spec:
selector:
matchLabels:
control-plane: caph-controller-manager
replicas: 1
template:
metadata:
labels:
control-plane: caph-controller-manager
spec:
containers:
- args:
- --enable-leader-election
image: controller:latest
imagePullPolicy: Always
name: manager
terminationGracePeriodSeconds: 10


@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups: ["authentication.k8s.io"]
resources:
- tokenreviews
verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
resources:
- subjectaccessreviews
verbs: ["create"]


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: system


@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
annotations:
prometheus.io/port: "8443"
prometheus.io/scheme: https
prometheus.io/scrape: "true"
labels:
control-plane: caph-controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: caph-controller-manager


@ -0,0 +1,10 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml


@ -0,0 +1,32 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: system

128
config/rbac/role.yaml Normal file

@ -0,0 +1,128 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- clusters
- clusters/status
verbs:
- get
- list
- watch
- apiGroups:
- cluster.x-k8s.io
resources:
- machines
- machines/status
verbs:
- get
- list
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhciclusters
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhciclusters/status
verbs:
- get
- patch
- update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhcimachines
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhcimachines/status
verbs:
- get
- patch
- update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhcivirtualmachines
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- azurestackhcivirtualmachines/status
verbs:
- get
- patch
- update
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- loadbalancers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- infrastructure.cluster.x-k8s.io
resources:
- loadbalancers/status
verbs:
- get
- patch
- update
- apiGroups:
- ""
resources:
- secrets
verbs:
- create
- get
- list
- patch
- watch
- vm.infrastructure.cluster.x-k8s.io


@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: system


@ -0,0 +1,220 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"time"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/utils/pointer"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/patch"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// AzureStackHCIClusterReconciler reconciles an AzureStackHCICluster object
type AzureStackHCIClusterReconciler struct {
client.Client
Log logr.Logger
}
func (r *AzureStackHCIClusterReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureStackHCICluster{}).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhciclusters,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhciclusters/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch
func (r *AzureStackHCIClusterReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
log := r.Log.WithValues("namespace", req.Namespace, "azureStackHCICluster", req.Name)
// Fetch the AzureStackHCICluster instance
azureStackHCICluster := &infrav1.AzureStackHCICluster{}
err := r.Get(ctx, req.NamespacedName, azureStackHCICluster)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, azureStackHCICluster.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if cluster == nil {
log.Info("Cluster Controller has not yet set OwnerRef")
return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
}
log = log.WithValues("cluster", cluster.Name)
// Create the scope.
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: log,
Cluster: cluster,
AzureStackHCICluster: azureStackHCICluster,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureStackHCIMachine changes.
defer func() {
if err := clusterScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted clusters
if !azureStackHCICluster.DeletionTimestamp.IsZero() {
return r.reconcileDelete(clusterScope)
}
// Handle non-deleted clusters
return r.reconcileNormal(clusterScope)
}
func (r *AzureStackHCIClusterReconciler) reconcileNormal(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
clusterScope.Info("Reconciling AzureStackHCICluster")
azureStackHCICluster := clusterScope.AzureStackHCICluster
// If the AzureStackHCICluster doesn't have our finalizer, add it.
if !util.Contains(azureStackHCICluster.Finalizers, infrav1.ClusterFinalizer) {
azureStackHCICluster.Finalizers = append(azureStackHCICluster.Finalizers, infrav1.ClusterFinalizer)
}
err := newAzureStackHCIClusterReconciler(clusterScope).Reconcile()
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to reconcile cluster services")
}
if ok, err := r.reconcileLoadBalancer(clusterScope); !ok {
if err != nil {
return reconcile.Result{}, err
}
clusterScope.Info("Load balancer was not reconciled")
return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
}
// No errors, so mark us ready so the Cluster API Cluster Controller can pull it
azureStackHCICluster.Status.Ready = true
// We mark the Cluster as Ready so CAPI can progress, but we still need to wait for
// the kubeconfig to be written to secrets.
err = newAzureStackHCIClusterReconciler(clusterScope).ReconcileKubeConfig()
if err != nil {
return reconcile.Result{}, errors.Wrap(err, "failed to reconcile cluster services")
}
return reconcile.Result{}, nil
}
func (r *AzureStackHCIClusterReconciler) reconcileDelete(clusterScope *scope.ClusterScope) (reconcile.Result, error) {
clusterScope.Info("Reconciling AzureStackHCICluster delete")
azureStackHCICluster := clusterScope.AzureStackHCICluster
if err := newAzureStackHCIClusterReconciler(clusterScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureStackHCICluster %s/%s", azureStackHCICluster.Namespace, azureStackHCICluster.Name)
}
// Cluster is deleted so remove the finalizer.
clusterScope.AzureStackHCICluster.Finalizers = util.Filter(clusterScope.AzureStackHCICluster.Finalizers, infrav1.ClusterFinalizer)
return reconcile.Result{}, nil
}
func (r *AzureStackHCIClusterReconciler) reconcileLoadBalancer(clusterScope *scope.ClusterScope) (bool, error) {
if clusterScope.LoadBalancerRef() == nil {
clusterScope.Info("Skipping load balancer reconciliation since AzureStackHCICluster.Spec.LoadBalancerRef is nil")
return true, nil
}
// if control plane endpoints already exist, skip load balancer reconciliation
if len(clusterScope.AzureStackHCICluster.Status.APIEndpoints) > 0 {
clusterScope.Info("Skipping load balancer reconciliation since at least one control plane endpoint already exists")
return true, nil
}
// find the load balancer
loadBalancer := &infrav1.LoadBalancer{}
loadBalancerNamespacedName := types.NamespacedName{
Namespace: clusterScope.AzureStackHCICluster.Spec.LoadBalancerRef.Namespace,
Name: clusterScope.AzureStackHCICluster.Spec.LoadBalancerRef.Name,
}
if err := r.Get(clusterScope.Context, loadBalancerNamespacedName, loadBalancer); err != nil {
if apierrors.IsNotFound(err) {
clusterScope.Info("resource specified by LoadBalancerRef not found", "Namespaced name", loadBalancerNamespacedName)
return false, nil
}
return false, err
}
// ensure there is an owner reference
clusterOwnerRef := metav1.OwnerReference{
APIVersion: clusterScope.APIVersion(),
Kind: clusterScope.Kind(),
Name: clusterScope.Name(),
UID: clusterScope.UID(),
BlockOwnerDeletion: pointer.BoolPtr(true),
}
loadBalancerOwnerRefs := loadBalancer.GetOwnerReferences()
if !util.HasOwnerRef(loadBalancerOwnerRefs, clusterOwnerRef) {
loadBalancerPatchHelper, err := patch.NewHelper(loadBalancer, r.Client)
if err != nil {
return false, err
}
loadBalancer.SetOwnerReferences(util.EnsureOwnerRef(loadBalancerOwnerRefs, clusterOwnerRef))
if err := loadBalancerPatchHelper.Patch(clusterScope.Context, loadBalancer); err != nil {
return false, err
}
clusterScope.Info("LoadBalancer owner reference set")
}
// wait for the load balancer ip to be available and update the control plane endpoints list
if loadBalancer.Status.Address == "" {
return false, nil
}
clusterScope.AzureStackHCICluster.Status.APIEndpoints = []infrav1.APIEndpoint{
{
Host: loadBalancer.Status.Address,
Port: 6443,
},
}
return true, nil
}
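
To make the flow of reconcileLoadBalancer concrete: once the referenced LoadBalancer reports an address, the controller mirrors it into the cluster status with the fixed API server port 6443. A hypothetical snapshot of the two resources after a successful pass (the address is illustrative):

# LoadBalancer, as observed by the controller
status:
  address: 10.0.0.4
---
# AzureStackHCICluster, after reconcileLoadBalancer returns true
status:
  apiEndpoints:
  - host: 10.0.0.4
    port: 6443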


@ -0,0 +1,136 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"fmt"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/keyvaults"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/secrets"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/virtualnetworks"
"github.com/pkg/errors"
"k8s.io/klog"
)
const (
KubeConfigSecretName = "kubeconf"
KubeConfigDataFieldName = "value"
)
// azureStackHCIClusterReconciler is the set of services required by the cluster controller
type azureStackHCIClusterReconciler struct {
scope *scope.ClusterScope
vnetSvc azurestackhci.Service
keyvaultSvc azurestackhci.Service
secretSvc azurestackhci.GetterService
}
// newAzureStackHCIClusterReconciler populates all the services based on input scope
func newAzureStackHCIClusterReconciler(scope *scope.ClusterScope) *azureStackHCIClusterReconciler {
return &azureStackHCIClusterReconciler{
scope: scope,
vnetSvc: virtualnetworks.NewService(scope),
keyvaultSvc: keyvaults.NewService(scope),
secretSvc: secrets.NewService(scope),
}
}
// Reconcile reconciles all the services in a predetermined order
func (r *azureStackHCIClusterReconciler) Reconcile() error {
klog.V(2).Infof("reconciling cluster %s", r.scope.Name())
r.createOrUpdateVnetName()
vnetSpec := &virtualnetworks.Spec{
Name: r.scope.Vnet().Name,
CIDR: azurestackhci.DefaultVnetCIDR,
}
if err := r.vnetSvc.Reconcile(r.scope.Context, vnetSpec); err != nil {
return errors.Wrapf(err, "failed to reconcile virtual network for cluster %s", r.scope.Name())
}
vaultSpec := &keyvaults.Spec{
Name: r.scope.Name(),
}
if err := r.keyvaultSvc.Reconcile(r.scope.Context, vaultSpec); err != nil {
return errors.Wrapf(err, "failed to reconcile keyvault for cluster %s", r.scope.Name())
}
return nil
}
// Delete tears down all the services in a predetermined order
func (r *azureStackHCIClusterReconciler) Delete() error {
vaultSpec := &keyvaults.Spec{
Name: r.scope.Name(),
}
if err := r.keyvaultSvc.Delete(r.scope.Context, vaultSpec); err != nil {
if !azurestackhci.ResourceNotFound(err) {
return errors.Wrapf(err, "failed to delete keyvault %s for cluster %s", r.scope.Name(), r.scope.Name())
}
}
vnetSpec := &virtualnetworks.Spec{
Name: r.scope.Vnet().Name,
CIDR: azurestackhci.DefaultVnetCIDR,
}
if err := r.vnetSvc.Delete(r.scope.Context, vnetSpec); err != nil {
if !azurestackhci.ResourceNotFound(err) {
return errors.Wrapf(err, "failed to delete virtual network %s for cluster %s", r.scope.Vnet().Name, r.scope.Name())
}
}
return nil
}
// ReconcileKubeConfig reconciles the kubeconfig from the cluster secrets
func (r *azureStackHCIClusterReconciler) ReconcileKubeConfig() error {
r.scope.Logger.Info("reconciling kubeconfig %s", r.scope.Name())
cluster := r.scope.Cluster
name := fmt.Sprintf("%s-kubeconfig", cluster.Name)
secret, err := r.scope.GetSecret(name)
if err != nil {
return errors.Wrapf(err, "kubernetes secret query failed %s", r.scope.Name())
}
r.scope.Logger.Info("recieved kubeconfig from the cluster")
data, ok := secret.Data[KubeConfigDataFieldName]
if !ok {
return nil
}
secretSpec := &secrets.Spec{
Name: KubeConfigSecretName,
VaultName: r.scope.Name(),
Value: string(data),
}
if err := r.secretSvc.Reconcile(r.scope.Context, secretSpec); err != nil {
return errors.Wrapf(err, "failed to reconcile secret for cluster %s", r.scope.Name())
}
return nil
}
// createOrUpdateVnetName creates or updates the virtual network (vnet) name
func (r *azureStackHCIClusterReconciler) createOrUpdateVnetName() {
if r.scope.Vnet().Name == "" {
r.scope.Vnet().Name = azurestackhci.GenerateVnetName(r.scope.Name())
}
}
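
ReconcileKubeConfig above reads the workload cluster's kubeconfig from a Kubernetes secret named "<cluster-name>-kubeconfig" and mirrors its "value" field into the keyvault as a secret named "kubeconf". A sketch of the source secret it expects (the cluster name and data are hypothetical):

apiVersion: v1
kind: Secret
metadata:
  name: my-cluster-kubeconfig
  namespace: default
type: Opaque
data:
  value: <base64-encoded kubeconfig>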


@ -0,0 +1,74 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"encoding/json"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
)
// updateMachineAnnotationJSON updates the `annotation` on `machine` with
// `content`. `content` in this case should be a `map[string]interface{}`
// suitable for turning into JSON. This `content` map will be marshalled into a
// JSON string before being set as the given `annotation`.
func (r *AzureStackHCIMachineReconciler) updateMachineAnnotationJSON(machine *infrav1.AzureStackHCIMachine, annotation string, content map[string]interface{}) error {
b, err := json.Marshal(content)
if err != nil {
return err
}
r.updateMachineAnnotation(machine, annotation, string(b))
return nil
}
// updateMachineAnnotation updates the `annotation` on the given `machine` with
// `content`.
func (r *AzureStackHCIMachineReconciler) updateMachineAnnotation(machine *infrav1.AzureStackHCIMachine, annotation string, content string) {
// Get the annotations; GetAnnotations can return nil, and writing to a nil map panics
annotations := machine.GetAnnotations()
if annotations == nil {
annotations = map[string]string{}
}
// Set our annotation to the given content.
annotations[annotation] = content
// Update the machine object with these annotations
machine.SetAnnotations(annotations)
}
// Returns a map[string]interface{} from a JSON annotation.
// This method gets the given `annotation` from the `machine` and unmarshalls it
// from a JSON string into a `map[string]interface{}`.
func (r *AzureStackHCIMachineReconciler) machineAnnotationJSON(machine *infrav1.AzureStackHCIMachine, annotation string) (map[string]interface{}, error) {
out := map[string]interface{}{}
jsonAnnotation := r.machineAnnotation(machine, annotation)
if len(jsonAnnotation) == 0 {
return out, nil
}
err := json.Unmarshal([]byte(jsonAnnotation), &out)
if err != nil {
return out, err
}
return out, nil
}
// Fetches the specific machine annotation.
func (r *AzureStackHCIMachineReconciler) machineAnnotation(machine *infrav1.AzureStackHCIMachine, annotation string) string {
return machine.GetAnnotations()[annotation]
}


@ -0,0 +1,601 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/base64"
"encoding/json"
"time"
"golang.org/x/text/encoding/unicode"
"fmt"
"strings"
"github.com/Azure/go-autorest/autorest/to"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
winapi "github.com/microsoft/cluster-api-provider-azurestackhci/api/windows"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/secrets"
"github.com/microsoft/moc-sdk-for-go/services/security/keyvault"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
apitypes "k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/tools/record"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AzureStackHCIMachineReconciler reconciles an AzureStackHCIMachine object
type AzureStackHCIMachineReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
}
func (r *AzureStackHCIMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureStackHCIMachine{}).
Watches(
&source.Kind{Type: &clusterv1.Machine{}},
&handler.EnqueueRequestsFromMapFunc{
ToRequests: util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("AzureStackHCIMachine")),
},
).
Watches(
&source.Kind{Type: &infrav1.AzureStackHCICluster{}},
&handler.EnqueueRequestsFromMapFunc{ToRequests: handler.ToRequestsFunc(r.AzureStackHCIClusterToAzureStackHCIMachines)},
).
Watches(
&source.Kind{Type: &infrav1.AzureStackHCIVirtualMachine{}},
&handler.EnqueueRequestForOwner{OwnerType: &infrav1.AzureStackHCIMachine{}, IsController: false},
).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhcimachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhcimachines/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines;machines/status,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
func (r *AzureStackHCIMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.TODO()
logger := r.Log.WithValues("namespace", req.Namespace, "azureStackHCIMachine", req.Name)
// Fetch the AzureStackHCIMachine VM.
azureStackHCIMachine := &infrav1.AzureStackHCIMachine{}
err := r.Get(ctx, req.NamespacedName, azureStackHCIMachine)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the Machine.
machine, err := util.GetOwnerMachine(ctx, r.Client, azureStackHCIMachine.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if machine == nil {
logger.Info("Machine Controller has not yet set OwnerRef")
return reconcile.Result{}, nil
}
logger = logger.WithValues("machine", machine.Name)
// Fetch the Cluster.
cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
if err != nil {
logger.Info("Machine is missing cluster label or cluster does not exist")
return reconcile.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)
azureStackHCICluster := &infrav1.AzureStackHCICluster{}
azureStackHCIClusterName := client.ObjectKey{
Namespace: azureStackHCIMachine.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureStackHCIClusterName, azureStackHCICluster); err != nil {
logger.Info("AzureStackHCICluster is not available yet")
return reconcile.Result{}, nil
}
logger = logger.WithValues("azureStackHCICluster", azureStackHCICluster.Name)
// Create the cluster scope
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger,
Cluster: cluster,
AzureStackHCICluster: azureStackHCICluster,
})
if err != nil {
return reconcile.Result{}, err
}
// Create the machine scope
machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{
Logger: logger,
Client: r.Client,
Cluster: cluster,
Machine: machine,
AzureStackHCICluster: azureStackHCICluster,
AzureStackHCIMachine: azureStackHCIMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureStackHCIMachine changes.
defer func() {
if err := machineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !azureStackHCIMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(machineScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(machineScope, clusterScope)
}
func (r *AzureStackHCIMachineReconciler) reconcileNormal(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
machineScope.Info("Reconciling AzureStackHCIMachine")
// If the AzureStackHCIMachine is in an error state, return early.
if machineScope.AzureStackHCIMachine.Status.ErrorReason != nil || machineScope.AzureStackHCIMachine.Status.ErrorMessage != nil {
machineScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the AzureStackHCIMachine doesn't have our finalizer, add it.
// with controller-runtime 0.4.0 you can do this with AddFinalizer
if !util.Contains(machineScope.AzureStackHCIMachine.Finalizers, infrav1.MachineFinalizer) {
machineScope.AzureStackHCIMachine.Finalizers = append(machineScope.AzureStackHCIMachine.Finalizers, infrav1.MachineFinalizer)
}
if !machineScope.Cluster.Status.InfrastructureReady {
machineScope.Info("Cluster infrastructure is not ready yet")
return reconcile.Result{}, nil
}
// Make sure bootstrap data is available and populated.
if machineScope.Machine.Spec.Bootstrap.Data == nil {
machineScope.Info("Bootstrap data is not yet available")
return reconcile.Result{}, nil
}
if machineScope.AzureStackHCIMachine.Spec.OSDisk.OSType == "Windows" {
//populate bootstrap data
windowsBootstrap, err := r.getWindowsBootstrapData(clusterScope)
if err != nil {
return reconcile.Result{}, err
}
machineScope.Machine.Spec.Bootstrap.Data = &windowsBootstrap
}
vm, err := r.reconcileVirtualMachineNormal(machineScope, clusterScope)
if err != nil {
if apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// TODO(ncdc): move this validation logic into a validating webhook
if errs := r.validateUpdate(&machineScope.AzureStackHCIMachine.Spec, vm); len(errs) > 0 {
agg := kerrors.NewAggregate(errs)
r.Recorder.Eventf(machineScope.AzureStackHCIMachine, corev1.EventTypeWarning, "InvalidUpdate", "Invalid update: %s", agg.Error())
return reconcile.Result{}, nil
}
// Make sure Spec.ProviderID is always set.
machineScope.SetProviderID(fmt.Sprintf("azurestackhci:////%s", vm.Name))
// TODO(vincepri): Remove this annotation when clusterctl is no longer relevant.
machineScope.SetAnnotation("cluster-api-provider-azurestackhci", "true")
if vm.Status.VMState == nil {
machineScope.Info("Waiting for VM controller to set vm state")
return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
}
// pass the pointer so SetVMState can deep copy it, rather than dereferencing in the function parameter
machineScope.SetVMState(vm.Status.VMState)
switch *machineScope.GetVMState() {
case infrav1.VMStateSucceeded:
machineScope.Info("Machine VM is running", "name", vm.Name)
machineScope.SetReady()
case infrav1.VMStateUpdating:
machineScope.Info("Machine VM is updating", "name", vm.Name)
default:
machineScope.SetErrorReason(capierrors.UpdateMachineError)
machineScope.SetErrorMessage(errors.Errorf("AzureStackHCI VM state %q is unexpected", *machineScope.GetVMState()))
}
return reconcile.Result{}, nil
}
func (r *AzureStackHCIMachineReconciler) reconcileVirtualMachineNormal(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (*infrav1.AzureStackHCIVirtualMachine, error) {
vm := &infrav1.AzureStackHCIVirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterScope.Namespace(),
Name: machineScope.Name(),
},
}
mutateFn := func() (err error) {
// Mark the AzureStackHCIMachine as the owner of the AzureStackHCIVirtualMachine
vm.SetOwnerReferences(util.EnsureOwnerRef(
vm.OwnerReferences,
metav1.OwnerReference{
APIVersion: machineScope.Machine.APIVersion,
Kind: machineScope.Machine.Kind,
Name: machineScope.Machine.Name,
UID: machineScope.Machine.UID,
}))
vm.Spec.ResourceGroup = clusterScope.AzureStackHCICluster.Spec.ResourceGroup
vm.Spec.VnetName = clusterScope.AzureStackHCICluster.Spec.NetworkSpec.Vnet.Name
vm.Spec.ClusterName = clusterScope.AzureStackHCICluster.Name
switch role := machineScope.Role(); role {
case infrav1.Node:
vm.Spec.SubnetName = azurestackhci.GenerateNodeSubnetName(clusterScope.Name())
case infrav1.ControlPlane:
vm.Spec.SubnetName = azurestackhci.GenerateControlPlaneSubnetName(clusterScope.Name())
backendPoolName, err := r.GetBackendPoolName(machineScope, clusterScope)
if err != nil {
return err
} else if backendPoolName != "" {
vm.Spec.BackendPoolName = backendPoolName
}
default:
return errors.Errorf("unknown value %s for label `set` on machine %s, unable to create virtual machine resource", role, machineScope.Name())
}
image, err := getVMImage(machineScope)
if err != nil {
return errors.Wrap(err, "failed to get VM image")
}
image.DeepCopyInto(&vm.Spec.Image)
vm.Spec.VMSize = machineScope.AzureStackHCIMachine.Spec.VMSize
machineScope.AzureStackHCIMachine.Spec.AvailabilityZone.DeepCopyInto(&vm.Spec.AvailabilityZone)
machineScope.AzureStackHCIMachine.Spec.OSDisk.DeepCopyInto(&vm.Spec.OSDisk)
vm.Spec.Location = machineScope.AzureStackHCIMachine.Spec.Location
vm.Spec.SSHPublicKey = machineScope.AzureStackHCIMachine.Spec.SSHPublicKey
vm.Spec.BootstrapData = machineScope.Machine.Spec.Bootstrap.Data
return nil
}
if _, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn); err != nil {
if apierrors.IsAlreadyExists(err) {
clusterScope.Info("AzureStackHCIVirtualMachine already exists")
}
// return the error in every case; silently swallowing non-AlreadyExists errors here would hide failed creates from the caller
return nil, err
}
return vm, nil
}
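// Note: controllerutil.CreateOrUpdate fetches the object, runs the mutate
// function to project desired state onto it, and then issues a Create or an
// Update only when needed. A minimal usage sketch (obj and its field are
// hypothetical):
//
//   op, err := controllerutil.CreateOrUpdate(ctx, r.Client, obj, func() error {
//       obj.Spec.Foo = "bar" // set desired state here
//       return nil
//   })
//   // op reports whether the object was created, updated, or left unchanged.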
func (r *AzureStackHCIMachineReconciler) reconcileDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
machineScope.Info("Handling deleted AzureStackHCIMachine")
if err := r.reconcileVirtualMachineDelete(machineScope, clusterScope); err != nil {
return reconcile.Result{}, err
}
machineScope.AzureStackHCIMachine.Finalizers = util.Filter(machineScope.AzureStackHCIMachine.Finalizers, infrav1.MachineFinalizer)
// can use this method in controller runtime v0.4.0
// controllerutil.RemoveFinalizer(machineScope.AzureStackHCIMachine, infrav1.MachineFinalizer)
return reconcile.Result{}, nil
}
func (r *AzureStackHCIMachineReconciler) reconcileVirtualMachineDelete(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) error {
// use Get to find VM
vm := &infrav1.AzureStackHCIVirtualMachine{}
vmName := apitypes.NamespacedName{
Namespace: clusterScope.Namespace(),
Name: machineScope.Name(),
}
// delete it if it exists and is not already being deleted
if err := r.Client.Get(clusterScope.Context, vmName, vm); err != nil {
// if the VM resource is not found, it was already deleted
// otherwise return the error
if !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "failed to get AzureStackHCIVirtualMachine %s", vmName)
}
} else if vm.GetDeletionTimestamp().IsZero() {
// this means the VM resource was found and has not been deleted
// note: client.Delete only requests deletion; the actual removal is asynchronous
if err := r.Client.Delete(clusterScope.Context, vm); err != nil {
if !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "failed to delete AzureStackHCIVirtualMachine %s", vmName)
}
}
}
return nil
}
// validateUpdate checks that no immutable fields have been updated and
// returns a slice of errors representing attempts to change immutable state.
func (r *AzureStackHCIMachineReconciler) validateUpdate(spec *infrav1.AzureStackHCIMachineSpec, i *infrav1.AzureStackHCIVirtualMachine) (errs []error) {
// TODO: Add comparison logic for immutable fields
return errs
}
// AzureStackHCIClusterToAzureStackHCIMachines is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation
// of AzureStackHCIMachines.
func (r *AzureStackHCIMachineReconciler) AzureStackHCIClusterToAzureStackHCIMachines(o handler.MapObject) []ctrl.Request {
result := []ctrl.Request{}
c, ok := o.Object.(*infrav1.AzureStackHCICluster)
if !ok {
r.Log.Error(errors.Errorf("expected an AzureStackHCICluster but got a %T", o.Object), "failed to get AzureStackHCIMachine for AzureStackHCICluster")
return nil
}
log := r.Log.WithValues("AzureStackHCICluster", c.Name, "Namespace", c.Namespace)
cluster, err := util.GetOwnerCluster(context.TODO(), r.Client, c.ObjectMeta)
switch {
case apierrors.IsNotFound(err) || cluster == nil:
return result
case err != nil:
log.Error(err, "failed to get owning cluster")
return result
}
labels := map[string]string{clusterv1.MachineClusterLabelName: cluster.Name}
machineList := &clusterv1.MachineList{}
if err := r.List(context.TODO(), machineList, client.InNamespace(c.Namespace), client.MatchingLabels(labels)); err != nil {
log.Error(err, "failed to list Machines")
return nil
}
for _, m := range machineList.Items {
if m.Spec.InfrastructureRef.Name == "" {
continue
}
name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}
result = append(result, ctrl.Request{NamespacedName: name})
}
return result
}
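// A sketch of how a mapper like this is typically wired up in SetupWithManager,
// assuming the controller-runtime v0.2-style handler API used elsewhere in this
// file:
//
//   Watches(
//       &source.Kind{Type: &infrav1.AzureStackHCICluster{}},
//       &handler.EnqueueRequestsFromMapFunc{
//           ToRequests: handler.ToRequestsFunc(r.AzureStackHCIClusterToAzureStackHCIMachines),
//       },
//   )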
// GetBackendPoolName finds the cluster's load balancer and returns its backend pool name
func (r *AzureStackHCIMachineReconciler) GetBackendPoolName(machineScope *scope.MachineScope, clusterScope *scope.ClusterScope) (string, error) {
if clusterScope.LoadBalancerRef() == nil {
clusterScope.Info("Omitting backend pool name for control plane machine nic since AzureStackHCICluster.Spec.LoadBalancerRef is nil")
return "", nil
}
// find the load balancer
loadBalancer := &infrav1.LoadBalancer{}
loadBalancerNamespacedName := types.NamespacedName{
Namespace: clusterScope.AzureStackHCICluster.Spec.LoadBalancerRef.Namespace,
Name: clusterScope.AzureStackHCICluster.Spec.LoadBalancerRef.Name,
}
if err := r.Get(clusterScope.Context, loadBalancerNamespacedName, loadBalancer); err != nil {
return "", err
}
return loadBalancer.Spec.BackendPoolName, nil
}
// Pick image from the machine configuration, or use a default one.
func getVMImage(scope *scope.MachineScope) (*infrav1.Image, error) {
// Use custom image if provided
if scope.AzureStackHCIMachine.Spec.Image.Name != nil {
scope.Info("Using custom image name for machine", "machine", scope.AzureStackHCIMachine.GetName(), "imageName", scope.AzureStackHCIMachine.Spec.Image.Name)
return &scope.AzureStackHCIMachine.Spec.Image, nil
}
return azurestackhci.GetDefaultLinuxImage(to.String(scope.Machine.Spec.Version))
}
func (r *AzureStackHCIMachineReconciler) getWindowsBootstrapData(clusterScope *scope.ClusterScope) (string, error) {
secretsSvc := secrets.NewService(clusterScope)
secretInterface, err := secretsSvc.Get(clusterScope.Context, &secrets.Spec{Name: "kubeconf", VaultName: clusterScope.Name()})
if err != nil {
return "", errors.Wrap(err, "error retrieving 'conf' secret")
}
conf, ok := secretInterface.(keyvault.Secret)
if !ok {
return "", errors.New("error retrieving 'conf' secret")
}
// Temporary until the CABPK work is complete
secretInterface, err = secretsSvc.Get(clusterScope.Context, &secrets.Spec{Name: "joincommand", VaultName: clusterScope.Name()})
if err != nil {
return "", errors.Wrap(err, "error retrieving 'joincommand' secret")
}
joinCmd, ok := secretInterface.(keyvault.Secret)
if !ok {
return "", errors.New("error retrieving 'joincommand' secret")
}
joinArray := strings.Fields(*joinCmd.Value)
// Temp: replace with clusterScope.Cluster.Spec.APIEndpoints[0]?
masterIP := strings.Split(joinArray[2], ":")[0]
// dummy value; a real username is not needed here
username := "masteruser"
token := joinArray[4]
hash := joinArray[6]
clusterCidr := clusterScope.Cluster.Spec.ClusterNetwork.Pods.CIDRBlocks[0]
// The following line is broken, so fall back to the kubeadm default service CIDR:
//serviceCidr := clusterScope.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks[0]
serviceCidr := "10.96.0.0/12"
kubecluster := winapi.KubeCluster{
Cri: winapi.Cri{
Name: "dockerd",
Images: winapi.Images{
Pause: "kubeletwin/pause",
Nanoserver: "microsoft/nanoserver",
ServerCore: "microsoft/windowsservercore",
},
},
Cni: winapi.Cni{
Name: "flannel",
Source: winapi.CniSource{
Name: "flanneld",
Url: "https://github.com/coreos/flannel/releases/download/v0.11.0/flanneld.exe",
},
Plugin: winapi.Plugin{
Name: "vxlan",
},
// TODO: fill in the expected interface name; the KubeCluster scripts will probably be changed to handle this
InterfaceName: "Ethernet 2",
},
Kubernetes: winapi.Kubernetes{
Source: winapi.KubernetesSource{
Release: "1.16.2",
Url: "https://dl.k8s.io/v1.16.2/kubernetes-node-windows-amd64.tar.gz",
},
ControlPlane: winapi.ControlPlane{
IpAddress: masterIP,
Username: username,
KubeadmToken: token,
KubeadmCAHash: hash,
},
KubeProxy: winapi.KubeProxy{
Gates: "WinOverlay=true",
},
Network: winapi.Network{
ServiceCidr: serviceCidr,
ClusterCidr: clusterCidr,
},
},
Install: winapi.Install{
Destination: "C:\\ProgramData\\Kubernetes",
},
}
kubeclusterJSON, err := json.Marshal(kubecluster)
if err != nil {
return "", err
}
kubeconfig := *conf.Value
psScript := `
$cmd = (Get-Service docker -ErrorAction SilentlyContinue).Status -eq "Running"
while (!$cmd)
{
Start-Sleep -s 1
$cmd = (Get-Service docker -ErrorAction SilentlyContinue).Status -eq "Running"
}
$BaseDir = "$env:ALLUSERSPROFILE\Kubernetes"
mkdir $BaseDir
$jsonString = '` + string(kubeclusterJSON) + `'
Set-Content -Path $BaseDir/kubecluster.json -Value $jsonString
$kubeconfig = '` + kubeconfig + `'
Set-Content -Path $BaseDir/config -Value $kubeconfig
$secureProtocols = @()
$insecureProtocols = @([System.Net.SecurityProtocolType]::SystemDefault, [System.Net.SecurityProtocolType]::Ssl3)
foreach ($protocol in [System.Enum]::GetValues([System.Net.SecurityProtocolType]))
{
if ($insecureProtocols -notcontains $protocol)
{
$secureProtocols += $protocol
}
}
[System.Net.ServicePointManager]::SecurityProtocol = $secureProtocols
$Url = "https://raw.githubusercontent.com/ksubrmnn/sig-windows-tools/bootstrap/kubeadm/KubeClusterHelper.psm1"
$Destination = "$BaseDir/KubeClusterHelper.psm1"
try {
(New-Object System.Net.WebClient).DownloadFile($Url,$Destination)
Write-Host "Downloaded [$Url] => [$Destination]"
} catch {
Write-Error "Failed to download $Url"
throw
}
ipmo $BaseDir/KubeClusterHelper.psm1
DownloadFile -Url "https://raw.githubusercontent.com/ksubrmnn/sig-windows-tools/bootstrap/kubeadm/KubeCluster.ps1" -Destination "$BaseDir/KubeCluster.ps1"
docker tag microsoft/nanoserver:latest mcr.microsoft.com/windows/nanoserver:latest
Write-Host "Building kubeletwin/pause image"
pushd
cd $Global:BaseDir
DownloadFile -Url "https://github.com/madhanrm/SDN/raw/kubeadm/Kubernetes/windows/Dockerfile" -Destination $BaseDir\Dockerfile
docker build -t kubeletwin/pause .
popd
$scriptPath = [io.Path]::Combine($BaseDir, "KubeCluster.ps1")
$configPath = [io.Path]::Combine($BaseDir, "kubecluster.json")
.$scriptPath -install -ConfigFile $configPath
.$scriptPath -join -ConfigFile $configPath
`
// powershell.exe -encoded expects the script as base64-encoded UTF-16LE
utf16leEncoding := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)
psScriptEncodedUtf16, err := utf16leEncoding.NewEncoder().String(psScript)
if err != nil {
return "", err
}
psScriptEncoded64 := base64.StdEncoding.EncodeToString([]byte(psScriptEncodedUtf16))
cmdScript := "mkdir %WINDIR%\\Setup\\Scripts && powershell.exe echo 'powershell.exe -encoded " + psScriptEncoded64 + " > C:\\logs.txt 2>&1' > %WINDIR%\\Setup\\Scripts\\SetupComplete.cmd"
cmdScriptEncoded := base64.StdEncoding.EncodeToString([]byte(cmdScript))
return cmdScriptEncoded, nil
}
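// Note: powershell.exe's -encoded flag (short for -EncodedCommand) expects the
// script as base64 over UTF-16LE bytes, which is why the script is transcoded
// before base64 encoding above. To sanity-check a payload by hand on a Windows
// host (value is hypothetical):
//
//   powershell.exe -encoded <psScriptEncoded64>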


@@ -0,0 +1,196 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"github.com/go-logr/logr"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
)
// AzureStackHCIVirtualMachineReconciler reconciles a AzureStackHCIVirtualMachine object
type AzureStackHCIVirtualMachineReconciler struct {
client.Client
Log logr.Logger
Scheme *runtime.Scheme
}
// SetupWithManager registers the controller with the k8s manager
func (r *AzureStackHCIVirtualMachineReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.AzureStackHCIVirtualMachine{}).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhcivirtualmachines,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=azurestackhcivirtualmachines/status,verbs=get;update;patch
// Reconcile handles events for the AzureStackHCIVirtualMachine objects this controller watches
func (r *AzureStackHCIVirtualMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.Background()
logger := r.Log.WithValues("namespace", req.Namespace, "azureStackHCIVirtualMachine", req.Name)
logger.Info("attempt reconcile resource", "name", req.NamespacedName)
azureStackHCIVirtualMachine := &infrav1.AzureStackHCIVirtualMachine{}
err := r.Get(ctx, req.NamespacedName, azureStackHCIVirtualMachine)
if err != nil {
logger.Info("resource not found", "name", req.NamespacedName)
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Create the machine scope
virtualMachineScope, err := scope.NewVirtualMachineScope(scope.VirtualMachineScopeParams{
Logger: logger,
Client: r.Client,
AzureStackHCIVirtualMachine: azureStackHCIVirtualMachine,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any AzureStackHCIVirtualMachine changes.
defer func() {
if err := virtualMachineScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted machines
if !azureStackHCIVirtualMachine.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(virtualMachineScope)
}
// Handle non-deleted machines
return r.reconcileNormal(ctx, virtualMachineScope)
}
// findVM queries the AzureStackHCI APIs and retrieves the VM if it exists, returns nil otherwise.
func (r *AzureStackHCIVirtualMachineReconciler) findVM(scope *scope.VirtualMachineScope, ams *azureStackHCIVirtualMachineService) (*infrav1.VM, error) {
vm, err := ams.VMIfExists()
if err != nil {
return nil, errors.Wrapf(err, "failed to query AzureStackHCIVirtualMachine")
}
return vm, nil
}
func (r *AzureStackHCIVirtualMachineReconciler) reconcileNormal(ctx context.Context, virtualMachineScope *scope.VirtualMachineScope) (reconcile.Result, error) {
virtualMachineScope.Info("Reconciling AzureStackHCIVirtualMachine")
// If the AzureStackHCIVirtualMachine is in an error state, return early.
if virtualMachineScope.AzureStackHCIVirtualMachine.Status.ErrorReason != nil || virtualMachineScope.AzureStackHCIVirtualMachine.Status.ErrorMessage != nil {
virtualMachineScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
}
// If the AzureStackHCIVirtualMachine doesn't have our finalizer, add it.
if !util.Contains(virtualMachineScope.AzureStackHCIVirtualMachine.Finalizers, infrav1.VirtualMachineFinalizer) {
virtualMachineScope.AzureStackHCIVirtualMachine.Finalizers = append(virtualMachineScope.AzureStackHCIVirtualMachine.Finalizers, infrav1.VirtualMachineFinalizer)
}
ams := newAzureStackHCIVirtualMachineService(virtualMachineScope)
// Get or create the virtual machine.
vm, err := r.getOrCreate(virtualMachineScope, ams)
if err != nil {
return reconcile.Result{}, err
}
/*
// right now validateUpdate seems to be a no-op so skipping this logic for now
// TODO(ncdc): move this validation logic into a validating webhook
if errs := r.validateUpdate(&virtualMachineScope.AzureStackHCIMachine.Spec, vm); len(errs) > 0 {
agg := kerrors.NewAggregate(errs)
r.Recorder.Eventf(virtualMachineScope.AzureStackHCIMachine, corev1.EventTypeWarning, "InvalidUpdate", "Invalid update: %s", agg.Error())
return reconcile.Result{}, nil
} */
// Proceed to reconcile the AzureStackHCIVirtualMachine state.
virtualMachineScope.SetVMState(vm.State)
switch vm.State {
case infrav1.VMStateSucceeded:
virtualMachineScope.Info("Machine VM is running", "name", virtualMachineScope.Name())
virtualMachineScope.SetReady()
case infrav1.VMStateUpdating:
virtualMachineScope.Info("Machine VM is updating", "name", virtualMachineScope.Name())
default:
virtualMachineScope.SetErrorReason(capierrors.UpdateMachineError)
virtualMachineScope.SetErrorMessage(errors.Errorf("AzureStackHCI VM state %q is unexpected", vm.State))
}
return reconcile.Result{}, nil
}
func (r *AzureStackHCIVirtualMachineReconciler) getOrCreate(virtualMachineScope *scope.VirtualMachineScope, ams *azureStackHCIVirtualMachineService) (*infrav1.VM, error) {
virtualMachineScope.Info("Attempting to find VM", "Name", virtualMachineScope.Name())
vm, err := r.findVM(virtualMachineScope, ams)
if err != nil {
return nil, err
}
if vm == nil {
// Create a new AzureStackHCIVirtualMachine if we couldn't find a running VM.
virtualMachineScope.Info("No VM found, creating VM", "Name", virtualMachineScope.Name())
vm, err = ams.Create()
if err != nil {
return nil, errors.Wrapf(err, "failed to create AzureStackHCIVirtualMachine")
}
}
return vm, nil
}
func (r *AzureStackHCIVirtualMachineReconciler) reconcileDelete(virtualMachineScope *scope.VirtualMachineScope) (_ reconcile.Result, reterr error) {
virtualMachineScope.Info("Handling deleted AzureStackHCIVirtualMachine", "Name", virtualMachineScope.Name())
if err := newAzureStackHCIVirtualMachineService(virtualMachineScope).Delete(); err != nil {
return reconcile.Result{}, errors.Wrapf(err, "error deleting AzureStackHCIVirtualMachine %s/%s", virtualMachineScope.Namespace(), virtualMachineScope.Name())
}
defer func() {
if reterr == nil {
virtualMachineScope.AzureStackHCIVirtualMachine.Finalizers = util.Filter(virtualMachineScope.AzureStackHCIVirtualMachine.Finalizers, infrav1.VirtualMachineFinalizer)
}
}()
return reconcile.Result{}, nil
}
// validateUpdate checks that no immutable fields have been updated and
// returns a slice of errors representing attempts to change immutable state.
func (r *AzureStackHCIVirtualMachineReconciler) validateUpdate(spec *infrav1.AzureStackHCIVirtualMachineSpec, i *infrav1.VM) (errs []error) {
// TODO: Add comparison logic for immutable fields
return errs
}


@@ -0,0 +1,281 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"encoding/base64"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/disks"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/networkinterfaces"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/secrets"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/virtualmachines"
sdk_compute "github.com/microsoft/moc-sdk-for-go/services/compute"
"github.com/microsoft/moc-sdk-for-go/services/security/keyvault"
"github.com/pkg/errors"
"k8s.io/klog"
)
// azureStackHCIVirtualMachineService is the set of services required by the cluster actuator; keeping them behind interfaces makes it easy to create fakes
// TODO: We should decide if we want to keep this
type azureStackHCIVirtualMachineService struct {
vmScope *scope.VirtualMachineScope
networkInterfacesSvc azurestackhci.Service
virtualMachinesSvc azurestackhci.GetterService
disksSvc azurestackhci.GetterService
secretsSvc azurestackhci.GetterService
}
// newAzureStackHCIVirtualMachineService populates all the services based on the input scope
func newAzureStackHCIVirtualMachineService(vmScope *scope.VirtualMachineScope) *azureStackHCIVirtualMachineService {
return &azureStackHCIVirtualMachineService{
vmScope: vmScope,
networkInterfacesSvc: networkinterfaces.NewService(vmScope),
virtualMachinesSvc: virtualmachines.NewService(vmScope),
disksSvc: disks.NewService(vmScope),
secretsSvc: secrets.NewService(vmScope),
}
}
// Create creates the NIC and VM for this machine; whether a machine should exist at all is handled by cluster-api
func (s *azureStackHCIVirtualMachineService) Create() (*infrav1.VM, error) {
nicName := azurestackhci.GenerateNICName(s.vmScope.Name())
nicErr := s.reconcileNetworkInterface(nicName)
if nicErr != nil {
return nil, errors.Wrapf(nicErr, "failed to create nic %s for machine %s", nicName, s.vmScope.Name())
}
vm, vmErr := s.createVirtualMachine(nicName)
if vmErr != nil {
return nil, errors.Wrapf(vmErr, "failed to create vm %s ", s.vmScope.Name())
}
return vm, nil
}
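// Design note: Create brings up the NIC first and then the VM, since the VM
// references the NIC by name; Delete below tears resources down in roughly the
// reverse direction (VM, then NIC, then OS disk).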
// Delete tears down all the services in a predetermined order
func (s *azureStackHCIVirtualMachineService) Delete() error {
vmSpec := &virtualmachines.Spec{
Name: s.vmScope.Name(),
}
err := s.virtualMachinesSvc.Delete(s.vmScope.Context, vmSpec)
if err != nil {
return errors.Wrapf(err, "failed to delete machine")
}
networkInterfaceSpec := &networkinterfaces.Spec{
Name: azurestackhci.GenerateNICName(s.vmScope.Name()),
VnetName: s.vmScope.VnetName(),
}
err = s.networkInterfacesSvc.Delete(s.vmScope.Context, networkInterfaceSpec)
if err != nil {
return errors.Wrapf(err, "Unable to delete network interface")
}
diskSpec := &disks.Spec{
Name: azurestackhci.GenerateOSDiskName(s.vmScope.Name()),
}
err = s.disksSvc.Delete(s.vmScope.Context, diskSpec)
if err != nil {
return errors.Wrapf(err, "Unable to delete os disk of machine %s", s.vmScope.Name())
}
return nil
}
func (s *azureStackHCIVirtualMachineService) VMIfExists() (*infrav1.VM, error) {
vmSpec := &virtualmachines.Spec{
Name: s.vmScope.Name(),
}
vmInterface, err := s.virtualMachinesSvc.Get(s.vmScope.Context, vmSpec)
if err != nil && vmInterface == nil {
return nil, nil
}
if err != nil {
return nil, errors.Wrap(err, "Failed to get vm")
}
vm, ok := vmInterface.(*infrav1.VM)
if !ok {
return nil, errors.New("returned incorrect vm interface")
}
klog.Infof("Found vm for machine %s", s.vmScope.Name())
return vm, nil
}
// getVirtualMachineZone gets a random availability zone from the available set;
// this will hopefully become an input from upstream MachineSets so all the VMs are balanced
func (s *azureStackHCIVirtualMachineService) getVirtualMachineZone() (string, error) {
return "", nil
}
func (s *azureStackHCIVirtualMachineService) reconcileDisk(disk infrav1.OSDisk) error {
diskSpec := &disks.Spec{
Name: azurestackhci.GenerateOSDiskName(s.vmScope.Name()), //disk.Name,
Source: disk.Source,
}
err := s.disksSvc.Reconcile(s.vmScope.Context, diskSpec)
if err != nil {
return errors.Wrap(err, "unable to create VM OS disk")
}
return err
}
func (s *azureStackHCIVirtualMachineService) reconcileNetworkInterface(nicName string) error {
networkInterfaceSpec := &networkinterfaces.Spec{
Name: nicName,
VnetName: s.vmScope.VnetName(),
SubnetName: s.vmScope.SubnetName(), // this field is required to be passed from AzureStackHCIMachine
BackendPoolName: s.vmScope.BackendPoolName(),
}
err := s.networkInterfacesSvc.Reconcile(s.vmScope.Context, networkInterfaceSpec)
if err != nil {
return errors.Wrap(err, "unable to create VM network interface")
}
return err
}
func (s *azureStackHCIVirtualMachineService) createVirtualMachine(nicName string) (*infrav1.VM, error) {
decoded, err := base64.StdEncoding.DecodeString(s.vmScope.AzureStackHCIVirtualMachine.Spec.SSHPublicKey)
if err != nil {
return nil, errors.Wrapf(err, "failed to decode ssh public key")
}
vmSpec := &virtualmachines.Spec{
Name: s.vmScope.Name(),
}
vmInterface, err := s.virtualMachinesSvc.Get(s.vmScope.Context, vmSpec)
if err != nil && vmInterface == nil {
var vmZone string
azSupported := s.isAvailabilityZoneSupported()
if azSupported {
useAZ := true
if s.vmScope.AzureStackHCIVirtualMachine.Spec.AvailabilityZone.Enabled != nil {
useAZ = *s.vmScope.AzureStackHCIVirtualMachine.Spec.AvailabilityZone.Enabled
}
if useAZ {
var zoneErr error
vmZone, zoneErr = s.getVirtualMachineZone()
if zoneErr != nil {
return nil, errors.Wrap(zoneErr, "failed to get availability zone")
}
}
}
if s.vmScope.AzureStackHCIVirtualMachine.Spec.OSDisk.OSType == infrav1.OSTypeWindows {
klog.V(2).Infof("vm ostype is windows, retrieving kubeadm 'joincommand' secret from vault %s ..", s.vmScope.ClusterName())
secretInterface, err := s.secretsSvc.Get(s.vmScope.Context, &secrets.Spec{Name: "joincommand", VaultName: s.vmScope.ClusterName()})
if err != nil {
return nil, errors.Wrap(err, "error retrieving 'joincommand' secret")
}
joinCmd, ok := secretInterface.(keyvault.Secret)
if !ok {
return nil, errors.New("error retrieving 'joincommand' secret")
}
klog.V(2).Infof("TEMP: joincommand is: %s", *joinCmd.Value)
}
vmType := sdk_compute.Tenant
if s.vmScope.LoadBalancerVM() {
vmType = sdk_compute.LoadBalancer
}
s.vmScope.Info("VM type is:", "vmType", vmType)
vmSpec = &virtualmachines.Spec{
Name: s.vmScope.Name(),
NICName: nicName,
SSHKeyData: string(decoded),
Size: s.vmScope.AzureStackHCIVirtualMachine.Spec.VMSize,
OSDisk: s.vmScope.AzureStackHCIVirtualMachine.Spec.OSDisk,
Image: s.vmScope.AzureStackHCIVirtualMachine.Spec.Image,
CustomData: *s.vmScope.AzureStackHCIVirtualMachine.Spec.BootstrapData,
Zone: vmZone,
VMType: vmType,
}
err = s.virtualMachinesSvc.Reconcile(s.vmScope.Context, vmSpec)
if err != nil {
return nil, errors.Wrapf(err, "failed to create or get machine")
}
} else if err != nil {
return nil, errors.Wrap(err, "failed to get vm")
}
newVM, err := s.virtualMachinesSvc.Get(s.vmScope.Context, vmSpec)
if err != nil {
return nil, errors.Wrap(err, "failed to get vm")
}
vm, ok := newVM.(*infrav1.VM)
if !ok {
return nil, errors.New("returned incorrect vm interface")
}
if vm.State == "" {
return nil, errors.Errorf("vm %s is nil provisioning state, reconcile", s.vmScope.Name())
}
if vm.State == infrav1.VMStateFailed {
// If VM failed provisioning, delete it so it can be recreated
err = s.virtualMachinesSvc.Delete(s.vmScope.Context, vmSpec)
if err != nil {
return nil, errors.Wrapf(err, "failed to delete machine")
}
return nil, errors.Errorf("vm %s is deleted, retry creating in next reconcile", s.vmScope.Name())
} else if vm.State != infrav1.VMStateSucceeded {
return nil, errors.Errorf("vm %s is still in provisioning state %s, reconcile", s.vmScope.Name(), vm.State)
}
return vm, nil
}
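// Design note: a VM reporting VMStateFailed is deleted above and an error is
// returned, so the next reconcile recreates it from scratch instead of trying
// to repair a partially provisioned machine.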
// isAvailabilityZoneSupported determines if Availability Zones are supported in a selected location
// based on SupportedAvailabilityZoneLocations. Returns true if supported.
func (s *azureStackHCIVirtualMachineService) isAvailabilityZoneSupported() bool {
for _, supportedLocation := range azurestackhci.SupportedAvailabilityZoneLocations {
if s.vmScope.Location() == supportedLocation {
return true
}
}
s.vmScope.V(2).Info("Availability Zones are not supported in the selected location", "location", s.vmScope.Location())
return false
}


@@ -0,0 +1,475 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/base64"
"fmt"
"time"
"github.com/Azure/go-autorest/autorest/to"
"github.com/go-logr/logr"
infrav1 "github.com/microsoft/cluster-api-provider-azurestackhci/api/v1alpha2"
azurestackhci "github.com/microsoft/cluster-api-provider-azurestackhci/cloud"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/scope"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/groups"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/loadbalancers"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/networkinterfaces"
"github.com/microsoft/cluster-api-provider-azurestackhci/cloud/services/vippools"
"github.com/microsoft/moc-sdk-for-go/services/cloud"
"github.com/microsoft/moc-sdk-for-go/services/network"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
apitypes "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
capierrors "sigs.k8s.io/cluster-api/errors"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// LoadBalancerReconciler reconciles a LoadBalancer object
type LoadBalancerReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
useVIP bool
}
func (r *LoadBalancerReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error {
// later we will also want to watch the cluster which owns the LB
return ctrl.NewControllerManagedBy(mgr).
WithOptions(options).
For(&infrav1.LoadBalancer{}).
Watches(
&source.Kind{Type: &infrav1.AzureStackHCIVirtualMachine{}},
&handler.EnqueueRequestForOwner{OwnerType: &infrav1.LoadBalancer{}, IsController: false},
).
Complete(r)
}
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=loadbalancers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=loadbalancers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch
func (r *LoadBalancerReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, reterr error) {
ctx := context.Background()
logger := r.Log.WithValues("namespace", req.Namespace, "loadBalancer", req.Name)
// Fetch the LoadBalancer resource.
loadBalancer := &infrav1.LoadBalancer{}
err := r.Get(ctx, req.NamespacedName, loadBalancer)
if err != nil {
if apierrors.IsNotFound(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
// Fetch the CAPI Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, loadBalancer.ObjectMeta)
if err != nil {
return reconcile.Result{}, err
}
if cluster == nil {
logger.Info("Waiting for AzureStackHCICluster Controller to set OwnerRef on LoadBalancer")
return reconcile.Result{}, nil
}
azureStackHCICluster := &infrav1.AzureStackHCICluster{}
azureStackHCIClusterName := client.ObjectKey{
Namespace: loadBalancer.Namespace,
Name: cluster.Spec.InfrastructureRef.Name,
}
if err := r.Client.Get(ctx, azureStackHCIClusterName, azureStackHCICluster); err != nil {
logger.Info("AzureStackHCICluster is not available yet")
return reconcile.Result{}, nil
}
// create a cluster scope for the request.
clusterScope, err := scope.NewClusterScope(scope.ClusterScopeParams{
Client: r.Client,
Logger: logger.WithValues("cluster", cluster.Name),
Cluster: cluster,
AzureStackHCICluster: azureStackHCICluster,
})
if err != nil {
return reconcile.Result{}, err
}
// create a lb scope for this request.
loadBalancerScope, err := scope.NewLoadBalancerScope(scope.LoadBalancerScopeParams{
Logger: logger.WithValues("loadBalancer", loadBalancer.Name),
Client: r.Client,
LoadBalancer: loadBalancer,
AzureStackHCICluster: azureStackHCICluster,
Cluster: cluster,
})
if err != nil {
return reconcile.Result{}, errors.Errorf("failed to create scope: %+v", err)
}
// Always close the scope when exiting this function so we can persist any LoadBalancer changes.
defer func() {
if err := loadBalancerScope.Close(); err != nil && reterr == nil {
reterr = err
}
}()
// Handle deleted LoadBalancers.
if !loadBalancer.ObjectMeta.DeletionTimestamp.IsZero() {
return r.reconcileDelete(loadBalancerScope, clusterScope)
}
// Handle non-deleted machines
return r.reconcileNormal(loadBalancerScope, clusterScope)
}
func (r *LoadBalancerReconciler) reconcileNormal(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) (reconcile.Result, error) {
loadBalancerScope.Info("Reconciling LoadBalancer")
// If the LoadBalancer is in an error state, return early.
/* if loadBalancerScope.LoadBalancer.Status.ErrorReason != nil || loadBalancerScope.LoadBalancer.Status.ErrorMessage != nil {
loadBalancerScope.Info("Error state detected, skipping reconciliation")
return reconcile.Result{}, nil
} */
// If the LoadBalancer doesn't have our finalizer, add it.
// With controller-runtime v0.4.0 this can be done with controllerutil.AddFinalizer.
if !util.Contains(loadBalancerScope.LoadBalancer.Finalizers, infrav1.LoadBalancerFinalizer) {
loadBalancerScope.LoadBalancer.Finalizers = append(loadBalancerScope.LoadBalancer.Finalizers, infrav1.LoadBalancerFinalizer)
}
vm, err := r.reconcileNormalVirtualMachine(loadBalancerScope, clusterScope)
if err != nil {
if apierrors.IsAlreadyExists(err) {
return reconcile.Result{}, nil
}
return reconcile.Result{}, err
}
if vm.Status.VMState == nil {
loadBalancerScope.Info("Waiting for VM controller to set vm state")
return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
}
// pass the pointer so SetVMState can deep copy it, rather than dereferencing in the function parameter
loadBalancerScope.SetVMState(vm.Status.VMState)
switch *loadBalancerScope.GetVMState() {
case infrav1.VMStateSucceeded:
loadBalancerScope.Info("Machine VM is running", "name", vm.Name)
loadBalancerScope.SetReady()
case infrav1.VMStateUpdating:
loadBalancerScope.Info("Machine VM is updating", "name", vm.Name)
default:
loadBalancerScope.SetErrorReason(capierrors.UpdateMachineError)
loadBalancerScope.SetErrorMessage(errors.Errorf("AzureStackHCI VM state %q is unexpected", *loadBalancerScope.GetVMState()))
}
// reconcile the loadbalancer
err = r.reconcileLoadBalancer(loadBalancerScope, clusterScope)
if err != nil {
return reconcile.Result{}, err
}
// wait for ip address to be exposed
if loadBalancerScope.Address() == "" {
err := r.reconcileLoadBalancerAddress(loadBalancerScope, clusterScope)
if err != nil {
return reconcile.Result{}, err
}
if loadBalancerScope.Address() == "" {
return reconcile.Result{Requeue: true, RequeueAfter: time.Minute}, nil
}
}
return reconcile.Result{}, nil
}
func (r *LoadBalancerReconciler) reconcileNormalVirtualMachine(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) (*infrav1.AzureStackHCIVirtualMachine, error) {
vm := &infrav1.AzureStackHCIVirtualMachine{
ObjectMeta: metav1.ObjectMeta{
Namespace: clusterScope.Namespace(),
Name: loadBalancerScope.Name(),
},
}
mutateFn := func() (err error) {
// Mark the LoadBalancer as the owner of the AzureStackHCIVirtualMachine
vm.SetOwnerReferences(util.EnsureOwnerRef(
vm.OwnerReferences,
metav1.OwnerReference{
APIVersion: loadBalancerScope.LoadBalancer.APIVersion,
Kind: loadBalancerScope.LoadBalancer.Kind,
Name: loadBalancerScope.LoadBalancer.Name,
UID: loadBalancerScope.LoadBalancer.UID,
}))
vm.Spec.ResourceGroup = clusterScope.AzureStackHCICluster.Spec.ResourceGroup
vm.Spec.VnetName = clusterScope.AzureStackHCICluster.Spec.NetworkSpec.Vnet.Name
vm.Spec.ClusterName = clusterScope.AzureStackHCICluster.Name
vm.Spec.SubnetName = azurestackhci.GenerateNodeSubnetName(clusterScope.Name())
/* switch role := machineScope.Role(); role {
case infrav1.Node:
vm.Spec.SubnetName = azurestackhci.GenerateNodeSubnetName(clusterScope.Name())
case infrav1.ControlPlane:
vm.Spec.SubnetName = azurestackhci.GenerateControlPlaneSubnetName(clusterScope.Name())
// TODO: Temporary loadbalancer workaround for transparent networks. This gives a predictable MAC to only the first control plane node
default:
return errors.Errorf("unknown value %s for label `set` on machine %s, unable to create virtual machine resource", role, machineScope.Name())
} */
vm.Spec.BootstrapData = r.formatLoadBalancerCloudInit(loadBalancerScope, clusterScope)
vm.Spec.VMSize = "Default"
vm.Spec.Image = infrav1.Image{
Name: to.StringPtr(loadBalancerScope.LoadBalancer.Spec.ImageReference),
Offer: to.StringPtr(azurestackhci.DefaultImageOfferID),
Publisher: to.StringPtr(azurestackhci.DefaultImagePublisherID),
SKU: to.StringPtr(azurestackhci.DefaultImageSKU),
Version: to.StringPtr(azurestackhci.LatestVersion),
}
vm.Spec.Location = "westus"
vm.Spec.SSHPublicKey = loadBalancerScope.LoadBalancer.Spec.SSHPublicKey
return nil
}
if _, err := controllerutil.CreateOrUpdate(clusterScope.Context, r.Client, vm, mutateFn); err != nil {
if apierrors.IsAlreadyExists(err) {
clusterScope.Info("AzureStackHCIVirtualMachine already exists")
}
// return the error in every case; silently swallowing non-AlreadyExists errors here would hide failed creates from the caller
return nil, err
}
return vm, nil
}
func (r *LoadBalancerReconciler) reconcileLoadBalancerAddress(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) error {
if r.useVIP {
loadBalancerScope.Info("Attempting to vip for loadbalancer", "name", loadBalancerScope.LoadBalancer.Name)
lbSpec := &loadbalancers.Spec{
Name: loadBalancerScope.LoadBalancer.Name,
}
lbInterface, err := loadbalancers.NewService(clusterScope).Get(clusterScope.Context, lbSpec)
if err != nil {
return err
}
lb, ok := lbInterface.(network.LoadBalancer)
if !ok {
return errors.New("error getting load balancer")
}
loadBalancerScope.SetAddress(*((*lb.FrontendIPConfigurations)[0].IPAddress))
} else {
loadBalancerScope.Info("Attempting to get network interface information for loadbalancer", "name", loadBalancerScope.LoadBalancer.Name)
nicInterface, err := networkinterfaces.NewService(clusterScope).Get(clusterScope.Context,
&networkinterfaces.Spec{
Name: azurestackhci.GenerateNICName(loadBalancerScope.LoadBalancer.Name),
VnetName: clusterScope.AzureStackHCICluster.Spec.NetworkSpec.Vnet.Name,
})
if err != nil {
return err
}
nic, ok := nicInterface.(network.Interface)
if !ok {
return errors.New("error getting network interface")
}
if nic.IPConfigurations != nil && len(*nic.IPConfigurations) > 0 && (*nic.IPConfigurations)[0].PrivateIPAddress != nil && *((*nic.IPConfigurations)[0].PrivateIPAddress) != "" {
loadBalancerScope.SetAddress(*((*nic.IPConfigurations)[0].PrivateIPAddress))
loadBalancerScope.Info("Load balancer address is available", "address", loadBalancerScope.Address())
} else {
loadBalancerScope.Info("Load balancer address is not yet available")
}
}
return nil
}
func (r *LoadBalancerReconciler) reconcileLoadBalancer(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) error {
lbSpec := &loadbalancers.Spec{
Name: loadBalancerScope.LoadBalancer.Name,
BackendPoolName: loadBalancerScope.LoadBalancer.Spec.BackendPoolName,
}
// Currently, CAPI doesn't have the correct location.
loadBalancerScope.Info("Attempting to get location for group", "group", clusterScope.GetResourceGroup())
groupInterface, err := groups.NewService(clusterScope).Get(clusterScope.Context, &groups.Spec{Name: clusterScope.GetResourceGroup()})
if err != nil {
return err
}
group, ok := groupInterface.(cloud.Group)
if !ok {
return errors.New("error getting group")
}
location := *group.Location
// If a vippool does not exist, specify the vnet name.
loadBalancerScope.Info("Attempting to get vippool for location", "location", location)
vippool, err := vippools.NewService(clusterScope).Get(clusterScope.Context, &vippools.Spec{Location: location})
if err == nil && vippool != nil {
loadBalancerScope.Info("Using vippool", "vippool", vippool)
r.useVIP = true
} else {
r.useVIP = false
loadBalancerScope.Info("Vippool does not exist at location. Using the ip address of the virtual machine as the frontend", "location", location)
lbSpec.VnetName = clusterScope.AzureStackHCICluster.Spec.NetworkSpec.Vnet.Name
}
if err := loadbalancers.NewService(clusterScope).Reconcile(clusterScope.Context, lbSpec); err != nil {
return errors.Wrapf(err, "failed to reconcile loadbalancer %s", loadBalancerScope.LoadBalancer.Name)
}
return nil
}
func (r *LoadBalancerReconciler) reconcileDelete(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) (_ reconcile.Result, reterr error) {
loadBalancerScope.Info("Handling deleted LoadBalancer")
if err := r.reconcileDeleteLoadBalancer(loadBalancerScope, clusterScope); err != nil {
return reconcile.Result{}, err
}
if err := r.reconcileDeleteVirtualMachine(loadBalancerScope, clusterScope); err != nil {
return reconcile.Result{}, err
}
loadBalancerScope.LoadBalancer.Finalizers = util.Filter(loadBalancerScope.LoadBalancer.Finalizers, infrav1.LoadBalancerFinalizer)
// can use this method in controller runtime v0.4.0
// controllerutil.RemoveFinalizer(loadBalancerScope.LoadBalancer.Finalizers, infrav1.LoadBalancerFinalizer)
return reconcile.Result{}, nil
}
func (r *LoadBalancerReconciler) reconcileDeleteVirtualMachine(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) error {
// use Get to find VM
vm := &infrav1.AzureStackHCIVirtualMachine{}
vmName := apitypes.NamespacedName{
Namespace: clusterScope.Namespace(),
Name: loadBalancerScope.Name(),
}
// delete it if it exists and is not already being deleted
if err := r.Client.Get(loadBalancerScope.Context, vmName, vm); err != nil {
// if the VM resource is not found, it was already deleted
// otherwise return the error
if !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "failed to get AzureStackHCIVirtualMachine %s", vmName)
}
} else if vm.GetDeletionTimestamp().IsZero() {
// this means the VM resource was found and has not been deleted
// note: client.Delete only requests deletion; the actual removal is asynchronous
if err := r.Client.Delete(clusterScope.Context, vm); err != nil {
if !apierrors.IsNotFound(err) {
return errors.Wrapf(err, "failed to delete AzureStackHCIVirtualMachine %s", vmName)
}
}
}
return nil
}
func (r *LoadBalancerReconciler) reconcileDeleteLoadBalancer(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) error {
lbSpec := &loadbalancers.Spec{
Name: loadBalancerScope.LoadBalancer.Name,
}
if err := loadbalancers.NewService(clusterScope).Delete(clusterScope.Context, lbSpec); err != nil {
if !azurestackhci.ResourceNotFound(err) {
return errors.Wrapf(err, "failed to delete loadbalancer %s", loadBalancerScope.LoadBalancer.Name)
}
}
return nil
}
func (r *LoadBalancerReconciler) formatLoadBalancerCloudInit(loadBalancerScope *scope.LoadBalancerScope, clusterScope *scope.ClusterScope) *string {
ret := base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf(`
#cloud-config
packages:
- keepalived
- cronie
- diffutils
- hyper-v
- haproxy
write_files:
- path: /root/crontab.input
owner: root:root
permissions: '0640'
content: |
* * * * * /root/update.sh
- path: /root/update.sh
owner: root:root
permissions: '0755'
content: |
#!/bin/sh
# TODO - we could make this more generic in the future. For now, it is tailored to LB.
# create keepalived.conf and check_apiserver.sh
# Download keepalived.conf
export WSSD_DEBUG_MODE=on
/opt/wssd/k8s/wssdcloudctl security keyvault --cloudFqdn %[1]s --group %[2]s secret --vault-name %[2]s_%[3]s show --name keepalived.conf --query value --output tsv > /root/keepalived.conf.new
/opt/wssd/k8s/wssdcloudctl security keyvault --cloudFqdn %[1]s --group %[2]s secret --vault-name %[2]s_%[3]s show --name check_apiserver.sh --query value --output tsv > /root/check_apiserver.sh
/opt/wssd/k8s/wssdcloudctl security keyvault --cloudFqdn %[1]s --group %[2]s secret --vault-name %[2]s_%[3]s show --name haproxy.cfg --query value --output tsv > /root/haproxy.cfg.new
# if file diff - Restart keepalived (to pick up new conf).
if [ -f /root/keepalived.conf.new ]
then
if ! diff /etc/keepalived/keepalived.conf /root/keepalived.conf.new > /dev/null
then
cp /root/keepalived.conf.new /etc/keepalived/keepalived.conf
systemctl restart keepalived
fi
fi
if [ -f /root/haproxy.cfg.new ]
then
if ! diff /etc/haproxy/haproxy.cfg /root/haproxy.cfg.new > /dev/null
then
cp /root/haproxy.cfg.new /etc/haproxy/haproxy.cfg
systemctl restart haproxy
fi
fi
runcmd:
- |
systemctl start hv_kvp_daemon
# WSSD Setup
mkdir -p /opt/wssd/k8s
curl -o /opt/wssd/k8s/wssdcloudctl http://10.231.110.37/AzureEdge/0.7/wssdcloudctl
chmod 755 /opt/wssd/k8s/wssdcloudctl
export WSSD_DEBUG_MODE=on
crontab /root/crontab.input
systemctl start cron
systemctl start haproxy
#TODO: only open up ports that are needed. This would have to be moved to the cronjob.
systemctl stop iptables
`, clusterScope.CloudAgentFqdn, clusterScope.GetResourceGroup(), loadBalancerScope.Name())))
return &ret
}
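// Note: the cloud-init template above uses indexed format verbs: %[1]s is the
// cloud agent FQDN, %[2]s the resource group name, and %[3]s the load balancer
// name, matching the Sprintf arguments in order.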

13
docs/README.md Normal file

@@ -0,0 +1,13 @@
# Documentation Index
## Quick start
TODO
## Development
TODO
## Project Documentation
TODO

777
examples/addons.yaml Normal file

@@ -0,0 +1,777 @@
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "vxlan"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
- blockaffinities
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
serviceAccountName: calico-node
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: calico/cni:v3.9.1
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: calico/cni:v3.9.1
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: calico/pod2daemon-flexvol:v3.9.1
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: calico/node:v3.9.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable VXLAN
- name: CALICO_IPV4POOL_VXLAN
value: "Always"
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within `--cluster-cidr`.
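            # (For reference, examples/generate.sh defaults POD_CIDR to this same range.)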
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
exec:
command:
- /bin/calico-node
- -felix-live
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- key: node-role.kubernetes.io/master
effect: NoSchedule
serviceAccountName: calico-kube-controllers
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: calico/kube-controllers:v3.9.1
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
# Source: calico/templates/calico-etcd-secrets.yaml
---
# Source: calico/templates/calico-typha.yaml
---
# Source: calico/templates/configure-canal.yaml

@@ -0,0 +1,29 @@
---
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Cluster
metadata:
name: ${CLUSTER_NAME}
spec:
clusterNetwork:
pods:
cidrBlocks: ["${POD_CIDR}"]
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCICluster
name: ${CLUSTER_NAME}
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCICluster
metadata:
name: ${CLUSTER_NAME}
spec:
resourceGroup: ${CLUSTER_RESOURCE_GROUP}
location: westus
networkSpec:
vnet:
name: "${VNET_NAME}"
loadBalancerRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: LoadBalancer
namespace: default
name: ${LOAD_BALANCER_NAME}

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- cluster.yaml
configurations:
- kustomizeconfig.yaml

@@ -0,0 +1,6 @@
namespace:
- kind: Cluster
group: cluster.x-k8s.io
version: v1alpha2
path: spec/infrastructureRef/namespace
create: true

@@ -0,0 +1,740 @@
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
name: ${CLUSTER_NAME}-controlplane-0
labels:
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
spec:
version: ${KUBERNETES_VERSION}
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
name: ${CLUSTER_NAME}-controlplane-0
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
name: ${CLUSTER_NAME}-controlplane-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
metadata:
name: ${CLUSTER_NAME}-controlplane-0
spec:
location: westus
vmSize: ${CONTROL_PLANE_MACHINE_TYPE}
image:
offer: "linux"
sshPublicKey: ${SSH_PUBLIC_KEY}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
metadata:
name: ${CLUSTER_NAME}-controlplane-0
spec:
initConfiguration:
nodeRegistration:
name: '{{ ds.meta_data["local_hostname"] }}'
clusterConfiguration:
apiServer:
timeoutForControlPlane: 20m
extraArgs:
audit-log-maxage: "30"
audit-log-maxbackup: "10"
audit-log-maxsize: "100"
audit-log-path: /var/log/apiserver/audit.log
profiling: "false"
controllerManager:
extraArgs:
profiling: "false"
terminated-pod-gc-threshold: "10"
scheduler:
extraArgs:
profiling: "false"
preKubeadmCommands:
- bash -c /tmp/kubeadm-bootstrap.sh
postKubeadmCommands:
- bash -c /tmp/kubeadm-postinstall.sh
files:
- path: /tmp/kubeadm-bootstrap.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -eux
function os_setup {
command -v "awk" >/dev/null 2>&1 || tdnf install -y awk
}
function dockerd_prereq() {
swapoff -a
modprobe overlay
modprobe br_netfilter
cat > /etc/sysctl.d/99-sysctl-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
sysctl --system
}
function dockerd_install() {
# Workaround a date/time race issue that causes apt-get update to fail
until apt-get update; do
sleep 1
done
# Steps from https://kubernetes.io/docs/setup/cri/
### Install packages to allow apt to use a repository over HTTPS
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
          ### Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
### Add Docker apt repository.
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
## Install Docker CE.
apt-get update && apt-get install -y docker-ce=18.06*
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
}
function systemctl_config() {
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl enable azurestackhci_boot
}
function kubernetes_install_ubuntu() {
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet="${KUBERNETES_SEMVER}-00" kubeadm="${KUBERNETES_SEMVER}-00" kubectl="${KUBERNETES_SEMVER}-00"
apt-mark hold kubelet kubeadm kubectl
}
function kubernetes_install() {
K8S_VERSION="${KUBERNETES_SEMVER}"
KUBEADM_VERSION="${KUBERNETES_SEMVER}"
#tdnf install -y kubernetes-${KUBERNETES_SEMVER} kubernetes-kubeadm-${KUBERNETES_SEMVER} kubernetes-pause-${KUBERNETES_SEMVER}
cat > /etc/sysctl.d/90-kubelet.conf << EOF
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
EOF
sysctl -p /etc/sysctl.d/90-kubelet.conf
sudo swapoff -a
}
# First setup the os with any required packages (e.g. awk)
os_setup
if [ "$(hostnamectl | awk '/Operating System:/ {print $3}')" = "Ubuntu" ]; then
# Ubuntu steps
dockerd_prereq
dockerd_install
systemctl_config
kubernetes_install_ubuntu
else
# Default steps
dockerd_prereq
systemctl_config
kubernetes_install
fi
- path: /tmp/kubeadm-postinstall.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -euxo pipefail
function kube_config() {
mkdir -p /home/${CAPH_USER}/.kube
cp /etc/kubernetes/admin.conf /home/${CAPH_USER}/.kube/config
chown ${CAPH_USER} /home/${CAPH_USER}/.kube
chown ${CAPH_USER} /home/${CAPH_USER}/.kube/config
}
function flannel_install() {
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /etc/kubernetes/cni/kube-flannel.yml
}
# Temp, this responsibility will move to caph
function patch_node_providerid() {
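          # Retry for up to ~10 seconds; the Node object may not be registered yet when this runs.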
for value in {1..10}
do
sleep 1
echo "Patch ProviderID (attempt $value)..."
KUBECONFIG=/etc/kubernetes/admin.conf kubectl patch node {{ ds.meta_data["local_hostname"] }} -p $'spec:\n providerID: azurestackhci:////{{ ds.meta_data["local_hostname"] }}' >/dev/null 2>&1 || continue
break
done
}
function save_iptables_config() {
iptables-save > /etc/sysconfig/iptables
}
kube_config
flannel_install
save_iptables_config
patch_node_providerid
- path: /etc/kubernetes/azurestackhci.json
owner: "root:root"
permissions: "0644"
content: |
{
"cloud": "AzureStackHCICloud",
"tenantID": "",
"subscriptionID": "1234",
"resourceGroup": "test",
"securityGroupName": "",
"location": "westus2",
"vmType": "vmss",
"vnetName": "External",
"vnetResourceGroup": "test",
"subnetName": "cluster",
"routeTableName": "",
"userAssignedID": "",
"loadBalancerSku": "Standard",
"maximumLoadBalancerRuleCount": 250,
"useManagedIdentityExtension": true,
"useInstanceMetadata": true
}
- path: /etc/rc.d/init.d/azurestackhci_boot.sh
owner: root:root
permissions: '0755'
content: |
#!/bin/bash
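          # Restore the iptables rules saved by kubeadm-postinstall.sh (save_iptables_config).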
iptables-restore -v -w < /etc/sysconfig/iptables
- path: /etc/systemd/system/azurestackhci_boot.service
owner: root:root
permissions: '0644'
content: |
[Unit]
Description=azurestackhci_boot
After=network.target
[Service]
Type=simple
ExecStart=/etc/rc.d/init.d/azurestackhci_boot.sh
TimeoutStartSec=0
[Install]
WantedBy=default.target
---
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
name: ${CLUSTER_NAME}-controlplane-1
labels:
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
spec:
version: ${KUBERNETES_VERSION}
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
name: ${CLUSTER_NAME}-controlplane-1
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
name: ${CLUSTER_NAME}-controlplane-1
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
metadata:
name: ${CLUSTER_NAME}-controlplane-1
spec:
location: westus
vmSize: ${CONTROL_PLANE_MACHINE_TYPE}
image:
offer: "linux"
sshPublicKey: ${SSH_PUBLIC_KEY}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
metadata:
name: ${CLUSTER_NAME}-controlplane-1
spec:
joinConfiguration:
controlPlane: {}
nodeRegistration:
name: '{{ ds.meta_data["local_hostname"] }}'
preKubeadmCommands:
- bash -c /tmp/kubeadm-bootstrap.sh
postKubeadmCommands:
- bash -c /tmp/kubeadm-postinstall.sh
files:
- path: /tmp/kubeadm-bootstrap.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -eux
function os_setup {
command -v "awk" >/dev/null 2>&1 || tdnf install -y awk
}
function dockerd_prereq() {
swapoff -a
modprobe overlay
modprobe br_netfilter
cat > /etc/sysctl.d/99-sysctl-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
sysctl --system
}
function dockerd_install() {
# Workaround a date/time race issue that causes apt-get update to fail
until apt-get update; do
sleep 1
done
# Steps from https://kubernetes.io/docs/setup/cri/
### Install packages to allow apt to use a repository over HTTPS
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
          ### Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
### Add Docker apt repository.
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
## Install Docker CE.
apt-get update && apt-get install -y docker-ce=18.06*
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
}
function systemctl_config() {
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl enable azurestackhci_boot
}
function kubernetes_install_ubuntu() {
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet="${KUBERNETES_SEMVER}-00" kubeadm="${KUBERNETES_SEMVER}-00" kubectl="${KUBERNETES_SEMVER}-00"
apt-mark hold kubelet kubeadm kubectl
}
function kubernetes_install() {
K8S_VERSION="${KUBERNETES_SEMVER}"
KUBEADM_VERSION="${KUBERNETES_SEMVER}"
#tdnf install -y kubernetes-${KUBERNETES_SEMVER} kubernetes-kubeadm-${KUBERNETES_SEMVER} kubernetes-pause-${KUBERNETES_SEMVER}
cat > /etc/sysctl.d/90-kubelet.conf << EOF
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
EOF
sysctl -p /etc/sysctl.d/90-kubelet.conf
sudo swapoff -a
}
# First setup the os with any required packages (e.g. awk)
os_setup
if [ "$(hostnamectl | awk '/Operating System:/ {print $3}')" = "Ubuntu" ]; then
# Ubuntu steps
dockerd_prereq
dockerd_install
systemctl_config
kubernetes_install_ubuntu
else
# Default steps
dockerd_prereq
systemctl_config
kubernetes_install
fi
- path: /tmp/kubeadm-postinstall.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -euxo pipefail
function kube_config() {
mkdir -p /home/${CAPH_USER}/.kube
cp /etc/kubernetes/admin.conf /home/${CAPH_USER}/.kube/config
chown ${CAPH_USER} /home/${CAPH_USER}/.kube
chown ${CAPH_USER} /home/${CAPH_USER}/.kube/config
}
function flannel_install() {
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /etc/kubernetes/cni/kube-flannel.yml
}
# Temp, this responsibility will move to caph
function patch_node_providerid() {
for value in {1..10}
do
sleep 1
echo "Patch ProviderID (attempt $value)..."
KUBECONFIG=/etc/kubernetes/admin.conf kubectl patch node {{ ds.meta_data["local_hostname"] }} -p $'spec:\n providerID: azurestackhci:////{{ ds.meta_data["local_hostname"] }}' >/dev/null 2>&1 || continue
break
done
}
function save_iptables_config() {
iptables-save > /etc/sysconfig/iptables
}
kube_config
flannel_install
save_iptables_config
patch_node_providerid
- path: /etc/kubernetes/azurestackhci.json
owner: "root:root"
permissions: "0644"
content: |
{
"cloud": "AzureStackHCICloud",
"tenantID": "",
"subscriptionID": "1234",
"resourceGroup": "test",
"securityGroupName": "",
"location": "westus2",
"vmType": "vmss",
"vnetName": "External",
"vnetResourceGroup": "test",
"subnetName": "cluster",
"routeTableName": "",
"userAssignedID": "",
"loadBalancerSku": "Standard",
"maximumLoadBalancerRuleCount": 250,
"useManagedIdentityExtension": true,
"useInstanceMetadata": true
}
- path: /etc/rc.d/init.d/azurestackhci_boot.sh
owner: root:root
permissions: '0755'
content: |
#!/bin/bash
iptables-restore -v -w < /etc/sysconfig/iptables
- path: /etc/systemd/system/azurestackhci_boot.service
owner: root:root
permissions: '0644'
content: |
[Unit]
Description=azurestackhci_boot
After=network.target
[Service]
Type=simple
ExecStart=/etc/rc.d/init.d/azurestackhci_boot.sh
TimeoutStartSec=0
[Install]
WantedBy=default.target
---
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
name: ${CLUSTER_NAME}-controlplane-2
labels:
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
spec:
version: ${KUBERNETES_VERSION}
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
name: ${CLUSTER_NAME}-controlplane-2
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
name: ${CLUSTER_NAME}-controlplane-2
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
metadata:
name: ${CLUSTER_NAME}-controlplane-2
spec:
location: westus
vmSize: ${CONTROL_PLANE_MACHINE_TYPE}
image:
offer: "linux"
sshPublicKey: ${SSH_PUBLIC_KEY}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
metadata:
name: ${CLUSTER_NAME}-controlplane-2
spec:
joinConfiguration:
controlPlane: {}
nodeRegistration:
name: '{{ ds.meta_data["local_hostname"] }}'
preKubeadmCommands:
- bash -c /tmp/kubeadm-bootstrap.sh
postKubeadmCommands:
- bash -c /tmp/kubeadm-postinstall.sh
files:
- path: /tmp/kubeadm-bootstrap.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -eux
function os_setup {
command -v "awk" >/dev/null 2>&1 || tdnf install -y awk
}
function dockerd_prereq() {
swapoff -a
modprobe overlay
modprobe br_netfilter
cat > /etc/sysctl.d/99-sysctl-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
sysctl --system
}
function dockerd_install() {
# Workaround a date/time race issue that causes apt-get update to fail
until apt-get update; do
sleep 1
done
# Steps from https://kubernetes.io/docs/setup/cri/
### Install packages to allow apt to use a repository over HTTPS
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
          ### Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
### Add Docker apt repository.
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
## Install Docker CE.
apt-get update && apt-get install -y docker-ce=18.06*
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
}
function systemctl_config() {
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl enable azurestackhci_boot
}
function kubernetes_install_ubuntu() {
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet="${KUBERNETES_SEMVER}-00" kubeadm="${KUBERNETES_SEMVER}-00" kubectl="${KUBERNETES_SEMVER}-00"
apt-mark hold kubelet kubeadm kubectl
}
function kubernetes_install() {
K8S_VERSION="${KUBERNETES_SEMVER}"
KUBEADM_VERSION="${KUBERNETES_SEMVER}"
#tdnf install -y kubernetes-${KUBERNETES_SEMVER} kubernetes-kubeadm-${KUBERNETES_SEMVER} kubernetes-pause-${KUBERNETES_SEMVER}
cat > /etc/sysctl.d/90-kubelet.conf << EOF
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
EOF
sysctl -p /etc/sysctl.d/90-kubelet.conf
sudo swapoff -a
}
# First setup the os with any required packages (e.g. awk)
os_setup
if [ "$(hostnamectl | awk '/Operating System:/ {print $3}')" = "Ubuntu" ]; then
# Ubuntu steps
dockerd_prereq
dockerd_install
systemctl_config
kubernetes_install_ubuntu
else
# Default steps
dockerd_prereq
systemctl_config
kubernetes_install
fi
- path: /tmp/kubeadm-postinstall.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -euxo pipefail
function kube_config() {
mkdir -p /home/${CAPH_USER}/.kube
cp /etc/kubernetes/admin.conf /home/${CAPH_USER}/.kube/config
chown ${CAPH_USER} /home/${CAPH_USER}/.kube
chown ${CAPH_USER} /home/${CAPH_USER}/.kube/config
}
function flannel_install() {
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /etc/kubernetes/cni/kube-flannel.yml
}
# Temp, this responsibility will move to caph
function patch_node_providerid() {
for value in {1..10}
do
sleep 1
echo "Patch ProviderID (attempt $value)..."
KUBECONFIG=/etc/kubernetes/admin.conf kubectl patch node {{ ds.meta_data["local_hostname"] }} -p $'spec:\n providerID: azurestackhci:////{{ ds.meta_data["local_hostname"] }}' >/dev/null 2>&1 || continue
break
done
}
function save_iptables_config() {
iptables-save > /etc/sysconfig/iptables
}
kube_config
flannel_install
save_iptables_config
patch_node_providerid
- path: /etc/kubernetes/azurestackhci.json
owner: "root:root"
permissions: "0644"
content: |
{
"cloud": "AzureStackHCICloud",
"tenantID": "",
"subscriptionID": "1234",
"resourceGroup": "test",
"securityGroupName": "",
"location": "westus2",
"vmType": "vmss",
"vnetName": "External",
"vnetResourceGroup": "test",
"subnetName": "cluster",
"routeTableName": "",
"userAssignedID": "",
"loadBalancerSku": "Standard",
"maximumLoadBalancerRuleCount": 250,
"useManagedIdentityExtension": true,
"useInstanceMetadata": true
}
- path: /etc/rc.d/init.d/azurestackhci_boot.sh
owner: root:root
permissions: '0755'
content: |
#!/bin/bash
iptables-restore -v -w < /etc/sysconfig/iptables
- path: /etc/systemd/system/azurestackhci_boot.service
owner: root:root
permissions: '0644'
content: |
[Unit]
Description=azurestackhci_boot
After=network.target
[Service]
Type=simple
ExecStart=/etc/rc.d/init.d/azurestackhci_boot.sh
TimeoutStartSec=0
[Install]
WantedBy=default.target

@@ -0,0 +1,256 @@
apiVersion: cluster.x-k8s.io/v1alpha2
kind: Machine
metadata:
name: ${CLUSTER_NAME}-controlplane-0
labels:
cluster.x-k8s.io/control-plane: "true"
cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}"
spec:
version: ${KUBERNETES_VERSION}
bootstrap:
configRef:
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
name: ${CLUSTER_NAME}-controlplane-0
infrastructureRef:
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
name: ${CLUSTER_NAME}-controlplane-0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha2
kind: AzureStackHCIMachine
metadata:
name: ${CLUSTER_NAME}-controlplane-0
spec:
location: westus
vmSize: ${CONTROL_PLANE_MACHINE_TYPE}
image:
offer: "linux"
sshPublicKey: ${SSH_PUBLIC_KEY}
---
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha2
kind: KubeadmConfig
metadata:
name: ${CLUSTER_NAME}-controlplane-0
spec:
initConfiguration:
nodeRegistration:
name: '{{ ds.meta_data["local_hostname"] }}'
clusterConfiguration:
apiServer:
timeoutForControlPlane: 20m
extraArgs:
audit-log-maxage: "30"
audit-log-maxbackup: "10"
audit-log-maxsize: "100"
audit-log-path: /var/log/apiserver/audit.log
profiling: "false"
controllerManager:
extraArgs:
profiling: "false"
terminated-pod-gc-threshold: "10"
scheduler:
extraArgs:
profiling: "false"
preKubeadmCommands:
- bash -c /tmp/kubeadm-bootstrap.sh
postKubeadmCommands:
- bash -c /tmp/kubeadm-postinstall.sh
files:
- path: /tmp/kubeadm-bootstrap.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -eux
function os_setup {
command -v "awk" >/dev/null 2>&1 || tdnf install -y awk
}
function dockerd_prereq() {
swapoff -a
modprobe overlay
modprobe br_netfilter
cat > /etc/sysctl.d/99-sysctl-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD ACCEPT
sysctl --system
}
function dockerd_install() {
# Workaround a date/time race issue that causes apt-get update to fail
until apt-get update; do
sleep 1
done
# Steps from https://kubernetes.io/docs/setup/cri/
### Install packages to allow apt to use a repository over HTTPS
apt-get install -y apt-transport-https ca-certificates curl software-properties-common
          ### Add Docker's official GPG key
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
### Add Docker apt repository.
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
## Install Docker CE.
apt-get update && apt-get install -y docker-ce=18.06*
# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2"
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
}
function systemctl_config() {
systemctl daemon-reload
systemctl enable docker
systemctl restart docker
systemctl enable azurestackhci_boot
}
function kubernetes_install_ubuntu() {
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet="${KUBERNETES_SEMVER}-00" kubeadm="${KUBERNETES_SEMVER}-00" kubectl="${KUBERNETES_SEMVER}-00"
apt-mark hold kubelet kubeadm kubectl
}
function kubernetes_install() {
K8S_VERSION="${KUBERNETES_SEMVER}"
KUBEADM_VERSION="${KUBERNETES_SEMVER}"
#tdnf install -y kubernetes-${KUBERNETES_SEMVER} kubernetes-kubeadm-${KUBERNETES_SEMVER} kubernetes-pause-${KUBERNETES_SEMVER}
cat > /etc/sysctl.d/90-kubelet.conf << EOF
vm.overcommit_memory=1
kernel.panic=10
kernel.panic_on_oops=1
EOF
sysctl -p /etc/sysctl.d/90-kubelet.conf
sudo swapoff -a
}
# First setup the os with any required packages (e.g. awk)
os_setup
if [ "$(hostnamectl | awk '/Operating System:/ {print $3}')" = "Ubuntu" ]; then
# Ubuntu steps
dockerd_prereq
dockerd_install
systemctl_config
kubernetes_install_ubuntu
else
# Default steps
dockerd_prereq
systemctl_config
kubernetes_install
fi
- path: /tmp/kubeadm-postinstall.sh
owner: "root:root"
permissions: "0744"
content: |
#!/bin/bash
set -euxo pipefail
function kube_config() {
mkdir -p /home/${CAPH_USER}/.kube
cp /etc/kubernetes/admin.conf /home/${CAPH_USER}/.kube/config
chown ${CAPH_USER} /home/${CAPH_USER}/.kube
chown ${CAPH_USER} /home/${CAPH_USER}/.kube/config
}
function flannel_install() {
KUBECONFIG=/etc/kubernetes/admin.conf kubectl apply -f /etc/kubernetes/cni/kube-flannel.yml
}
# Temp, this responsibility will move to caph
function patch_node_providerid() {
for value in {1..10}
do
sleep 1
echo "Patch ProviderID (attempt $value)..."
KUBECONFIG=/etc/kubernetes/admin.conf kubectl patch node {{ ds.meta_data["local_hostname"] }} -p $'spec:\n providerID: azurestackhci:////{{ ds.meta_data["local_hostname"] }}' >/dev/null 2>&1 || continue
break
done
}
function save_iptables_config() {
iptables-save > /etc/sysconfig/iptables
}
kube_config
flannel_install
save_iptables_config
patch_node_providerid
- path: /etc/kubernetes/azurestackhci.json
owner: "root:root"
permissions: "0644"
content: |
{
"cloud": "AzureStackHCICloud",
"tenantID": "",
"subscriptionID": "1234",
"resourceGroup": "test",
"securityGroupName": "",
"location": "westus2",
"vmType": "vmss",
"vnetName": "External",
"vnetResourceGroup": "test",
"subnetName": "cluster",
"routeTableName": "",
"userAssignedID": "",
"loadBalancerSku": "Standard",
"maximumLoadBalancerRuleCount": 250,
"useManagedIdentityExtension": true,
"useInstanceMetadata": true
}
- path: /etc/rc.d/init.d/azurestackhci_boot.sh
owner: root:root
permissions: '0755'
content: |
#!/bin/bash
iptables-restore -v -w < /etc/sysconfig/iptables
- path: /etc/systemd/system/azurestackhci_boot.service
owner: root:root
permissions: '0644'
content: |
[Unit]
Description=azurestackhci_boot
After=network.target
[Service]
Type=simple
ExecStart=/etc/rc.d/init.d/azurestackhci_boot.sh
TimeoutStartSec=0
[Install]
WantedBy=default.target

@@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: default
resources:
- controlplane.yaml
configurations:
- kustomizeconfig.yaml

@@ -0,0 +1,15 @@
namespace:
- kind: Machine
group: cluster.x-k8s.io
version: v1alpha2
path: spec/infrastructureRef/namespace
create: true
- kind: Machine
group: cluster.x-k8s.io
version: v1alpha2
path: spec/bootstrap/configRef/namespace
create: true
commonLabels:
- path: metadata/labels
create: true

183
examples/generate.sh Normal file

@@ -0,0 +1,183 @@
#!/bin/bash
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
# Directories.
SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
OUTPUT_DIR=${OUTPUT_DIR:-${SOURCE_DIR}/_out}
# Binaries
ENVSUBST=${ENVSUBST:-envsubst}
command -v "${ENVSUBST}" >/dev/null 2>&1 || { echo "Cannot find ${ENVSUBST} in path." >&2; exit 1; }
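# Random 8-character suffix used to build unique default resource names.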
RANDOM_STRING=$(date | md5sum | head -c8)
# Cluster.
export TARGET_CLUSTER_NAME="${TARGET_CLUSTER_NAME:-azurestackhci-${RANDOM_STRING}}"
export MANAGEMENT_CLUSTER_NAME="${MANAGEMENT_CLUSTER_NAME:-${TARGET_CLUSTER_NAME}-mgmt}"
export VNET_NAME="${VNET_NAME:-External}"
export KUBERNETES_VERSION="${KUBERNETES_VERSION:-v1.16.1}"
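# Strip the leading "v" (e.g. v1.16.1 -> 1.16.1); the bootstrap scripts use this form for apt package pins.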
export KUBERNETES_SEMVER="${KUBERNETES_VERSION#v}"
export POD_CIDR="${POD_CIDR:-192.168.0.0/16}"
export TARGET_CLUSTER_LB_NAME="${TARGET_CLUSTER_NAME}-load-balancer"
export MANAGEMENT_CLUSTER_LB_NAME="${MANAGEMENT_CLUSTER_NAME}-load-balancer"
export TARGET_CLUSTER_BACKEND_POOL_NAME="${TARGET_CLUSTER_NAME}-backend-pool"
export MANAGEMENT_CLUSTER_BACKEND_POOL_NAME="${MANAGEMENT_CLUSTER_NAME}-backend-pool"
export MANAGEMENT_CLUSTER_RESOURCE_GROUP="${MANAGEMENT_CLUSTER_GROUP_NAME:-${MANAGEMENT_CLUSTER_NAME}}"
export TARGET_CLUSTER_RESOURCE_GROUP="${TARGET_CLUSTER_GROUP_NAME:-${MANAGEMENT_CLUSTER_RESOURCE_GROUP}-target}"
# User.
export CAPH_USER="${CAPH_USER:-clouduser}"
# Debug Mode
export WSSD_DEBUG_MODE="${WSSD_DEBUG_MODE:-off}"
# Disk
export CAPH_DISK_NAME="${CAPH_DISK_NAME:-linux}"
# Machine settings.
export CONTROL_PLANE_REPLICAS="${CONTROL_PLANE_REPLICAS:-1}"
export MACHINE_REPLICAS="${MACHINE_REPLICAS:-2}"
export CONTROL_PLANE_MACHINE_TYPE="${CONTROL_PLANE_MACHINE_TYPE:-Default}"
export NODE_MACHINE_TYPE="${NODE_MACHINE_TYPE:-Default}"
# Outputs.
COMPONENTS_CLUSTER_API_GENERATED_FILE=${SOURCE_DIR}/provider-components/provider-components-cluster-api.yaml
COMPONENTS_KUBEADM_GENERATED_FILE=${SOURCE_DIR}/provider-components/provider-components-kubeadm.yaml
COMPONENTS_CAPH_GENERATED_FILE=${SOURCE_DIR}/provider-components/provider-components-azurestackhci.yaml
PROVIDER_COMPONENTS_GENERATED_FILE=${OUTPUT_DIR}/provider-components.yaml
MANAGEMENT_CLUSTER_GENERATED_FILE=${OUTPUT_DIR}/mgmt-cluster.yaml
MANAGEMENT_CONTROLPLANE_GENERATED_FILE=${OUTPUT_DIR}/mgmt-controlplane.yaml
MANAGEMENT_LOADBALANCER_GENERATED_FILE=${OUTPUT_DIR}/mgmt-loadbalancer.yaml
TARGET_CLUSTER_GENERATED_FILE=${OUTPUT_DIR}/target-cluster.yaml
TARGET_CONTROLPLANE_GENERATED_FILE=${OUTPUT_DIR}/target-controlplane.yaml
TARGET_LOADBALANCER_GENERATED_FILE=${OUTPUT_DIR}/target-loadbalancer.yaml
MACHINEDEPLOYMENT_GENERATED_FILE=${OUTPUT_DIR}/target-machinedeployment.yaml
# Overwrite flag.
OVERWRITE=0
SCRIPT=$(basename "$0")
while test $# -gt 0; do
case "$1" in
-h|--help)
echo "$SCRIPT - generates input yaml files for Cluster API (CAPH)"
echo " "
echo "$SCRIPT [options]"
echo " "
echo "options:"
echo "-h, --help show brief help"
echo "-f, --force-overwrite if file to be generated already exists, force script to overwrite it"
exit 0
;;
-f)
OVERWRITE=1
shift
;;
--force-overwrite)
OVERWRITE=1
shift
;;
*)
break
;;
esac
done
if [ $OVERWRITE -ne 1 ] && [ -d "$OUTPUT_DIR" ]; then
echo "ERR: Folder ${OUTPUT_DIR} already exists. Delete it manually before running this script."
exit 1
fi
mkdir -p "${OUTPUT_DIR}"
# Verify the required Environment Variables are present.
: "${CLOUDAGENT_FQDN:?Environment variable empty or not defined.}"
: "${SSH_PUBLIC_KEY:?Environment variable empty or not defined.}"
# If requested, adjust the control plane kustomization to point to the HA (3-node) yaml.
# This is temporary until we move to alpha3 and truly support user-specified replica counts for the control plane.
if [ "${CONTROL_PLANE_REPLICAS}" -gt 1 ]; then
sed -ri 's/- controlplane.yaml/- controlplane-ha.yaml/' "${SOURCE_DIR}/controlplane/kustomization.yaml"
else
sed -ri 's/- controlplane-ha.yaml/- controlplane.yaml/' "${SOURCE_DIR}/controlplane/kustomization.yaml"
fi
# Cloudagent FQDN is passed through to the manager pod via secret
export CLOUDAGENT_FQDN_B64="$(echo -n "$CLOUDAGENT_FQDN" | base64 | tr -d '\n')"
export WSSD_DEBUG_MODE_B64="$(echo -n "$WSSD_DEBUG_MODE" | base64 | tr -d '\n')"
# Prepare environment for generation of management cluster yamls
export CLUSTER_NAME="${MANAGEMENT_CLUSTER_NAME}"
export LOAD_BALANCER_NAME=${MANAGEMENT_CLUSTER_LB_NAME}
export BACKEND_POOL_NAME=${MANAGEMENT_CLUSTER_BACKEND_POOL_NAME}
export CLUSTER_RESOURCE_GROUP=${MANAGEMENT_CLUSTER_RESOURCE_GROUP}
# Generate management cluster resources.
kustomize build "${SOURCE_DIR}/cluster" | envsubst > "${MANAGEMENT_CLUSTER_GENERATED_FILE}"
echo "Generated ${MANAGEMENT_CLUSTER_GENERATED_FILE}"
# Generate management controlplane resources.
kustomize build "${SOURCE_DIR}/controlplane" | envsubst > "${MANAGEMENT_CONTROLPLANE_GENERATED_FILE}"
echo "Generated ${MANAGEMENT_CONTROLPLANE_GENERATED_FILE}"
# Generate loadbalancer resources.
kustomize build "${SOURCE_DIR}/loadbalancer" | envsubst >> "${MANAGEMENT_LOADBALANCER_GENERATED_FILE}"
echo "Generated ${MANAGEMENT_LOADBALANCER_GENERATED_FILE}"
# Prepare environment for generation of target cluster yamls
# If a target cluster LB is not specified (e.g., a converged cluster), the management LB is used.
export CLUSTER_NAME="${TARGET_CLUSTER_NAME}"
export LOAD_BALANCER_NAME=${TARGET_CLUSTER_LB_NAME}
export BACKEND_POOL_NAME=${TARGET_CLUSTER_BACKEND_POOL_NAME}
export CLUSTER_RESOURCE_GROUP=${TARGET_CLUSTER_RESOURCE_GROUP}
# Generate target cluster resources.
kustomize build "${SOURCE_DIR}/cluster" | envsubst > "${TARGET_CLUSTER_GENERATED_FILE}"
echo "Generated ${TARGET_CLUSTER_GENERATED_FILE}"
# Generate target controlplane resources.
kustomize build "${SOURCE_DIR}/controlplane" | envsubst > "${TARGET_CONTROLPLANE_GENERATED_FILE}"
echo "Generated ${TARGET_CONTROLPLANE_GENERATED_FILE}"
# Generate loadbalancer resources.
kustomize build "${SOURCE_DIR}/loadbalancer" | envsubst >> "${TARGET_LOADBALANCER_GENERATED_FILE}"
echo "Generated ${TARGET_LOADBALANCER_GENERATED_FILE}"
# Generate machinedeployment resources.
kustomize build "${SOURCE_DIR}/machinedeployment" | envsubst >> "${MACHINEDEPLOYMENT_GENERATED_FILE}"
echo "Generated ${MACHINEDEPLOYMENT_GENERATED_FILE}"
# Generate Cluster API provider components file.
curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v0.2.4/cluster-api-components.yaml > "${COMPONENTS_CLUSTER_API_GENERATED_FILE}"
echo "Downloaded ${COMPONENTS_CLUSTER_API_GENERATED_FILE}"
# Generate Kubeadm Bootstrap Provider components file.
curl -L https://github.com/kubernetes-sigs/cluster-api-bootstrap-provider-kubeadm/releases/download/v0.1.2/bootstrap-components.yaml > "${COMPONENTS_KUBEADM_GENERATED_FILE}"
echo "Downloaded ${COMPONENTS_KUBEADM_GENERATED_FILE}"
# Generate AzureStackHCI Infrastructure Provider components file.
kustomize build "${SOURCE_DIR}/../config/default" | envsubst > "${COMPONENTS_CAPH_GENERATED_FILE}"
echo "Generated ${COMPONENTS_CAPH_GENERATED_FILE}"
# Generate a single provider components file.
kustomize build "${SOURCE_DIR}/provider-components" | envsubst > "${PROVIDER_COMPONENTS_GENERATED_FILE}"
echo "Generated ${PROVIDER_COMPONENTS_GENERATED_FILE}"

Some files were not shown because too many files changed in this diff.