fix(controller): replace load balancer with public IPs

This commit is contained in:
Sean Knox 2018-06-04 17:12:41 -07:00
Parent a56ef48397
Commit 99e7ec21d7
10 changed files with 47 additions and 155 deletions

View file

@@ -23,7 +23,7 @@ export DIR_KUBECONFIG := .kube
#
export LOCATION ?= westus2
export KUBE_API_PUBLIC_FQDN := $(CLUSTER_NAME).$(LOCATION).cloudapp.azure.com
export KUBE_API_PUBLIC_FQDN := kubeapi1-$(CLUSTER_NAME).$(LOCATION).cloudapp.azure.com
export AZURE_VM_KEY_NAME ?= $(CLUSTER_NAME)
export AZURE_VM_KEY_PATH := ${DIR_KEY_PAIR}/$(CLUSTER_NAME)/${AZURE_VM_KEY_NAME}.pem

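The Makefile now points KUBE_API_PUBLIC_FQDN at the first controller's per-VM DNS label rather than a load-balancer FQDN. A minimal sketch of checking that the label resolves once the public IPs exist, assuming the default LOCATION of westus2 and a hypothetical cluster/resource-group name of mycluster:

    # Hypothetical values; the label follows the kubeapi1-<name> pattern set above
    # and only resolves after terraform has created the controller public IPs.
    dig +short kubeapi1-mycluster.westus2.cloudapp.azure.com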
View file

@@ -80,7 +80,7 @@ output "location" {
}
output "kube-api-public-fqdn" {
value = "${ var.kube-api-public-fqdn }"
value = "${ module.controller.kube-api-public-fqdn }"
}
output "kube-api-internal-ip" {

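The top-level output now forwards the FQDN computed inside the controller module instead of echoing a variable. A rough sketch of reading it back after an apply, using the same state path the kubeconfig script further down relies on (CLUSTER_NAME is assumed to be exported, as in the Makefile):

    terraform output -state=build/${CLUSTER_NAME}/terraform.tfstate kube-api-public-fqdn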
View file

@@ -67,20 +67,6 @@ module "image" {
resource_group_name = "${ module.rg.name }"
}
module "load_balancer" {
source = "./modules/load_balancer"
depends-id = "${ module.vnet.depends-id }"
# variables
name = "${ var.name }"
location = "${ var.location }"
kube-api-internal-ip = "${ var.kube-api-internal-ip }"
# modules
private-subnet-id = "${ module.vnet.controller-subnet-id }"
resource_group_name = "${ module.rg.name }"
}
module "bastion" {
source = "./modules/bastion"
depends-id = "${ module.dns.depends-id }"
@@ -136,7 +122,6 @@ module "controller" {
storage_endpoint = "${ module.storage_account.primary_blob_endpoint }"
image_id = "${ module.image.image_id }"
bastion-ip = "${ module.bastion.public-ip }"
backend_pool_ids = ["${ module.load_balancer.public_backend_pool_id }", "${ module.load_balancer.private_backend_pool_id }"]
resource_group_name = "${ module.rg.name }"
}

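With the load_balancer module removed and backend_pool_ids no longer passed to the controller module, no azurerm_lb resources should remain under Terraform management. A quick, non-authoritative way to confirm against the same state file (expect empty output):

    terraform state list -state=build/${CLUSTER_NAME}/terraform.tfstate | grep -i azurerm_lb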
View file

@@ -1,3 +1,35 @@
resource "azurerm_public_ip" "controller" {
name = "controller${ count.index + 1 }"
location = "${ var.location }"
resource_group_name = "${ var.resource_group_name }"
public_ip_address_allocation = "static"
domain_name_label = "kubeapi${ count.index + 1}-${ var.resource_group_name }"
count = "${ var.master_count }"
}
resource "azurerm_network_security_group" "controller" {
name = "controller-nsg"
location = "${ var.location }"
resource_group_name = "${ var.resource_group_name }"
security_rule {
name = "kube-api"
priority = 100
direction = "Inbound"
access = "Allow"
protocol = "Tcp"
source_port_range = "*"
destination_port_range = "8443"
source_address_prefix = "*"
destination_address_prefix = "${azurerm_public_ip.controller.*.ip_address[0]}"
}
tags {
environment = "Production"
}
}
resource "azurerm_network_interface" "controller" {
name = "controller${ count.index + 1 }"
location = "${ var.location }"
@@ -6,11 +38,11 @@ resource "azurerm_network_interface" "controller" {
count = "${ var.master_count }"
ip_configuration {
name = "private"
subnet_id = "${ var.private-subnet-id }"
private_ip_address_allocation = "static"
private_ip_address = "${ element(split(",", var.master-ips), count.index) }"
load_balancer_backend_address_pools_ids = ["${ var.backend_pool_ids }"]
name = "private"
subnet_id = "${ var.private-subnet-id }"
private_ip_address_allocation = "static"
private_ip_address = "${ element(split(",", var.master-ips), count.index) }"
public_ip_address_id = "${azurerm_public_ip.controller.*.id[count.index]}"
}
}

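Each controller NIC now carries its own static public IP with a kubeapiN-<resource group> DNS label instead of sitting in load-balancer backend pools. A sketch of inspecting the first one with the Azure CLI; <resource-group> is a placeholder, and the IP name follows the controller${count.index + 1} pattern above:

    az network public-ip show \
      --resource-group <resource-group> \
      --name controller1 \
      --query '{ip: ipAddress, fqdn: dnsSettings.fqdn}' \
      --output table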
View file

@@ -16,10 +16,6 @@ variable "kube-api-internal-ip" {}
variable "bootstrap_token" {}
variable "depends-id" {}
variable "backend_pool_ids" {
type = "list"
}
variable "azure" {
type = "map"
}
@@ -31,3 +27,7 @@ output "depends-id" {
output "controller_private_ips" {
value = "${azurerm_network_interface.controller.*.private_ip_address}"
}
output "kube-api-public-fqdn" {
value = "${azurerm_public_ip.controller.*.fqdn[0]}"
}

View file

@@ -1,22 +0,0 @@
variable "name" {}
variable "location" {}
variable "kube-api-internal-ip" {}
variable "private-subnet-id" {}
variable "resource_group_name" {}
variable "depends-id" {}
output "depends-id" {
value = "${null_resource.dummy_dependency.id}"
}
output "public_backend_pool_id" {
value = "${azurerm_lb_backend_address_pool.public_backend_pool.id}"
}
output "private_backend_pool_id" {
value = "${azurerm_lb_backend_address_pool.private_backend_pool.id}"
}
# output "public_load_balancer_ip" {
# value = "${azurerm_public_ip.lbpip.ip_address}"
# }

View file

@@ -1,43 +0,0 @@
resource "azurerm_lb" "intlb" {
name = "${ var.name }-private"
location = "${ var.location }"
resource_group_name = "${ var.resource_group_name }"
frontend_ip_configuration {
name = "LoadBalancerFrontEndPrivate"
private_ip_address_allocation = "static"
private_ip_address = "${ var.kube-api-internal-ip }"
subnet_id = "${ var.private-subnet-id }"
}
}
resource "azurerm_lb_backend_address_pool" "private_backend_pool" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.intlb.id}"
name = "BackendPoolPrivate"
}
resource "azurerm_lb_rule" "apiserver-private" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.intlb.id}"
name = "kube-api-private"
protocol = "tcp"
frontend_port = 6443
backend_port = 6443
frontend_ip_configuration_name = "LoadBalancerFrontEndPrivate"
enable_floating_ip = false
backend_address_pool_id = "${azurerm_lb_backend_address_pool.private_backend_pool.id}"
idle_timeout_in_minutes = 4
probe_id = "${azurerm_lb_probe.lb_probe_private.id}"
depends_on = ["azurerm_lb_probe.lb_probe_private"]
}
resource "azurerm_lb_probe" "lb_probe_private" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.intlb.id}"
name = "kube-api-privateProbe"
protocol = "tcp"
port = 6443
interval_in_seconds = 5
number_of_probes = 2
}

View file

@@ -1,60 +0,0 @@
resource "azurerm_public_ip" "lbpip" {
name = "lbpip"
location = "${ var.location }"
resource_group_name = "${ var.resource_group_name }"
public_ip_address_allocation = "dynamic"
domain_name_label = "${ var.name }"
tags {
environment = "test"
}
}
resource "azurerm_lb" "extlb" {
name = "${ var.name }-public"
location = "${ var.location }"
resource_group_name = "${ var.resource_group_name }"
frontend_ip_configuration {
name = "LoadBalancerFrontEndPublic"
public_ip_address_id = "${azurerm_public_ip.lbpip.id}"
}
}
resource "azurerm_lb_backend_address_pool" "public_backend_pool" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.extlb.id}"
name = "BackendPoolPublic"
}
resource "azurerm_lb_rule" "apiserver_public" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.extlb.id}"
name = "kube-api-public"
protocol = "tcp"
frontend_port = 8443
backend_port = 6443
frontend_ip_configuration_name = "LoadBalancerFrontEndPublic"
enable_floating_ip = false
backend_address_pool_id = "${azurerm_lb_backend_address_pool.public_backend_pool.id}"
idle_timeout_in_minutes = 4
probe_id = "${azurerm_lb_probe.lb_probe_public.id}"
depends_on = ["azurerm_lb_probe.lb_probe_public"]
}
resource "azurerm_lb_probe" "lb_probe_public" {
resource_group_name = "${ var.resource_group_name }"
loadbalancer_id = "${azurerm_lb.extlb.id}"
name = "kube-api-publicProbe"
protocol = "tcp"
port = 6443
interval_in_seconds = 5
number_of_probes = 2
}
resource "null_resource" "dummy_dependency" {
depends_on = [
"azurerm_lb.extlb",
"azurerm_lb.intlb",
]
}

View file

@@ -3,12 +3,12 @@
echo $DIR_KUBECONFIG
echo $CLUSTER_NAME
echo $DIR_SSL
echo $KUBE_API_PUBLIC_FQDN
CA_PATH=$DIR_SSL/${CLUSTER_NAME}/ca.pem
ADMIN_CERT_PATH=$DIR_SSL/${CLUSTER_NAME}/admin.pem
ADMIN_KEY_PATH=$DIR_SSL/${CLUSTER_NAME}/admin-key.pem
KUBE_API_PUBLIC_FQDN=`terraform output -state=build/${CLUSTER_NAME}/terraform.tfstate kube-api-public-fqdn`
mkdir -p $DIR_KUBECONFIG
@@ -17,7 +17,7 @@ mkdir -p $DIR_KUBECONFIG
cat << EOF > ${DIR_KUBECONFIG}/kubeconfig
kubectl config set-cluster cluster-${CLUSTER_NAME} \
--embed-certs=true \
--server=https://${KUBE_API_PUBLIC_FQDN}:8443 \
--server=https://${KUBE_API_PUBLIC_FQDN}:6443 \
--certificate-authority=${CA_PATH}
kubectl config set-credentials admin-${CLUSTER_NAME} \
@@ -35,7 +35,7 @@ EOF
kubectl config set-cluster cluster-${CLUSTER_NAME} \
--embed-certs=true \
--server=https://${KUBE_API_PUBLIC_FQDN}:8443 \
--server=https://${KUBE_API_PUBLIC_FQDN}:6443 \
--certificate-authority=${CA_PATH}
kubectl config set-credentials admin-${CLUSTER_NAME} \

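The kubeconfig now targets the apiserver directly on 6443 instead of the load balancer's 8443 frontend. A rough reachability check in the spirit of the poll script below, reusing the KUBE_API_PUBLIC_FQDN value this script already looks up; any HTTP response (even a 401/403 JSON body) means the apiserver is answering on the new port:

    curl --insecure --silent "https://${KUBE_API_PUBLIC_FQDN}:6443"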
View file

@@ -11,7 +11,7 @@ _retry() {
echo "❤ Polling for cluster life - this could take a minute or more"
# _retry "❤ Waiting for DNS to resolve for ${ELB}" ping -c1 "${ELB}"
_retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}:8443"
_retry "❤ Curling apiserver external elb" curl --insecure --silent "https://${ELB}:6443"
_retry "❤ Trying to connect to cluster with kubectl" kubectl cluster-info
_retry "❤ Waiting for kube-system namespace" kubectl get namespace kube-system
_retry "❤ Waiting for rbac.authorization.k8s.io/v1 API to become available" kubectl get clusterrolebinding