Standalone cluster creation with Terraform failing #2

I'm trying to create a standalone cluster locally, following the steps mentioned here. It's failing with the error below:

Comments
Hey, thanks for opening the issue. I think this is a documentation error; the correct variable name should be:
@invidian thanks for the quick reply, now it's throwing an error:
I'll give it a go as soon as I can and update the docs to help you out. Sorry that you ran into problems with it, and thanks for checking this project out!
@govindkailas you can try the config below for the time being. Before you do, make sure you have the Helm binary and the flexkube chart repository added, e.g. using these commands:

export HELM_VERSION=3.5.4
wget -O- https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz | tar -zxvf - linux-amd64/helm && mv linux-amd64/helm ./ && rmdir linux-amd64
export PATH="$(pwd):${PATH}"
helm repo add flexkube https://flexkube.github.io/charts/

Unfortunately, the guide you found (https://flexkube.github.io/documentation/guides/kubernetes/creating-single-node-cluster-on-local-machine-using-terraform/) is not finished; it's mainly a copy-paste from https://flexkube.github.io/documentation/guides/etcd/creating-single-member-cluster-on-local-machine-using-terraform/, which from what I can see no longer works very well either. I'll try to find some time to improve this. Or, if you like, feel welcome to contribute to the docs here: https://github.com/flexkube/website/tree/master/content/documentation/guides

For more complex scenarios and for reference usage, see the e2e tests configuration: https://github.com/flexkube/terraform-provider-flexkube/blob/master/e2e/main.tf

Working Terraform config with the latest stable Flatcar release:

terraform {
  required_providers {
    flexkube = {
      source  = "flexkube/flexkube"
      version = "0.6.0"
    }

    local = {
      source  = "hashicorp/local"
      version = "2.1.0"
    }

    random = {
      source  = "hashicorp/random"
      version = "3.1.0"
    }
  }

  required_version = ">= 1.1"
}
variable "ip" {}
variable "pod_cidr" {}
variable "service_cidr" {}
variable "kubernetes_service_ip" {}
variable "dns_service_ip" {}
variable "node_name" {}
resource "flexkube_pki" "pki" {
etcd {
peers = {
"${var.node_name}" = var.ip
}
servers = {
"${var.node_name}" = var.ip
}
client_cns = ["root"]
}
kubernetes {
kube_api_server {
server_ips = ["127.0.1.1", var.ip, var.kubernetes_service_ip]
}
}
}
resource "flexkube_etcd_cluster" "etcd" {
pki_yaml = flexkube_pki.pki.state_yaml
member {
name = var.node_name
peer_address = var.ip
server_address = var.ip
}
}
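# Kubernetes bootstrap tokens have the form "<id>.<secret>", where the ID is
# 6 and the secret is 16 lowercase alphanumeric characters, hence the lengths
# of the two random passwords below.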
resource "random_password" "bootstrap_token_id" {
length = 6
upper = false
special = false
}
resource "random_password" "bootstrap_token_secret" {
length = 16
upper = false
special = false
}
resource "flexkube_controlplane" "bootstrap" {
pki_yaml = flexkube_pki.pki.state_yaml
kube_apiserver {
service_cidr = var.service_cidr
etcd_servers = ["https://${var.ip}:2379"]
bind_address = var.ip
advertise_address = var.ip
secure_port = 8443
common {
image = "k8s.gcr.io/kube-apiserver:v1.23.3"
}
}
kube_controller_manager {
flex_volume_plugin_dir = "/var/lib/kubelet/volumeplugins"
common {
image = "k8s.gcr.io/kube-controller-manager:v1.23.3"
}
}
kube_scheduler {
common {
# 1.23.x compatibility is not released yet.
image = "k8s.gcr.io/kube-scheduler:v1.22.5"
}
}
api_server_address = var.ip
api_server_port = 8443
depends_on = [
flexkube_etcd_cluster.etcd,
]
}
locals {
  kubeconfig_admin = <<EOF
apiVersion: v1
kind: Config
clusters:
- name: admin-cluster
  cluster:
    server: https://${var.ip}:8443
    certificate-authority-data: ${base64encode(flexkube_pki.pki.state_sensitive[0].kubernetes[0].ca[0].x509_certificate)}
users:
- name: admin-user
  user:
    client-certificate-data: ${base64encode(flexkube_pki.pki.state_sensitive[0].kubernetes[0].admin_certificate[0].x509_certificate)}
    client-key-data: ${base64encode(flexkube_pki.pki.state_sensitive[0].kubernetes[0].admin_certificate[0].private_key)}
current-context: admin-context
contexts:
- name: admin-context
  context:
    cluster: admin-cluster
    namespace: kube-system
    user: admin-user
EOF
}
resource "flexkube_helm_release" "tls-bootstrapping" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/tls-bootstrapping"
version = "0.1.1"
name = "tls-bootstrapping"
values = <<EOF
tokens:
- token-id: ${random_password.bootstrap_token_id.result}
token-secret: ${random_password.bootstrap_token_secret.result}
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
]
}
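# The kubelet pool below uses the bootstrap token generated above to request
# its client certificate from the API server (kubelet TLS bootstrapping),
# which is why it depends on the tls-bootstrapping release.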
resource "flexkube_kubelet_pool" "main" {
bootstrap_config {
server = "${var.ip}:8443"
token = "${random_password.bootstrap_token_id.result}.${random_password.bootstrap_token_secret.result}"
}
privileged_labels = {
"node-role.kubernetes.io/master" = ""
}
admin_config {
server = "${var.ip}:8443"
}
pki_yaml = flexkube_pki.pki.state_yaml
cgroup_driver = "systemd"
network_plugin = "cni"
hairpin_mode = "hairpin-veth"
volume_plugin_dir = "/var/lib/kubelet/volumeplugins"
cluster_dns_ips = [
var.dns_service_ip,
]
extra_mount {
source = "/run/systemd/"
target = "/run/systemd"
}
image = "quay.io/flexkube/kubelet:v1.23.3"
kubelet {
name = var.node_name
address = var.ip
}
depends_on = [
flexkube_helm_release.tls-bootstrapping,
]
}
resource "flexkube_helm_release" "kube-apiserver" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/kube-apiserver"
version = "0.3.17"
name = "kube-apiserver"
values = <<EOF
serverKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].server_certificate[0].private_key))}
serverCertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].server_certificate[0].x509_certificate))}
serviceAccountPrivateKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].service_account_certificate[0].private_key))}
caCertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].ca[0].x509_certificate))}
frontProxyClientKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].front_proxy_client_certificate[0].private_key))}
frontProxyClientCertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].front_proxy_client_certificate[0].x509_certificate))}
frontProxyCACertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].front_proxy_ca[0].x509_certificate))}
kubeletClientCertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].kubelet_certificate[0].x509_certificate))}
kubeletClientKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].kube_api_server[0].kubelet_certificate[0].private_key))}
etcdCACertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].etcd[0].ca[0].x509_certificate))}
etcdClientCertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].etcd[0].client_certificates[0].certificate[0].x509_certificate))}
etcdClientKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].etcd[0].client_certificates[0].certificate[0].private_key))}
etcdServers:
- https://${var.ip}:2379
serviceCIDR: ${var.service_cidr}
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_kubelet_pool.main,
]
}
resource "flexkube_helm_release" "kube-proxy" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/kube-proxy"
version = "0.3.17"
name = "kube-proxy"
values = <<EOF
apiServers:
- ${var.ip}:8443
podCIDR: ${var.pod_cidr}
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_kubelet_pool.main,
]
}
resource "flexkube_helm_release" "calico" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/calico"
version = "0.4.13"
name = "calico"
values = <<EOF
podCIDR: ${var.pod_cidr}
flexVolumePluginDir: /var/lib/kubelet/volumeplugins
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_helm_release.kube-proxy,
]
}
resource "flexkube_helm_release" "kubernetes" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/kubernetes"
version = "0.4.19"
name = "kubernetes"
values = <<EOF
serviceAccountPrivateKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].service_account_certificate[0].private_key))}
kubernetesCAKey: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].ca[0].private_key))}
kubernetesCACertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].kubernetes[0].ca[0].x509_certificate))}
rootCACertificate: |
${indent(2, trimspace(flexkube_pki.pki.state_sensitive[0].root_ca[0].x509_certificate))}
flexVolumePluginDir: /var/lib/kubelet/volumeplugins
apiServers:
- ${var.ip}:8443
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_helm_release.calico,
]
}
resource "flexkube_helm_release" "coredns" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/coredns"
version = "2.0.4"
name = "coredns"
values = <<EOF
rbac:
pspEnable: true
service:
clusterIP: ${var.dns_service_ip}
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_helm_release.calico,
]
}
resource "flexkube_helm_release" "metrics-server" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/metrics-server"
version = "3.0.6"
name = "metrics-server"
values = <<EOF
rbac:
pspEnabled: true
args:
- --kubelet-preferred-address-types=InternalIP
EOF
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_helm_release.calico,
]
}
resource "flexkube_helm_release" "kubelet-rubber-stamp" {
kubeconfig = local.kubeconfig_admin
namespace = "kube-system"
chart = "flexkube/kubelet-rubber-stamp"
version = "0.1.7"
name = "kubelet-rubber-stamp"
wait = true
depends_on = [
flexkube_controlplane.bootstrap,
flexkube_helm_release.calico,
]
}
resource "local_file" "kubeconfig" {
sensitive_content = local.kubeconfig_admin
filename = "./kubeconfig"
} |
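Once the config is saved (e.g. as main.tf) and the variables are set, the usual Terraform workflow should bring the cluster up, and the kubeconfig written by the last resource can be used directly with kubectl. A sketch:

terraform init
terraform apply
export KUBECONFIG=$(pwd)/kubeconfig
kubectl get nodes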