From 2e010eff01e7ebae97a7f5bfe9afe9acbcb2ddf0 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 25 Jun 2024 15:45:58 +0200 Subject: [PATCH 01/35] Reviewed AWS modules and rewrote AWS EC2 x RKE recipe --- modules/infra/aws/README.md | 102 +++++--- modules/infra/aws/ec2/README.md | 217 ++++++++++++++++++ modules/infra/aws/{ => ec2}/data.tf | 8 + modules/infra/aws/{ => ec2}/docs.md | 27 ++- modules/infra/aws/{ => ec2}/main.tf | 69 +++++- modules/infra/aws/ec2/outputs.tf | 30 +++ modules/infra/aws/{ => ec2}/variables.tf | 115 ++++------ modules/infra/aws/{ => ec2}/versions.tf | 0 modules/infra/aws/outputs.tf | 42 ---- modules/infra/aws/provider.tf | 5 - recipes/upstream/aws/k3s/main.tf | 8 + recipes/upstream/aws/k3s/main.tf_bkp | 121 ++++++++++ recipes/upstream/aws/rke/README.md | 95 ++------ recipes/upstream/aws/rke/docs.md | 58 +++-- recipes/upstream/aws/rke/main.tf | 83 ++++--- recipes/upstream/aws/rke/outputs.tf | 22 +- recipes/upstream/aws/rke/provider.tf | 36 +++ .../upstream/aws/rke/terraform.tfvars.example | 112 ++++++--- recipes/upstream/aws/rke/variables.tf | 178 ++++++-------- 19 files changed, 866 insertions(+), 462 deletions(-) create mode 100644 modules/infra/aws/ec2/README.md rename modules/infra/aws/{ => ec2}/data.tf (59%) rename modules/infra/aws/{ => ec2}/docs.md (66%) rename modules/infra/aws/{ => ec2}/main.tf (57%) create mode 100644 modules/infra/aws/ec2/outputs.tf rename modules/infra/aws/{ => ec2}/variables.tf (75%) rename modules/infra/aws/{ => ec2}/versions.tf (100%) delete mode 100644 modules/infra/aws/outputs.tf delete mode 100644 modules/infra/aws/provider.tf create mode 100644 recipes/upstream/aws/k3s/main.tf_bkp create mode 100644 recipes/upstream/aws/rke/provider.tf diff --git a/modules/infra/aws/README.md b/modules/infra/aws/README.md index 768097e3..58da987e 100644 --- a/modules/infra/aws/README.md +++ b/modules/infra/aws/README.md @@ -1,49 +1,79 @@ -# Terraform | AWS Infrastructure +# Terraform | AWS - Preparatory steps -Terraform module to provide AWS nodes prepared for creating a kubernetes cluster. +In order for Terraform to run operations on your behalf, you must [install and configure the AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions). -Basic infrastructure options are provided to be coupled with other modules for example environments. +## Example -Documentation can be found [here](./docs.md). 
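Once the CLI is installed and configured, the Terraform AWS provider discovers those credentials on its own, so a recipe only needs to set the region (this mirrors the `provider.tf` added later in this patch). A minimal sketch, assuming the default credential chain; the region value is a placeholder:

```terraform
# Minimal sketch: with the AWS CLI configured, the provider picks up credentials
# from the environment, ~/.aws/credentials, or an SSO profile automatically.
# "us-east-1" is a placeholder, not a requirement.
provider "aws" {
  region = "us-east-1"
}
```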
+#### macOS installation and setup for all users -## Examples +```console +curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg" +``` + +```console +sudo installer -pkg AWSCLIV2.pkg -target / +``` + +#### Verify installation + +```console +$ which aws +/usr/local/bin/aws +``` + +```console +$ aws --version +aws-cli/2.13.33 Python/3.11.6 Darwin/23.1.0 exe/x86_64 prompt/off +``` -#### Launch a single instance, create a keypair +#### Setup credentials and configuration -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - create_ssh_key_pair = true - user_data = | - echo "hello world" -} +##### Option 1 - AWS CLI + +```console +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_DEFAULT_REGION= +export AWS_DEFAULT_OUTPUT=text ``` -#### Provide an existing SSH key and Security Group +##### Option 2 - Manually creating credential files -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - ssh_key_pair_name = "rancher-ssh" - instance_security_group = "sg-xxxxx" -} +```console +mkdir ~/.aws ``` -#### Provide an existing VPC and Subnet +```console +cd ~/.aws +``` + +```console +cat > credentials << EOL +[default] +aws_access_key_id = +aws_secret_access_key = +EOL +``` + +```console +cat > config << EOL +[default] +region = +output = text +EOL +``` + +##### Option 3 - IAM Identity Center credentials + +```console +aws configure sso +``` + +```console +export AWS_PROFILE= +``` -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - vpc_id = "vpc-xxxxx" - subnet_id = "subnet-xxxxxx" -} +##### Verify credentials +```console +aws sts get-caller-identity ``` diff --git a/modules/infra/aws/ec2/README.md b/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b383150d --- /dev/null +++ b/modules/infra/aws/ec2/README.md @@ -0,0 +1,217 @@ +# Terraform | AWS EC2 + +Terraform modules to provide VM instances - AWS EC2. + +Documentation can be found [here](./docs.md). 
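Stripped down to the module call itself, an invocation looks roughly like the sketch below; the fuller examples in the next section additionally pin provider versions and validate the region. Every value here is a placeholder:

```terraform
# Condensed sketch of the fuller examples that follow; values are placeholders.
module "aws_ec2_example" {
  source         = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2"
  prefix         = "example"
  aws_region     = "us-east-1"
  instance_count = 3
  ssh_username   = "ubuntu"
}

output "instances_public_ip" {
  value = module.aws_ec2_example.instances_public_ip
}
```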
+ +## Example + +#### Launch three identical VM instances + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "vpc_id" {} + +variable "subnet_id" {} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + vpc_id = var.vpc_id + subnet_id = var.subnet_id + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` + +#### Launch two identical VM instances and a dedicated new VPC/Subnet + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" 
+ } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` diff --git a/modules/infra/aws/data.tf b/modules/infra/aws/ec2/data.tf similarity index 59% rename from modules/infra/aws/data.tf rename to modules/infra/aws/ec2/data.tf index 03859e23..ba260588 100644 --- a/modules/infra/aws/data.tf +++ b/modules/infra/aws/ec2/data.tf @@ -1,3 +1,5 @@ +data "aws_availability_zones" "available" {} + # TODO: Make the Ubuntu OS version configurable # TODO: Add support for ARM architecture data "aws_ami" "ubuntu" { @@ -14,3 +16,9 @@ data "aws_ami" "ubuntu" { values = ["hvm"] } } + +# Save the private SSH key in the Terraform data source for later use +data "local_file" "ssh-private-key" { + depends_on = [local_file.private_key_pem] + filename = local.private_ssh_key_path +} diff --git a/modules/infra/aws/docs.md b/modules/infra/aws/ec2/docs.md similarity index 66% rename from modules/infra/aws/docs.md rename to modules/infra/aws/ec2/docs.md index 0f716aa5..ffbd70f3 100644 --- a/modules/infra/aws/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -21,39 +21,45 @@ No modules. | Name | Type | |------|------| | [aws_instance.instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | +| [aws_internet_gateway.internet-gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | | [aws_key_pair.key_pair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | +| [aws_route_table.route-table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.rt-association](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | | [aws_security_group.sg_allowall](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_subnet.subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_vpc.vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | | [local_file.private_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.public_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [tls_private_key.ssh_private_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.ubuntu](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | +| 
[local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | | [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
object({ address = string, user = string, ssh_key = string, ssh_key_path = string })
| `null` | no | | [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | | [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | | [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | | [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | | [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `"rancher-terraform"` | no | | [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. | `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | Path to write the generated SSH private key | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | | [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | | [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tag_begin](#input\_tag_begin) | When module is being called mode than once, begin tagging from this number | `number` | `1` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | | [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | | [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | | [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [vpc\_ip\_cidr\_range](#input\_vpc\_ip\_cidr\_range) | Range of private IPs available for the AWS VPC | `string` | `"10.0.0.0/16"` | no | ## Outputs @@ -65,6 +71,3 @@ No modules. 
| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | | [node\_username](#output\_node\_username) | n/a | | [sg-id](#output\_sg-id) | n/a | -| [ssh\_key](#output\_ssh\_key) | n/a | -| [ssh\_key\_pair\_name](#output\_ssh\_key\_pair\_name) | n/a | -| [ssh\_key\_path](#output\_ssh\_key\_path) | n/a | diff --git a/modules/infra/aws/main.tf b/modules/infra/aws/ec2/main.tf similarity index 57% rename from modules/infra/aws/main.tf rename to modules/infra/aws/ec2/main.tf index 724f3814..5112a087 100644 --- a/modules/infra/aws/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -1,6 +1,8 @@ # Condition to use an existing keypair if a keypair name and file is also provided locals { - new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path } resource "tls_private_key" "ssh_private_key" { @@ -15,15 +17,70 @@ resource "local_file" "private_key_pem" { file_permission = "0600" } +resource "local_file" "public_key_pem" { + count = var.create_ssh_key_pair ? 1 : 0 + filename = "${path.cwd}/${var.prefix}-ssh_public_key.pem" + content = tls_private_key.ssh_private_key[0].public_key_openssh + file_permission = "0600" +} + resource "aws_key_pair" "key_pair" { count = var.create_ssh_key_pair ? 1 : 0 key_name = "tf-rancher-up-${var.prefix}" public_key = tls_private_key.ssh_private_key[0].public_key_openssh } +resource "aws_vpc" "vpc" { + count = var.vpc_id == null ? 1 : 0 + cidr_block = var.vpc_ip_cidr_range + + tags = { + Name = "${var.prefix}-vpc" + } +} + +resource "aws_subnet" "subnet" { + count = var.subnet_id == null ? 1 : 0 + availability_zone = data.aws_availability_zones.available.names[count.index] + # cidr_block = var.subnet_ip_cidr_range[count.index] + cidr_block = "10.0.${count.index}.0/24" + map_public_ip_on_launch = true + vpc_id = var.vpc_id == null ? aws_vpc.vpc[0].id : var.vpc_id + + tags = { + Name = "${var.prefix}-subnet-${count.index + 1}" + } +} + +resource "aws_internet_gateway" "internet-gateway" { + count = var.vpc_id == null ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + + tags = { + Name = "${var.prefix}-ig" + } +} + +resource "aws_route_table" "route-table" { + count = var.vpc_id == null ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.internet-gateway[0].id + } +} + +resource "aws_route_table_association" "rt-association" { + count = var.subnet_id == null ? 1 : 0 + + subnet_id = var.subnet_id == null ? "${aws_subnet.subnet.*.id[0]}" : var.subnet_id + route_table_id = aws_route_table.route-table[0].id +} + resource "aws_security_group" "sg_allowall" { - count = var.create_security_group ? 1 : 0 - vpc_id = var.vpc_id + count = var.create_security_group == true ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id name = "${var.prefix}-allow-nodes" description = "Allow traffic for nodes in the cluster" @@ -77,10 +134,10 @@ resource "aws_instance" "instance" { count = var.instance_count ami = data.aws_ami.ubuntu.id instance_type = var.instance_type - subnet_id = var.subnet_id + subnet_id = var.subnet_id == null ? 
"${aws_subnet.subnet.*.id[0]}" : var.subnet_id key_name = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name - vpc_security_group_ids = [var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group] + vpc_security_group_ids = [var.create_security_group == true ? aws_security_group.sg_allowall[0].id : var.instance_security_group_id] user_data = var.user_data root_block_device { @@ -102,7 +159,7 @@ resource "aws_instance" "instance" { type = "ssh" host = var.bastion_host == null ? self.public_ip : self.private_ip user = var.ssh_username - private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_pem : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) + private_key = data.local_file.ssh-private-key.content bastion_host = var.bastion_host != null ? var.bastion_host.address : null bastion_user = var.bastion_host != null ? var.bastion_host.user : null diff --git a/modules/infra/aws/ec2/outputs.tf b/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..99955f7a --- /dev/null +++ b/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,30 @@ +output "dependency" { + value = var.instance_count != 0 ? aws_instance.instance[0].arn : null +} + +output "instances_public_ip" { + value = aws_instance.instance.*.public_ip +} + +output "instances_private_ip" { + value = aws_instance.instance.*.private_ip +} + +output "instance_ips" { + value = [ + for i in aws_instance.instance[*] : + { + public_ip = i.public_ip + private_ip = i.private_ip + private_dns = i.private_dns + } + ] +} + +output "node_username" { + value = var.ssh_username +} + +output "sg-id" { + value = var.create_security_group == true ? aws_security_group.sg_allowall[0].id : var.instance_security_group_id +} diff --git a/modules/infra/aws/variables.tf b/modules/infra/aws/ec2/variables.tf similarity index 75% rename from modules/infra/aws/variables.tf rename to modules/infra/aws/ec2/variables.tf index ea4dc590..0475b3a0 100644 --- a/modules/infra/aws/variables.tf +++ b/modules/infra/aws/ec2/variables.tf @@ -1,13 +1,7 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} - -variable "aws_secret_key" { +variable "prefix" { type = string - description = "AWS secret key used to create AWS infrastructure" - default = null + description = "Prefix added to names of all resources" + default = "rancher-terraform" } variable "aws_region" { @@ -51,37 +45,33 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = "rancher-terraform" +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true } -variable "tag_begin" { - type = number - description = "When module is being called mode than once, begin tagging from this number" - default = 1 +variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" + default = null } -variable "instance_type" { +variable "ssh_private_key_path" { type = string - description = "Instance type used for all EC2 instances" - default = "t3.medium" - nullable = false + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size 
(GB)" - default = "80" - nullable = false +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = 3 - nullable = false +variable "vpc_ip_cidr_range" { + type = string + default = "10.0.0.0/16" + description = "Range of private IPs available for the AWS VPC" } variable "vpc_id" { @@ -96,50 +86,42 @@ variable "subnet_id" { default = null } -variable "create_ssh_key_pair" { +variable "create_security_group" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false + description = "Should create the security group associated with the instance(s)" + default = true nullable = false } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false } -variable "ssh_key_pair_path" { +variable "instance_type" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false } -# Used in CI/CD as we don't store the SSH key local. It would read from a secret and -# the contents are passed on directly. Used when create_ssh_key_pair is false and -# ssh_key_pair_name is null -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." - default = null - sensitive = true +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false } -variable "ssh_private_key_path" { +variable "instance_disk_size" { type = string - description = "Path to write the generated SSH private key" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true + description = "Specify root disk size (GB)" + default = "80" nullable = false } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "instance_security_group_id" { type = string description = "Provide a pre-existing security group ID" default = null @@ -152,13 +134,6 @@ variable "ssh_username" { nullable = false } -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = false - nullable = false -} - variable "user_data" { description = "User data content for EC2 instance(s)" default = null @@ -181,6 +156,12 @@ variable "iam_instance_profile" { default = null } +variable "tag_begin" { + type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 +} + variable "tags" { description = "User-provided tags for the resources" type = map(string) diff --git a/modules/infra/aws/versions.tf b/modules/infra/aws/ec2/versions.tf similarity index 100% rename from modules/infra/aws/versions.tf rename to modules/infra/aws/ec2/versions.tf diff --git a/modules/infra/aws/outputs.tf b/modules/infra/aws/outputs.tf deleted file mode 100644 index e638dc76..00000000 --- a/modules/infra/aws/outputs.tf +++ /dev/null @@ -1,42 +0,0 @@ -output "dependency" { - value = var.instance_count != 0 ? 
aws_instance.instance[0].arn : null -} - -output "instances_public_ip" { - value = aws_instance.instance.*.public_ip -} - -output "instances_private_ip" { - value = aws_instance.instance.*.private_ip -} - -output "instance_ips" { - value = [ - for i in aws_instance.instance[*] : - { - public_ip = i.public_ip - private_ip = i.private_ip - private_dns = i.private_dns - } - ] -} - -output "node_username" { - value = var.ssh_username -} - -output "ssh_key" { - value = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) -} - -output "ssh_key_path" { - value = var.create_ssh_key_pair ? local_file.private_key_pem[0].filename : var.ssh_key_pair_path -} - -output "ssh_key_pair_name" { - value = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name -} - -output "sg-id" { - value = var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group -} \ No newline at end of file diff --git a/modules/infra/aws/provider.tf b/modules/infra/aws/provider.tf deleted file mode 100644 index f14e1d72..00000000 --- a/modules/infra/aws/provider.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - access_key = var.aws_access_key != null ? var.aws_access_key : null - secret_key = var.aws_secret_key != null ? var.aws_secret_key : null - region = var.aws_region -} \ No newline at end of file diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index c30b9afa..009517a1 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -1,4 +1,12 @@ +# Setup local variables locals { + vpc = var.vpc == null ? "${var.prefix}-vpc" : var.vpc + subnet = var.subnet == null ? "${var.prefix}-subnet" : var.subnet + create_firewall = var.create_firewall == null ? false : true + private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path +} + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" diff --git a/recipes/upstream/aws/k3s/main.tf_bkp b/recipes/upstream/aws/k3s/main.tf_bkp new file mode 100644 index 00000000..c30b9afa --- /dev/null +++ b/recipes/upstream/aws/k3s/main.tf_bkp @@ -0,0 +1,121 @@ +locals { + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" +} + +module "k3s_first" { + source = "../../../../modules/distribution/k3s" + k3s_token = var.k3s_token + k3s_version = var.k3s_version + k3s_channel = var.k3s_channel + k3s_config = var.k3s_config +} + +module "k3s_first_server" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = 1 + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = var.ssh_key_pair_name + ssh_key_pair_path = var.ssh_key_pair_path + ssh_username = var.ssh_username + spot_instances = var.spot_instances + aws_region = var.aws_region + create_security_group = var.create_security_group + instance_security_group = var.instance_security_group + subnet_id = var.subnet_id + user_data = module.k3s_first.k3s_server_user_data +} + +module "k3s_additional" { + source = "../../../../modules/distribution/k3s" + k3s_token = module.k3s_first.k3s_token + k3s_version = var.k3s_version + k3s_channel = var.k3s_channel + k3s_config = var.k3s_config + first_server_ip = module.k3s_first_server.instances_private_ip[0] +} + +module "k3s_additional_servers" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = var.server_instance_count - 1 + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = false + ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name + ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) + ssh_username = var.ssh_username + spot_instances = var.spot_instances + tag_begin = 2 + aws_region = var.aws_region + create_security_group = false + instance_security_group = module.k3s_first_server.sg-id + subnet_id = var.subnet_id + user_data = module.k3s_additional.k3s_server_user_data +} + + +module "k3s_workers" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = var.worker_instance_count + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = false + ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name + ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) + ssh_username = var.ssh_username + spot_instances = var.spot_instances + aws_region = var.aws_region + create_security_group = false + instance_security_group = module.k3s_first_server.sg-id + subnet_id = var.subnet_id + user_data = module.k3s_additional.k3s_worker_user_data +} + + +data "local_file" "ssh_private_key" { + depends_on = [module.k3s_first_server] + filename = pathexpand(module.k3s_first_server.ssh_key_path) +} + +resource "ssh_resource" "retrieve_kubeconfig" { + host = module.k3s_first_server.instances_public_ip[0] + commands = [ + "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + ] + user = var.ssh_username + private_key = data.local_file.ssh_private_key.content +} + +resource "local_file" "kube_config_yaml" { + filename = local.kc_file + content = ssh_resource.retrieve_kubeconfig.result + file_permission = "0600" +} + +resource "local_file" "kube_config_yaml_backup" { + filename = local.kc_file_backup + content = ssh_resource.retrieve_kubeconfig.result + file_permission = "0600" +} + +locals { + rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) +} + +module "rancher_install" { 
+ source = "../../../../modules/rancher" + dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency + kubeconfig_file = local_file.kube_config_yaml.filename + rancher_hostname = local.rancher_hostname + rancher_replicas = min(var.rancher_replicas, var.server_instance_count) + rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_password = var.rancher_password + rancher_version = var.rancher_version + wait = var.wait +} diff --git a/recipes/upstream/aws/rke/README.md b/recipes/upstream/aws/rke/README.md index ae7ac039..49ff204b 100644 --- a/recipes/upstream/aws/rke/README.md +++ b/recipes/upstream/aws/rke/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE +# Upstream | AWS | EC2 x RKE -This module is used to establish a Rancher (local) management cluster using AWS and RKE. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). Documentation can be found [here](./docs.md). @@ -11,90 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` - -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. 
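Collected in one place, the required variables listed above translate into a `terraform.tfvars` roughly like the sketch below; all values are placeholders to adjust:

```terraform
# Sketch of a minimal terraform.tfvars; every value below is a placeholder.
prefix           = "myname"
aws_region       = "us-east-1"
instance_count   = 3
ssh_username     = "ubuntu"
rancher_hostname = "rancher"
rancher_password = "at-least-12-characters"
```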
+**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-cluster.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-cluster.local_file.private_key_pem -target=module.aws-ec2-upstream-cluster.local_file.public_key_pem -auto-approve ; terraform apply -target=module.aws-ec2-upstream-cluster -target=helm_release.ingress-nginx -target=module.rke -auto-approve ; terraform state rm module.rke.local_file.kube_config_yaml ; terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -target=helm_release.ingress-nginx -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Advanced - -Target a specific resource/module to action the changes only for that resource/module - -For example, target only the `rke_cluster` resource to re-run the equivalent of `rke up` - -```bash -terraform apply -target module.rke.rke_cluster.this -target module.rke.local_file.kube_config_yaml -``` - -This also updates the kube_config generated by RKE. - -### Notes - -A log file for the RKE provisioning is written to `rke.log` - See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE](../../../../modules/distribution/rke) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful [(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke/docs.md b/recipes/upstream/aws/rke/docs.md index 0ab0640b..b6f226c8 100644 --- a/recipes/upstream/aws/rke/docs.md +++ b/recipes/upstream/aws/rke/docs.md @@ -1,52 +1,52 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | -| [upstream-cluster](#module\_upstream-cluster) | ../../../../modules/infra/aws | n/a | ## Resources -No resources. +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | 
`null` | no | -| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | -| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | ## Outputs @@ -54,7 +54,5 @@ No resources. 
|------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 4a20cf25..88ee8096 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -1,18 +1,19 @@ -module "upstream-cluster" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id +module "aws-ec2-upstream-cluster" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_public_key_path = var.ssh_public_key_path + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -20,43 +21,65 @@ module "upstream-cluster" { docker_version = var.docker_version } ) + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags +} + +resource "null_resource" "wait-docker-startup" { + depends_on = [module.aws-ec2-upstream-cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + ssh_private_key_path = var.ssh_private_key_path != null ? 
var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" } module "rke" { source = "../../../../modules/distribution/rke" prefix = var.prefix - dependency = module.upstream-cluster.dependency - ssh_private_key_path = module.upstream-cluster.ssh_key_path + dependency = [resource.null_resource.wait-docker-startup] + ssh_private_key_path = local.ssh_private_key_path node_username = var.ssh_username - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version + # kubernetes_version = var.kubernetes_version - rancher_nodes = [for instance_ips in module.upstream-cluster.instance_ips : + rancher_nodes = [for instance_ips in module.aws-ec2-upstream-cluster.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane", "worker"], - ssh_key_path = module.upstream-cluster.ssh_key_path - ssh_key = null - node_username = module.upstream-cluster.node_username + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, hostname_override = null } ] } +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + locals { - rancher_hostname = join(".", ["rancher", module.upstream-cluster.instances_public_ip[0], "sslip.io"]) + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) + } module "rancher_install" { source = "../../../../modules/rancher" - dependency = module.rke.dependency - kubeconfig_file = module.rke.rke_kubeconfig_filename + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/recipes/upstream/aws/rke/outputs.tf b/recipes/upstream/aws/rke/outputs.tf index cfe13e44..3f0a3cc5 100644 --- a/recipes/upstream/aws/rke/outputs.tf +++ b/recipes/upstream/aws/rke/outputs.tf @@ -1,25 +1,17 @@ output "instances_public_ip" { - value = module.upstream-cluster.instances_public_ip + value = module.aws-ec2-upstream-cluster.instances_public_ip } output "instances_private_ip" { - value = module.upstream-cluster.instances_private_ip -} - -output "rancher_hostname" { - value = local.rancher_hostname + value = module.aws-ec2-upstream-cluster.instances_private_ip } output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git 
a/recipes/upstream/aws/rke/provider.tf b/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke/terraform.tfvars.example b/recipes/upstream/aws/rke/terraform.tfvars.example index c155c6ef..7787da60 100644 --- a/recipes/upstream/aws/rke/terraform.tfvars.example +++ b/recipes/upstream/aws/rke/terraform.tfvars.example @@ -1,48 +1,96 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null -## -- Override the default k8s version used by RKE -# kubernetes_version = "v1.24.10-rancher4-1" +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Script that will run when the VMs start +# user_data = "" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" \ No newline at end of file +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index 260e9690..ec2a12e2 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable 
"aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,139 +47,103 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null -} +# variable "create_ssh_key_pair" { +# description = "Specify if a new SSH key pair needs to be created for the instances" +# default = true +#} -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null -} +# variable "ssh_key_pair_name" {} -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" - default = null -} +# variable "ssh_public_key_path" {} -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null -} +# variable "vpc_ip_cidr_range" {} -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null -} +# variable "vpc_id" {} -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" - default = null -} +# variable "subnet_id" {} -variable "install_docker" { - type = bool - description = "Should install docker while creating the instance" - default = true -} +# variable "create_security_group" {} -variable "docker_version" { - type = string - description = "Docker version to install on nodes" - default = "20.10" -} +variable "instance_count" {} -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string -} +# variable "instance_type" {} -variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string +# variable "spot_instances" {} - validation { - condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" - } -} +# variable "instance_disk_size" {} -variable "rancher_version" { - description = "Rancher version to install" +# variable "instance_security_group_id" {} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null - type = string } -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} +#variable "bastion_host" { +# type = object({ +# address = string +# user = string +# ssh_key = string +# ssh_key_path = string +# }) +# default = null +# description = "Bastion host configuration to access the instances" +#} -variable "create_ssh_key_pair" { +# variable "iam_instance_profile" {} + +# variable "tags" {} + +variable "install_docker" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null + description = "Install Docker while creating the instances" + default = true } -variable "ssh_key_pair_name" { +variable "docker_version" { type = string - description = "Specify the SSH key name to use (that's already present 
in AWS)" - default = null + description = "Docker version to install on nodes" + default = "20.10" } -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" +# variable "kubernetes_version" {} + +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "spot_instances" { +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" type = bool - description = "Use spot instances" - default = null + default = true } -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} +variable "rancher_hostname" {} -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null +variable "rancher_password" { + type = string + + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." + } } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "rancher_version" { + description = "Rancher version to install" type = string - description = "Provide a pre-existing security group ID" default = null } - -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" -} From ac24390227d76185924ce7a0b2b02ec6d84ec03d Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 25 Jun 2024 16:37:12 +0200 Subject: [PATCH 02/35] Reviewed tests for AWS EC2 and AWS EC2 x RKE x Rancher --- tests/modules/infra/aws/README.md | 1 - tests/modules/infra/aws/ec2/README.md | 32 +++++++ tests/modules/infra/aws/ec2/docs.md | 40 ++++++++ tests/modules/infra/aws/ec2/main.tf | 7 ++ tests/modules/infra/aws/ec2/outputs.tf | 7 ++ tests/modules/infra/aws/ec2/provider.tf | 36 +++++++ .../infra/aws/ec2/terraform.tfvars.example | 20 ++++ tests/modules/infra/aws/ec2/user_data.tmpl | 9 ++ tests/modules/infra/aws/ec2/variables.tf | 19 ++++ tests/modules/infra/aws/main.tf | 29 ------ tests/recipes/upstream/aws/rke/README.md | 31 ++++++ tests/recipes/upstream/aws/rke/docs.md | 57 +++++++++++ tests/recipes/upstream/aws/rke/main.tf | 79 +++++++++++++-- tests/recipes/upstream/aws/rke/outputs.tf | 17 ++++ tests/recipes/upstream/aws/rke/provider.tf | 36 +++++++ .../upstream/aws/rke/terraform.tfvars.example | 96 +++++++++++++++++++ tests/recipes/upstream/aws/rke/user_data.tmpl | 9 ++ tests/recipes/upstream/aws/rke/variables.tf | 59 ++++++++++-- 18 files changed, 536 insertions(+), 48 deletions(-) delete mode 100644 tests/modules/infra/aws/README.md create mode 100644 tests/modules/infra/aws/ec2/README.md create mode 100644 tests/modules/infra/aws/ec2/docs.md create mode 100644 tests/modules/infra/aws/ec2/main.tf create mode 100644 tests/modules/infra/aws/ec2/outputs.tf create mode 100644 tests/modules/infra/aws/ec2/provider.tf create mode 100644 tests/modules/infra/aws/ec2/terraform.tfvars.example create mode 100644 tests/modules/infra/aws/ec2/user_data.tmpl create mode 100644 tests/modules/infra/aws/ec2/variables.tf delete mode 100644 tests/modules/infra/aws/main.tf create mode 100644 tests/recipes/upstream/aws/rke/README.md create mode 100644 
tests/recipes/upstream/aws/rke/docs.md create mode 100644 tests/recipes/upstream/aws/rke/outputs.tf create mode 100644 tests/recipes/upstream/aws/rke/provider.tf create mode 100644 tests/recipes/upstream/aws/rke/terraform.tfvars.example create mode 100644 tests/recipes/upstream/aws/rke/user_data.tmpl diff --git a/tests/modules/infra/aws/README.md b/tests/modules/infra/aws/README.md deleted file mode 100644 index bb1fa9d2..00000000 --- a/tests/modules/infra/aws/README.md +++ /dev/null @@ -1 +0,0 @@ -This directory has code to test the aws infra [module](../../../../modules/infra/aws). diff --git a/tests/modules/infra/aws/ec2/README.md b/tests/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b27c012d --- /dev/null +++ b/tests/modules/infra/aws/ec2/README.md @@ -0,0 +1,32 @@ +# TEST - AWS EC2 instances deploy + +This directory has code to test the AWS EC2 [module](../../../../../modules/infra/aws/ec2). + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd test/modules/infra/aws/ec2 +``` + +- Edit `./variables.tf` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). + +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 diff --git a/tests/modules/infra/aws/ec2/docs.md b/tests/modules/infra/aws/ec2/docs.md new file mode 100644 index 00000000..42859dea --- /dev/null +++ b/tests/modules/infra/aws/ec2/docs.md @@ -0,0 +1,40 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. 
+ +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | diff --git a/tests/modules/infra/aws/ec2/main.tf b/tests/modules/infra/aws/ec2/main.tf new file mode 100644 index 00000000..f3f90174 --- /dev/null +++ b/tests/modules/infra/aws/ec2/main.tf @@ -0,0 +1,7 @@ +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} diff --git a/tests/modules/infra/aws/ec2/outputs.tf b/tests/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..28474230 --- /dev/null +++ b/tests/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,7 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} diff --git a/tests/modules/infra/aws/ec2/provider.tf b/tests/modules/infra/aws/ec2/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/modules/infra/aws/ec2/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/modules/infra/aws/ec2/terraform.tfvars.example b/tests/modules/infra/aws/ec2/terraform.tfvars.example new file mode 100644 index 00000000..f5d4fd1e --- /dev/null +++ b/tests/modules/infra/aws/ec2/terraform.tfvars.example @@ -0,0 +1,20 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. 
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- The number of nodes +instance_count = 1 + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" diff --git a/tests/modules/infra/aws/ec2/user_data.tmpl b/tests/modules/infra/aws/ec2/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/modules/infra/aws/ec2/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/modules/infra/aws/ec2/variables.tf b/tests/modules/infra/aws/ec2/variables.tf new file mode 100644 index 00000000..efdb192c --- /dev/null +++ b/tests/modules/infra/aws/ec2/variables.tf @@ -0,0 +1,19 @@ +variable "prefix" { + default = "ec2-test" +} + +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} diff --git a/tests/modules/infra/aws/main.tf b/tests/modules/infra/aws/main.tf deleted file mode 100644 index ab6acd57..00000000 --- a/tests/modules/infra/aws/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "test1_all_defaults" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -module "test2_specify_sg" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -resource "aws_vpc" "for_test3" { - -} - -module "test3_specify_dynamic_vpc" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_ssh_key_pair = true - vpc_id = aws_vpc.for_test3.id -} diff --git a/tests/recipes/upstream/aws/rke/README.md b/tests/recipes/upstream/aws/rke/README.md new file mode 100644 index 00000000..d06f70fe --- /dev/null +++ b/tests/recipes/upstream/aws/rke/README.md @@ -0,0 +1,31 @@ +# Upstream | AWS | EC2 x RKE + +This directory contains the code for testing the AWS EC2 x RKE x Rancher modules. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/upstream/aws/rke +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `ssh_private_key_path`, `instance_count`, `ssh_username`, `user_data`, `install_docker`, `docker_version`, `waiting_time`, `ingress_provider`, `bootstrap_rancher`, `rancher_hostname`, and `rancher_password`). +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
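A minimal set of values for the variables listed above might look like the sketch below. Every value is an illustrative placeholder rather than a default shipped with this repository; the variable names themselves come from this test's `variables.tf` and `terraform.tfvars.example`, and the values can be set either by editing the defaults in `./variables.tf` or by copying `./terraform.tfvars.example` to `./terraform.tfvars`:

```hcl
# Illustrative placeholder values only -- adjust for your own AWS account before applying
prefix           = "ec2-rke-test"          # prefix applied to the AWS resource names
aws_region       = "us-east-1"             # any supported AWS region
instance_count   = 1                       # each node gets the etcd, controlplane and worker roles
ssh_username     = "ubuntu"                # user allowed SSH access to the instances
rancher_hostname = "rancher"               # combined with the first node public IP and sslip.io
rancher_password = "an-example-password!"  # must be at least 12 characters
```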
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/upstream/aws/rke/docs.md b/tests/recipes/upstream/aws/rke/docs.md new file mode 100644 index 00000000..5d51fd1b --- /dev/null +++ b/tests/recipes/upstream/aws/rke/docs.md @@ -0,0 +1,57 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../../modules/rancher | n/a | +| [rke](#module\_rke) | ../../../../../modules/distribution/rke | n/a | + +## Resources + +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | n/a | `bool` | `true` | no | +| [docker\_version](#input\_docker\_version) | n/a | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | n/a | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | n/a | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `string` | `"rancher"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | `"at-least-12-characters"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [user\_data](#input\_user\_data) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | n/a | `number` | `180` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/tests/recipes/upstream/aws/rke/main.tf b/tests/recipes/upstream/aws/rke/main.tf index 5491f35e..b870cd4f 100644 --- a/tests/recipes/upstream/aws/rke/main.tf +++ b/tests/recipes/upstream/aws/rke/main.tf @@ -1,11 +1,70 @@ -module "test1_default" { - source = 
"../../../../../recipes/upstream/aws/rke" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - rancher_password = "this-is-an-insecure-password" - instance_count = 1 +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username + user_data = templatefile("${path.module}/user_data.tmpl", + { + install_docker = var.install_docker + username = var.ssh_username + docker_version = var.docker_version + } + ) +} + +resource "null_resource" "wait-docker-startup" { + depends_on = [module.aws-ec2-upstream-cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + ssh_private_key_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" +} + +module "rke" { + source = "../../../../../modules/distribution/rke" + prefix = var.prefix + dependency = [resource.null_resource.wait-docker-startup] + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username + + rancher_nodes = [for instance_ips in module.aws-ec2-upstream-cluster.instance_ips : + { + public_ip = instance_ips.public_ip, + private_ip = instance_ips.private_ip, + roles = ["etcd", "controlplane", "worker"], + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, + hostname_override = null + } + ] +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) + +} + +module "rancher_install" { + source = "../../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/tests/recipes/upstream/aws/rke/outputs.tf b/tests/recipes/upstream/aws/rke/outputs.tf new file mode 100644 index 00000000..3f0a3cc5 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/outputs.tf @@ -0,0 +1,17 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/tests/recipes/upstream/aws/rke/provider.tf b/tests/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/upstream/aws/rke/terraform.tfvars.example b/tests/recipes/upstream/aws/rke/terraform.tfvars.example new file mode 100644 index 00000000..7787da60 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/terraform.tfvars.example @@ -0,0 +1,96 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type +# instance_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/tests/recipes/upstream/aws/rke/user_data.tmpl b/tests/recipes/upstream/aws/rke/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/recipes/upstream/aws/rke/variables.tf b/tests/recipes/upstream/aws/rke/variables.tf index 21e0b5af..bca65038 100644 --- a/tests/recipes/upstream/aws/rke/variables.tf +++ b/tests/recipes/upstream/aws/rke/variables.tf @@ -1,11 +1,54 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} + 
+variable "user_data" { + default = null +} + +variable "install_docker" { + type = bool + default = true +} + +variable "docker_version" { + type = string + default = "20.10" +} + +variable "waiting_time" { + default = 180 +} + +variable "ingress_provider" { + default = "nginx" +} + +variable "bootstrap_rancher" { + type = bool + default = true +} + +variable "rancher_hostname" { + default = "rancher" +} + +variable "rancher_password" { + default = "at-least-12-characters" } From dd88caf46ca790fb5e47b28950ba068a38458d89 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Wed, 26 Jun 2024 21:18:54 +0200 Subject: [PATCH 03/35] Fixed code in path recipes/rke/split-roles/aws + Optimization of code already reviewed --- modules/infra/aws/ec2/docs.md | 6 +- modules/infra/aws/ec2/main.tf | 5 +- modules/infra/aws/ec2/outputs.tf | 16 +- recipes/rke/split-roles/aws/README.md | 35 +++ recipes/rke/split-roles/aws/docs.md | 88 +++---- recipes/rke/split-roles/aws/main.tf | 116 ++++----- recipes/rke/split-roles/aws/outputs.tf | 31 ++- recipes/rke/split-roles/aws/provider.tf | 36 +++ .../split-roles/aws/terraform.tfvars.example | 108 +++++++++ recipes/rke/split-roles/aws/variables.tf | 227 ++++++------------ recipes/upstream/aws/rke/main.tf | 1 + 11 files changed, 400 insertions(+), 269 deletions(-) create mode 100644 recipes/rke/split-roles/aws/provider.tf create mode 100644 recipes/rke/split-roles/aws/terraform.tfvars.example diff --git a/modules/infra/aws/ec2/docs.md b/modules/infra/aws/ec2/docs.md index ffbd70f3..ce74a4eb 100644 --- a/modules/infra/aws/ec2/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -65,9 +65,9 @@ No modules. | Name | Description | |------|-------------| -| [dependency](#output\_dependency) | n/a | | [instance\_ips](#output\_instance\_ips) | n/a | | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [node\_username](#output\_node\_username) | n/a | -| [sg-id](#output\_sg-id) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/modules/infra/aws/ec2/main.tf b/modules/infra/aws/ec2/main.tf index 5112a087..ebc88eb2 100644 --- a/modules/infra/aws/ec2/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -1,4 +1,3 @@ -# Condition to use an existing keypair if a keypair name and file is also provided locals { new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path @@ -19,7 +18,7 @@ resource "local_file" "private_key_pem" { resource "local_file" "public_key_pem" { count = var.create_ssh_key_pair ? 1 : 0 - filename = "${path.cwd}/${var.prefix}-ssh_public_key.pem" + filename = var.ssh_public_key_path != null ? var.ssh_public_key_path : "${path.cwd}/${var.prefix}-ssh_public_key.pem" content = tls_private_key.ssh_private_key[0].public_key_openssh file_permission = "0600" } @@ -48,7 +47,7 @@ resource "aws_subnet" "subnet" { vpc_id = var.vpc_id == null ? 
aws_vpc.vpc[0].id : var.vpc_id tags = { - Name = "${var.prefix}-subnet-${count.index + 1}" + Name = "${var.prefix}-subnet" } } diff --git a/modules/infra/aws/ec2/outputs.tf b/modules/infra/aws/ec2/outputs.tf index 99955f7a..32aebcb7 100644 --- a/modules/infra/aws/ec2/outputs.tf +++ b/modules/infra/aws/ec2/outputs.tf @@ -1,7 +1,3 @@ -output "dependency" { - value = var.instance_count != 0 ? aws_instance.instance[0].arn : null -} - output "instances_public_ip" { value = aws_instance.instance.*.public_ip } @@ -21,10 +17,14 @@ output "instance_ips" { ] } -output "node_username" { - value = var.ssh_username +output "vpc" { + value = aws_vpc.vpc +} + +output "subnet" { + value = aws_subnet.subnet } -output "sg-id" { - value = var.create_security_group == true ? aws_security_group.sg_allowall[0].id : var.instance_security_group_id +output "security_group" { + value = aws_security_group.sg_allowall } diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index ff4456c4..ba37426a 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -3,3 +3,38 @@ This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd recipes/rke/split-roles/aws +``` + +- Copy `./terraform.tfvars.example` to `./terraform.tfvars` +- Edit `./terraform.tfvars` (a minimal example is sketched below, after this list) + - Update the required variables: + - `prefix` to give the resources an identifiable name (e.g., your initials or first name) + - `aws_region` to suit your region + - `master_nodes_count` to specify the number of Master nodes to create + - `worker_nodes_count` to specify the number of Worker nodes to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md).
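A minimal `./terraform.tfvars` for this recipe might start from the sketch below. The variable names are taken from this recipe's `terraform.tfvars.example`; every value is an illustrative placeholder to replace with your own settings:

```hcl
# Illustrative placeholder values only -- adjust before running terraform apply
prefix             = "demo-split-roles"      # prefix applied to the AWS resource names
aws_region         = "us-east-1"
master_nodes_count = 1                       # nodes holding the etcd and controlplane roles
worker_nodes_count = 2                       # nodes holding the worker role
ssh_username       = "ubuntu"
rancher_hostname   = "rancher"               # DNS name used to reach the Rancher console
rancher_password   = "an-example-password!"  # must be at least 12 characters
```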
+ +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** + +```bash +terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master-nodes.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-master-nodes.local_file.private_key_pem -target=module.aws-ec2-upstream-master-nodes.local_file.public_key_pem -target=module.aws-ec2-upstream-master-nodes.aws_key_pair.key_pair -target=module.aws-ec2-upstream-master-nodes.aws_vpc.vpc -target=module.aws-ec2-upstream-master-nodes.aws_subnet.subnet -target=module.aws-ec2-upstream-master-nodes.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy -auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index 8d44c5e5..ed7c80b4 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -1,66 +1,70 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [master\_nodes](#module\_master\_nodes) | ../../../../modules/infra/aws | n/a | +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../modules/infra/aws/ec2 | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | -| [worker\_nodes](#module\_worker\_nodes) | ../../../../modules/infra/aws | n/a | ## Resources -No resources. +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup-m](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-docker-startup-w](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | Enter your AWS access key | `string` | n/a | yes | -| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | n/a | yes | -| [aws\_secret\_key](#input\_aws\_secret\_key) | Enter your AWS secret key | `string` | n/a | yes | -| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the RKE nodes |
object({
    address = string
    user = string
    ssh_key_path = string
    ssh_key = string
  })
| `null` | no | -| [cloud\_provider](#input\_cloud\_provider) | Specify the cloud provider name | `string` | `null` | no | -| [create\_kubeconfig\_file](#input\_create\_kubeconfig\_file) | Boolean flag to generate a kubeconfig file (mostly used for dev only) | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | -| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"23.0.6"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_security\_group\_name](#input\_instance\_security\_group\_name) | Provide a pre-existing security group name | `string` | `null` | no | -| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | -| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [master\_nodes\_count](#input\_master\_nodes\_count) | Number of master nodes to create | `number` | `1` | no | -| [master\_nodes\_iam\_instance\_profile](#input\_master\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to master nodes | `string` | `null` | no | -| [master\_nodes\_instance\_disk\_size](#input\_master\_nodes\_instance\_disk\_size) | Disk size used for all master nodes (in GB) | `string` | `"80"` | no | -| [master\_nodes\_instance\_type](#input\_master\_nodes\_instance\_type) | Instance type used for all master nodes | `string` | `"t3.medium"` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | n/a | yes | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. 
| `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | -| [vpc\_zone](#input\_vpc\_zone) | VPC zone | `string` | `null` | no | -| [worker\_nodes\_count](#input\_worker\_nodes\_count) | Number of worker nodes to create | `number` | `1` | no | -| [worker\_nodes\_iam\_instance\_profile](#input\_worker\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to worker nodes | `string` | `null` | no | -| [worker\_nodes\_instance\_disk\_size](#input\_worker\_nodes\_instance\_disk\_size) | Disk size used for all worker nodes (in GB) | `string` | `"80"` | no | -| [worker\_nodes\_instance\_type](#input\_worker\_nodes\_instance\_type) | Instance type used for all worker nodes | `string` | `"t3.large"` | no | +| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | +| [master\_nodes\_count](#input\_master\_nodes\_count) | n/a | `any` | n/a | yes | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs | Name | Description | |------|-------------| -| [credentials](#output\_credentials) | n/a | -| 
[dependency](#output\_dependency) | n/a | -| [kube\_config\_yaml](#output\_kube\_config\_yaml) | n/a | -| [kubeconfig\_file](#output\_kubeconfig\_file) | n/a | +| [master\_instances\_private\_ip](#output\_master\_instances\_private\_ip) | n/a | +| [master\_instances\_public\_ip](#output\_master\_instances\_public\_ip) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | +| [worker\_instances\_private\_ip](#output\_worker\_instances\_private\_ip) | n/a | +| [worker\_instances\_public\_ip](#output\_worker\_instances\_public\_ip) | n/a | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index f7ab3d8c..7691fa2a 100644 --- a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -1,20 +1,20 @@ -module "master_nodes" { - source = "../../../../modules/infra/aws" +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.aws-ec2-upstream-master-nodes.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.aws-ec2-upstream-master-nodes.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.aws-ec2-upstream-master-nodes.security_group[0].id +} - prefix = "${var.prefix}-m" - instance_count = var.master_nodes_count - instance_type = var.master_nodes_instance_type - instance_disk_size = var.master_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-master-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.master_nodes_count + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -22,27 +22,22 @@ module "master_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.master_nodes_iam_instance_profile != null ? 
var.master_nodes_iam_instance_profile : null - tags = var.tags } -module "worker_nodes" { - source = "../../../../modules/infra/aws" - - prefix = "${var.prefix}-w" - instance_count = var.worker_nodes_count - instance_type = var.worker_nodes_instance_type - instance_disk_size = var.worker_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -50,45 +45,58 @@ module "worker_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.worker_nodes_iam_instance_profile != null ? var.worker_nodes_iam_instance_profile : null - tags = var.tags +} + +resource "null_resource" "wait-docker-startup-m" { + depends_on = [module.aws-ec2-upstream-master-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +resource "null_resource" "wait-docker-startup-w" { + depends_on = [module.aws-ec2-upstream-worker-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - master_nodes = [for instance_ips in module.master_nodes.instance_ips : + ssh_private_key_path = var.ssh_private_key_path != null ? 
var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + master_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.master_nodes.node_username, + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, hostname_override = instance_ips.private_dns } ] - worker_nodes = [for instance_ips in module.worker_nodes.instance_ips : + worker_nodes = [for instance_ips in module.aws-ec2-upstream-worker-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["worker"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.worker_nodes.node_username + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, hostname_override = instance_ips.private_dns } ] } module "rke" { - source = "../../../../modules/distribution/rke" - prefix = var.prefix - node_username = var.ssh_username - create_kubeconfig_file = var.create_kubeconfig_file - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version - bastion_host = var.bastion_host - cloud_provider = var.cloud_provider + source = "../../../../modules/distribution/rke" + prefix = var.prefix + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username rancher_nodes = concat(local.master_nodes, local.worker_nodes) } + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} diff --git a/recipes/rke/split-roles/aws/outputs.tf b/recipes/rke/split-roles/aws/outputs.tf index a974b706..b8272b66 100644 --- a/recipes/rke/split-roles/aws/outputs.tf +++ b/recipes/rke/split-roles/aws/outputs.tf @@ -1,18 +1,27 @@ -output "dependency" { - value = [ - var.master_nodes_count != 0 ? module.master_nodes[*].instance_ips : null, - var.worker_nodes_count != 0 ? 
module.worker_nodes[*].instance_ips : null - ] +output "master_instances_public_ip" { + value = module.aws-ec2-upstream-master-nodes.instances_public_ip } -output "kubeconfig_file" { - value = module.rke.rke_kubeconfig_filename +output "master_instances_private_ip" { + value = module.aws-ec2-upstream-master-nodes.instances_private_ip } -output "kube_config_yaml" { - value = module.rke.kube_config_yaml +output "worker_instances_public_ip" { + value = module.aws-ec2-upstream-worker-nodes.instances_public_ip } -output "credentials" { - value = module.rke.credentials +output "worker_instances_private_ip" { + value = module.aws-ec2-upstream-worker-nodes.instances_private_ip +} + +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id +} + +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id } diff --git a/recipes/rke/split-roles/aws/provider.tf b/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example new file mode 100644 index 00000000..d0527b51 --- /dev/null +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -0,0 +1,108 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Master nodes +master_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- Master nodes type +# master_nodes_type = "t3.medium" + +## -- Worker nodes type +# worker_nodes_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Master nodes disk size (GB) +# master_nodes_disk_size = 80 + +## -- Worker nodes disk size (GB) +# worker_nodes_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the Master nodes +# master_nodes_iam_instance_profile = null + +## -- IAM Instance Profile to assign to the Worker nodes +# worker_nodes_iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index dabc8251..0aaf7084 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -1,199 +1,130 @@ -variable "aws_access_key" { - type = string - description = "Enter your AWS access key" -} - -variable "aws_secret_key" { - type = string - description = "Enter your AWS secret key" - sensitive = true -} +variable "prefix" {} variable "aws_region" { type = string description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + 
"eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } } -variable "vpc_zone" { - type = string - description = "VPC zone" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "vpc_id" { - type = string - description = "VPC ID to create the instance(s) in" - default = null +variable "ssh_private_key_path" { + default = null } -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true +variable "ssh_public_key_path" { + default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "vpc_id" { + default = null } -variable "instance_security_group_name" { - type = string - description = "Provide a pre-existing security group name" - default = null +variable "subnet_id" { + default = null } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" +variable "create_security_group" { + default = null } -variable "master_nodes_count" { - type = number - description = "Number of master nodes to create" - default = 1 -} +variable "master_nodes_count" {} -variable "worker_nodes_count" { - type = number - description = "Number of worker nodes to create" - default = 1 -} +variable "worker_nodes_count" {} -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "instance_security_group_id" { + default = null } -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null -} +variable "ssh_username" {} -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } variable "install_docker" { type = bool - description = "Should install docker while creating the instance" + description = "Install Docker while creating the instances" default = true } variable "docker_version" { type = string description = "Docker version to install on nodes" - default = "23.0.6" + default = "20.10" } -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." 
- default = null - sensitive = true -} - -variable "bastion_host" { - type = object({ - address = string - user = string - ssh_key_path = string - ssh_key = string - }) - default = null - description = "Bastion host configuration to access the RKE nodes" -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "master_nodes_instance_type" { - type = string - description = "Instance type used for all master nodes" - default = "t3.medium" -} - -variable "master_nodes_instance_disk_size" { - type = string - description = "Disk size used for all master nodes (in GB)" - default = "80" -} - -variable "worker_nodes_instance_type" { - type = string - description = "Instance type used for all worker nodes" - default = "t3.large" +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } -variable "worker_nodes_instance_disk_size" { - type = string - description = "Disk size used for all worker nodes (in GB)" - default = "80" -} +variable "rancher_hostname" {} -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" - default = null -} +variable "rancher_password" { + type = string -variable "master_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to master nodes" - default = null - type = string + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." + } } -variable "worker_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to worker nodes" - default = null - type = string -} - -variable "tags" { - description = "User-provided tags for the resources" - type = map(string) - default = {} -} - -variable "cloud_provider" { - description = "Specify the cloud provider name" +variable "rancher_version" { + description = "Rancher version to install" type = string default = null } - -variable "create_kubeconfig_file" { - description = "Boolean flag to generate a kubeconfig file (mostly used for dev only)" - default = true -} diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 88ee8096..0e8fdac1 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -4,6 +4,7 @@ module "aws-ec2-upstream-cluster" { aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path # vpc_id = var.vpc_id # subnet_id = var.subnet_id From 691613dfe0c1fc4173115bc18f54a4575bc413bb Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Thu, 27 Jun 2024 18:24:56 +0200 Subject: [PATCH 04/35] Reviewed tests for AWS EC2 and AWS EC2 x RKE2 x Rancher --- recipes/upstream/aws/rke2/README.md | 81 ++------- recipes/upstream/aws/rke2/docs.md | 65 ++++---- recipes/upstream/aws/rke2/main.tf | 132 ++++++++------- recipes/upstream/aws/rke2/outputs.tf | 40 +++-- recipes/upstream/aws/rke2/provider.tf | 30 +++- .../aws/rke2/terraform.tfvars.example | 112 +++++++++---- recipes/upstream/aws/rke2/variables.tf | 154 ++++++++---------- 7 files changed, 331 insertions(+), 283 deletions(-) diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index 4dd089e5..811b29e6 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -1,6 
+1,6 @@
-# Upstream | AWS | RKE2
+# Upstream | AWS | EC2 x RKE2
-This module is used to establish a Rancher (local) management cluster using AWS and RKE2.
+This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE2](https://docs.rke2.io/).
 Documentation can be found [here](./docs.md).
@@ -11,76 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git
 cd recipes/upstream/aws/rke2
 ```
-- Copy `terraform.tfvars.example` to `terraform.tfvars`
-- Edit `terraform.tfvars`
+- Copy `./terraform.tfvars.example` to `./terraform.tfvars`
+- Edit `./terraform.tfvars`
 - Update the required variables:
-  - `aws_region` to suit your region
   - `prefix` to give the resources an identifiable name (eg, your initials or first name)
-  - Recommended: `spot_instances` can be set to `true` to use spot instances
-- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this.
-- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there.
-- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more.
-- There are more optional variables which can be tweaked under `terraform.tfvars`.
-
-**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions
-
-Execute the below commands to start deployment.
-
-```bash
-terraform init
-terraform plan
-terraform apply
-```
-The login details will be displayed in the screen once the deployment is successful. It will have the details as below.
-
-```bash
-rancher_hostname = "https://rancher..sslip.io"
-rancher_password = "initial-admin-password"
-```
+  - `aws_region` to suit your region
+  - `instance_count` to specify the number of instances to create
+  - `ssh_username` to specify the user used to create the VMs (default "ubuntu")
+  - `rancher_hostname` in order to reach the Rancher console via DNS name
+  - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters)
+- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). 

-- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform.
+**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.**
 ```bash
-AWS_PROFILE= terraform plan
-AWS_PROFILE= terraform apply
+terraform init -upgrade ; terraform apply -target=module.rke2-first-server.tls_private_key.ssh_private_key -target=module.rke2-first-server.local_file.private_key_pem -target=module.rke2-first-server.local_file.public_key_pem -target=module.rke2-first-server.aws_key_pair.key_pair -target=module.rke2-first-server.aws_vpc.vpc -target=module.rke2-first-server.aws_subnet.subnet -target=module.rke2-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ; terraform apply -target=module.rancher_install -auto-approve
 ```
-- Destroy the resources when cluster is no more needed. 
+- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE2](../../../../modules/distribution/rke2) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun terraform apply again, and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. - -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE2: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke2 + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 36b24c76..0801c42e 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -2,6 +2,10 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -9,6 +13,7 @@ | Name | Version | |------|---------| | [local](#provider\_local) | n/a | +| [null](#provider\_null) | n/a | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -16,49 +21,50 @@ | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke2\_additional](#module\_rke2\_additional) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws | n/a | -| [rke2\_first](#module\_rke2\_first) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws | n/a | +| [rke2-additional](#module\_rke2-additional) | ../../../../modules/distribution/rke2 | n/a | +| 
[rke2-additional-servers](#module\_rke2-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2-first](#module\_rke2-first) | ../../../../modules/distribution/rke2 | n/a | +| [rke2-first-server](#module\_rke2-first-server) | ../../../../modules/infra/aws/ec2 | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | 
Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName value | `string` | `"nginx"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | | [rke2\_config](#input\_rke2\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | | [rke2\_token](#input\_rke2\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | | [rke2\_version](#input\_rke2\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | ## Outputs @@ -66,7 +72,8 @@ |------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_password](#output\_rancher\_password) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index b4c6cc0a..be1aa4bc 100644 --- 
a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -1,99 +1,121 @@ locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.rke2-first-server.security_group[0].id + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } -module "rke2_first" { +module "rke2-first" { source = "../../../../modules/distribution/rke2" rke2_token = var.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config } -module "rke2_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.ssh_key_pair_name - subnet_id = var.subnet_id - user_data = module.rke2_first.rke2_user_data +module "rke2-first-server" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2-first.rke2_user_data } -module "rke2_additional" { +module "rke2-additional" { source = "../../../../modules/distribution/rke2" - rke2_token = module.rke2_first.rke2_token + rke2_token = module.rke2-first.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config - first_server_ip = module.rke2_first_server.instances_private_ip[0] + first_server_ip = module.rke2-first-server.instances_private_ip[0] } -module "rke2_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - 1 - instance_type = 
var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.rke2_first_server.ssh_key_pair_name - ssh_key_pair_path = module.rke2_first_server.ssh_key_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.rke2_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.rke2_additional.rke2_user_data +module "rke2-additional-servers" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2-additional.rke2_user_data } data "local_file" "ssh_private_key" { - depends_on = [module.rke2_first_server] - filename = module.rke2_first_server.ssh_key_path + depends_on = [module.rke2-first-server] + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.rke2_first_server.instances_public_ip[0] +resource "ssh_resource" "retrieve-kubeconfig" { + host = module.rke2-first-server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.rke2_first_server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" + "sudo sed 's/127.0.0.1/${module.rke2-first-server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" ] user = var.ssh_username private_key = data.local_file.ssh_private_key.content } -resource "local_file" "kube_config_yaml" { +resource "local_file" "kube-config-yaml" { filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { +resource "local_file" "kube-config-yaml-backup" { filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke2-additional-servers] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.instance_count > 1 ? 
module.rke2_additional_servers.dependency : module.rke2_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kc_file rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index 25659cfc..a85d4257 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,25 +1,39 @@ +output "instances_private_ip" { + value = concat([module.rke2-first-server.instances_private_ip], [module.rke2-additional-servers.instances_private_ip]) +} + output "instances_public_ip" { - value = concat([module.rke2_first_server.instances_public_ip], [module.rke2_additional_servers.instances_public_ip]) + value = concat([module.rke2-first-server.instances_public_ip], [module.rke2-additional-servers.instances_public_ip]) } -output "instances_private_ip" { - value = concat([module.rke2_first_server.instances_private_ip], [module.rke2_additional_servers.instances_private_ip]) +output "vpc" { + value = module.rke2-first-server.vpc[0].id } -output "rancher_hostname" { - value = local.rancher_hostname +output "subnet" { + value = module.rke2-first-server.subnet[0].id } -output "rancher_url" { - value = "https://${local.rancher_hostname}" +output "security_group" { + value = module.rke2-first-server.security_group[0].id } -output "rancher_password" { - value = var.rancher_bootstrap_password +# Uncomment for debugging purposes +#output "rke2_first_server_config_file" { +# value = nonsensitive(module.rke2-first.rke2_user_data) +#} + +# Uncomment for debugging purposes +#output "rke2_additional_servers_config_file" { +# value = nonsensitive(module.rke2-additional.rke2_user_data) +#} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/rke2/provider.tf b/recipes/upstream/aws/rke2/provider.tf index 6997a762..8e915083 100644 --- a/recipes/upstream/aws/rke2/provider.tf +++ b/recipes/upstream/aws/rke2/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # 
shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke2/terraform.tfvars.example b/recipes/upstream/aws/rke2/terraform.tfvars.example index f084ca75..3b85cf4e 100644 --- a/recipes/upstream/aws/rke2/terraform.tfvars.example +++ b/recipes/upstream/aws/rke2/terraform.tfvars.example @@ -1,51 +1,99 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Override the default k8s version used by RKE2 -# rke2_version = "v1.25.10+rke2r1" +## -- AWS VPC used for all resources +# vpc_id = null -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- RKE2 version +# rke2_version = "v1.28.3+rke2r2" + ## -- RKE2 token, override the programmatically generated token # rke2_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- RKE2 custom config file +# rke2_config = "" + +## -- RKE2 KUBECONFIG file path +# kube_config_path = "" + +## -- RKE2 KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/rke2/variables.tf b/recipes/upstream/aws/rke2/variables.tf index 3e27b687..e4263217 100644 --- a/recipes/upstream/aws/rke2/variables.tf +++ b/recipes/upstream/aws/rke2/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,30 +47,60 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +variable "ssh_public_key_path" { + default = null +} + +# variable 
"vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "rke2_version" { type = string description = "Kubernetes version to use for the RKE2 cluster" @@ -103,85 +129,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." } } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} - -variable "ssh_key_pair_path" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "nginx" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } From 03c6a34bd4b97f3f9a95822c4b36e4564584b59f Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Fri, 28 Jun 2024 17:08:02 +0200 Subject: [PATCH 05/35] Fixed code in path recipes/rke/split-roles/aws - Added Rancher deployment --- recipes/rke/split-roles/aws/README.md | 2 +- recipes/rke/split-roles/aws/docs.md | 11 +++---- 
recipes/rke/split-roles/aws/main.tf | 30 +++++++++++++++---- recipes/rke/split-roles/aws/outputs.tf | 26 ++++++++-------- .../split-roles/aws/terraform.tfvars.example | 10 +++---- recipes/rke/split-roles/aws/variables.tf | 14 ++++++++- 6 files changed, 64 insertions(+), 29 deletions(-) diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index ba37426a..0a90c9b5 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -31,7 +31,7 @@ terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master - Destroy the resources when finished ```bash -terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index ed7c80b4..75c5341d 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -20,6 +20,7 @@ |------|--------|---------| | [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../modules/infra/aws/ec2 | n/a | | [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | ## Resources @@ -42,11 +43,11 @@ | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | -| [master\_nodes\_count](#input\_master\_nodes\_count) | n/a | `any` | n/a | yes | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | | [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | | [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | | [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | | [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | | [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | @@ -61,10 +62,10 @@ | Name | Description | |------|-------------| -| [master\_instances\_private\_ip](#output\_master\_instances\_private\_ip) | n/a | -| [master\_instances\_public\_ip](#output\_master\_instances\_public\_ip) | n/a | +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | | [security\_group](#output\_security\_group) | n/a | | [subnet](#output\_subnet) | n/a | | [vpc](#output\_vpc) | n/a | -| [worker\_instances\_private\_ip](#output\_worker\_instances\_private\_ip) | n/a | -| [worker\_instances\_public\_ip](#output\_worker\_instances\_public\_ip) | n/a | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index 7691fa2a..4c837074 100644 --- 
a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -13,7 +13,7 @@ module "aws-ec2-upstream-master-nodes" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region - instance_count = var.master_nodes_count + instance_count = var.server_nodes_count ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { @@ -63,14 +63,14 @@ resource "null_resource" "wait-docker-startup-w" { locals { ssh_private_key_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" - master_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : + server_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane"], ssh_key_path = local.ssh_private_key_path, ssh_key = null, - hostname_override = instance_ips.private_dns + hostname_override = null } ] worker_nodes = [for instance_ips in module.aws-ec2-upstream-worker-nodes.instance_ips : @@ -80,7 +80,7 @@ locals { roles = ["worker"], ssh_key_path = local.ssh_private_key_path, ssh_key = null, - hostname_override = instance_ips.private_dns + hostname_override = null } ] } @@ -90,8 +90,9 @@ module "rke" { prefix = var.prefix ssh_private_key_path = local.ssh_private_key_path node_username = var.ssh_username + ingress_provider = var.ingress_provider - rancher_nodes = concat(local.master_nodes, local.worker_nodes) + rancher_nodes = concat(local.server_nodes, local.worker_nodes) } resource "null_resource" "wait-k8s-services-startup" { @@ -100,3 +101,22 @@ resource "null_resource" "wait-k8s-services-startup" { command = "sleep ${var.waiting_time}" } } + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) +} + +module "rancher_install" { + source = "../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_version = var.rancher_version + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}" + ] +} diff --git a/recipes/rke/split-roles/aws/outputs.tf b/recipes/rke/split-roles/aws/outputs.tf index b8272b66..f992227d 100644 --- a/recipes/rke/split-roles/aws/outputs.tf +++ b/recipes/rke/split-roles/aws/outputs.tf @@ -1,17 +1,9 @@ -output "master_instances_public_ip" { - value = module.aws-ec2-upstream-master-nodes.instances_public_ip +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) } -output "master_instances_private_ip" { - value = module.aws-ec2-upstream-master-nodes.instances_private_ip -} - -output "worker_instances_public_ip" { - value = module.aws-ec2-upstream-worker-nodes.instances_public_ip -} - -output "worker_instances_private_ip" { - value = module.aws-ec2-upstream-worker-nodes.instances_private_ip +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) } output "vpc" { @@ -25,3 +17,13 @@ output "subnet" { output "security_group" { value = module.aws-ec2-upstream-master-nodes.security_group[0].id } + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example index d0527b51..a1847526 100644 --- a/recipes/rke/split-roles/aws/terraform.tfvars.example +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -41,14 +41,14 @@ aws_region = "" #Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html -## -- The number of Master nodes -master_nodes_count = 1 +## -- The number of Server nodes +server_nodes_count = 1 ## -- The number of Worker nodes worker_nodes_count = 1 ## -- Master nodes type -# master_nodes_type = "t3.medium" +# server_nodes_type = "t3.medium" ## -- Worker nodes type # worker_nodes_type = "t3.medium" @@ -57,7 +57,7 @@ worker_nodes_count = 1 # spot_instances = false ## -- Master nodes disk size (GB) -# master_nodes_disk_size = 80 +# server_nodes_disk_size = 80 ## -- Worker nodes disk size (GB) # worker_nodes_disk_size = 80 @@ -75,7 +75,7 @@ ssh_username = "ubuntu" # bastion_host = null ## -- IAM Instance Profile to assign to the Master nodes -# master_nodes_iam_instance_profile = null +# server_nodes_iam_instance_profile = null ## -- IAM Instance Profile to assign to the Worker nodes # worker_nodes_iam_instance_profile = null diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index 0aaf7084..b3017b3a 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -69,7 +69,19 @@ variable "create_security_group" { default = null } -variable "master_nodes_count" {} +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 + + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." + } +} variable "worker_nodes_count" {} From feebbd57885c281dd1a819078b093ec0c9f63f73 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 1 Jul 2024 15:08:46 +0200 Subject: [PATCH 06/35] Fixed AWS x RKE2 instances count --- recipes/upstream/aws/rke2/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index be1aa4bc..84ae7825 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -58,7 +58,7 @@ module "rke2-additional-servers" { vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group - instance_count = var.instance_count + instance_count = var.instance_count - 1 # instance_type = var.instance_type # spot_instances = var.spot_instances # instance_disk_size = var.instance_disk_size From 9364d503f66a4e291c9d50b06f5faa4e91fdfa3d Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 10:36:20 +0200 Subject: [PATCH 07/35] Rewrote AWS EC2 x K3S recipe --- recipes/rke/split-roles/aws/README.md | 4 +- recipes/upstream/aws/k3s/README.md | 83 ++------ recipes/upstream/aws/k3s/docs.md | 74 +++---- recipes/upstream/aws/k3s/main.tf | 181 ++++++++++-------- recipes/upstream/aws/k3s/main.tf_bkp | 121 ------------ recipes/upstream/aws/k3s/outputs.tf | 24 +-- recipes/upstream/aws/k3s/provider.tf | 30 ++- .../upstream/aws/k3s/terraform.tfvars.example | 122 ++++++++---- recipes/upstream/aws/k3s/variables.tf | 178 ++++++++--------- 9 files changed, 362 insertions(+), 455 deletions(-) delete mode 100644 recipes/upstream/aws/k3s/main.tf_bkp diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index 0a90c9b5..a7a1f8b1 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -16,8 +16,8 @@ cd recipes/rke/split-roles/aws - Update the required variables: - `prefix` to give the resources an identifiable name (eg, your initials or 
first name)
   - `aws_region` to suit your region
-  - `master_node_count` to specify the number of Master nodes to create
-  - `worker_node_count` to specify the number of Worker nodes to create
+  - `server_nodes_count` to specify the number of Master nodes to create
+  - `worker_nodes_count` to specify the number of Worker nodes to create
   - `ssh_username` to specify the user used to create the VMs (default "ubuntu")
   - `rancher_hostname` in order to reach the Rancher console via DNS name
   - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters)
diff --git a/recipes/upstream/aws/k3s/README.md index 427b01bc..e80dee31 100644
--- a/recipes/upstream/aws/k3s/README.md
+++ b/recipes/upstream/aws/k3s/README.md
@@ -1,6 +1,6 @@
-# Upstream | AWS | K3S
+# Upstream | AWS | EC2 x K3s
-This module is used to establish a Rancher (local) management cluster using AWS and K3S.
+This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [K3s](https://docs.k3s.io/).
 Documentation can be found [here](./docs.md).
@@ -11,77 +11,30 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git
 cd recipes/upstream/aws/k3s
 ```
-- Copy `terraform.tfvars.example` to `terraform.tfvars`
-- Edit `terraform.tfvars`
+- Copy `./terraform.tfvars.example` to `./terraform.tfvars`
+- Edit `./terraform.tfvars`
 - Update the required variables:
-  - `aws_region` to suit your region
-  - uncomment `instance_type` and change the instance type if needed.
   - `prefix` to give the resources an identifiable name (eg, your initials or first name)
-  - Recommended: `spot_instances` can be set to `true` to use spot instances
-- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this.
-- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there.
-- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more.
-- There are more optional variables which can be tweaked under `terraform.tfvars`.
-
-**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions
-
-Execute the below commands to start deployment.
-
-```bash
-terraform init
-terraform plan -var-file terraform.tfvars
-terraform apply -var-file terraform.tfvars
-```
-The login details will be displayed in the screen once the deployment is successful. It will have the details as below.
-
-```bash
-rancher_hostname = "https://rancher..sslip.io"
-rancher_password = "initial-admin-password"
-```
+  - `aws_region` to suit your region
+  - `server_nodes_count` to specify the number of Master nodes to create
+  - `worker_nodes_count` to specify the number of Worker nodes to create
+  - `ssh_username` to specify the user used to create the VMs (default "ubuntu")
+  - `rancher_hostname` in order to reach the Rancher console via DNS name
+  - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters)
+- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). 
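For illustration, a minimal `terraform.tfvars` for this recipe could look like the sketch below; the values are placeholders to adapt to your environment, and the variable names match the required inputs listed above:

```hcl
prefix             = "myname"
aws_region         = "us-east-1"
server_nodes_count = 1
worker_nodes_count = 1
ssh_username       = "ubuntu"
rancher_hostname   = "rancher"
rancher_password   = "at-least-12-characters"
```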
-- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -var-file terraform.tfvars -AWS_PROFILE= terraform apply -var-file terraform.tfvars +terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_private_key.ssh_private_key -target=module.k3s-first-server.local_file.private_key_pem -target=module.k3s-first-server.local_file.public_key_pem -target=module.k3s-first-server.aws_key_pair.key_pair -target=module.k3s-first-server.aws_vpc.vpc -target=module.k3s-first-server.aws_subnet.subnet -target=module.k3s-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy -var-file terraform.tfvars +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [K3S](../../../../modules/distribution/k3s) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - K3s: https://github.com/rancherlabs/tf-rancher-up/tree/main/modules/distribution/k3s + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index d85c7101..998163cb 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -2,6 +2,10 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -9,59 +13,61 @@ | Name | Version | |------|---------| | [local](#provider\_local) | n/a | +| [null](#provider\_null) | n/a | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [k3s\_additional](#module\_k3s\_additional) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws | n/a | -| [k3s\_first](#module\_k3s\_first) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws | n/a | -| [k3s\_workers](#module\_k3s\_workers) | ../../../../modules/infra/aws | n/a | +| [k3s-additional](#module\_k3s-additional) | ../../../../modules/distribution/k3s | n/a | +| [k3s-additional-servers](#module\_k3s-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s-additional-workers](#module\_k3s-additional-workers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s-first](#module\_k3s-first) | ../../../../modules/distribution/k3s | n/a | +| [k3s-first-server](#module\_k3s-first-server) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | -| [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | +| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| 
[local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | -| [k3s\_config](#input\_k3s\_config) | Additional k3s configuration to add to the config.yaml file | `any` | `null` | no | -| [k3s\_token](#input\_k3s\_token) | Token to use when configuring k3s nodes | `any` | `null` | no | -| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the k3s cluster | `string` | `null` | no | +| [k3s\_config](#input\_k3s\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | +| [k3s\_token](#input\_k3s\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | +| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `"v1.28.9+k3s1"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName 
value | `string` | `"traefik"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [server\_instance\_count](#input\_server\_instance\_count) | Number of server EC2 instances to create | `number` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | -| [worker\_instance\_count](#input\_worker\_instance\_count) | Number of worker EC2 instances to create | `number` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs @@ -69,7 +75,5 @@ |------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index 009517a1..123b84fa 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -1,18 +1,18 @@ -# Setup local variables locals { - vpc = var.vpc == null ? "${var.prefix}-vpc" : var.vpc - subnet = var.subnet == null ? "${var.prefix}-subnet" : var.subnet - create_firewall = var.create_firewall == null ? false : true - private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path - public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? 
"${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.k3s-first-server.security_group[0].id + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" -} - -module "k3s_first" { +module "k3s-first" { source = "../../../../modules/distribution/k3s" k3s_token = var.k3s_token k3s_version = var.k3s_version @@ -20,110 +20,125 @@ module "k3s_first" { k3s_config = var.k3s_config } -module "k3s_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - user_data = module.k3s_first.k3s_server_user_data +module "k3s-first-server" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-first.k3s_server_user_data } -module "k3s_additional" { +module "k3s-additional" { source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s_first.k3s_token + k3s_token = module.k3s-first.k3s_token k3s_version = var.k3s_version k3s_channel = var.k3s_channel k3s_config = var.k3s_config - first_server_ip = module.k3s_first_server.instances_private_ip[0] + first_server_ip = module.k3s-first-server.instances_private_ip[0] } -module "k3s_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 
var.server_instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_server_user_data +module "k3s-additional-servers" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-additional-server" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.server_nodes_count - 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-additional.k3s_server_user_data } - -module "k3s_workers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.worker_instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_worker_user_data +module "k3s-additional-workers" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-worker" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-additional.k3s_worker_user_data } - -data "local_file" "ssh_private_key" { - depends_on = [module.k3s_first_server] - filename = pathexpand(module.k3s_first_server.ssh_key_path) +data "local_file" "ssh-private-key" { + depends_on = [module.k3s-additional-workers] + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.k3s_first_server.instances_public_ip[0] +resource "ssh_resource" "retrieve-kubeconfig" { + host = module.k3s-first-server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + "sudo sed 's/127.0.0.1/${module.k3s-first-server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" ] user = 
var.ssh_username - private_key = data.local_file.ssh_private_key.content + private_key = data.local_file.ssh-private-key.content + retry_delay = "60s" } -resource "local_file" "kube_config_yaml" { +resource "local_file" "kube-config-yaml" { filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { +resource "local_file" "kube-config-yaml-backup" { filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [local_file.kube-config-yaml] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kc_file rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.server_instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/k3s/main.tf_bkp b/recipes/upstream/aws/k3s/main.tf_bkp deleted file mode 100644 index c30b9afa..00000000 --- a/recipes/upstream/aws/k3s/main.tf_bkp +++ /dev/null @@ -1,121 +0,0 @@ -locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" -} - -module "k3s_first" { - source = "../../../../modules/distribution/k3s" - k3s_token = var.k3s_token - k3s_version = var.k3s_version - k3s_channel = var.k3s_channel - k3s_config = var.k3s_config -} - -module "k3s_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - user_data = module.k3s_first.k3s_server_user_data -} - -module "k3s_additional" { - source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s_first.k3s_token - k3s_version = var.k3s_version - k3s_channel = var.k3s_channel - k3s_config = var.k3s_config - first_server_ip = module.k3s_first_server.instances_private_ip[0] -} - -module "k3s_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.server_instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_server_user_data -} - - -module "k3s_workers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.worker_instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_worker_user_data -} - - -data "local_file" "ssh_private_key" { - depends_on = [module.k3s_first_server] - filename = pathexpand(module.k3s_first_server.ssh_key_path) -} - -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.k3s_first_server.instances_public_ip[0] - commands = [ - "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" - ] - user = var.ssh_username - private_key = data.local_file.ssh_private_key.content -} - -resource "local_file" "kube_config_yaml" { - filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" -} - -resource "local_file" "kube_config_yaml_backup" { - filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" -} - -locals { - rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) -} - -module "rancher_install" { 
- source = "../../../../modules/rancher" - dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename - rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.server_instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password - rancher_password = var.rancher_password - rancher_version = var.rancher_version - wait = var.wait -} diff --git a/recipes/upstream/aws/k3s/outputs.tf b/recipes/upstream/aws/k3s/outputs.tf index 5dd2766a..c21cc80f 100644 --- a/recipes/upstream/aws/k3s/outputs.tf +++ b/recipes/upstream/aws/k3s/outputs.tf @@ -1,25 +1,17 @@ -output "instances_public_ip" { - value = concat([module.k3s_first_server.instances_public_ip], [module.k3s_additional_servers.instances_public_ip]) -} - output "instances_private_ip" { - value = concat([module.k3s_first_server.instances_private_ip], [module.k3s_additional_servers.instances_private_ip]) + value = concat([module.k3s-first-server.instances_private_ip], [module.k3s-additional-servers.instances_private_ip]) } -output "rancher_hostname" { - value = local.rancher_hostname +output "instances_public_ip" { + value = concat([module.k3s-first-server.instances_public_ip], [module.k3s-additional-servers.instances_public_ip]) } output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/k3s/provider.tf b/recipes/upstream/aws/k3s/provider.tf index 6997a762..8e915083 100644 --- a/recipes/upstream/aws/k3s/provider.tf +++ b/recipes/upstream/aws/k3s/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index c73ad2a8..1ec29de6 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -1,53 +1,105 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. 
-# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null -## -- Override the default k8s version or channel used by K3S -# k3s_version = "v1.24.14+k3s1" -k3s_channel = "v1.25" +## -- AWS Subnet used for all resources +# subnet_id = null -## -- Number and type of EC2 instances to launch -server_instance_count = 1 -worker_instance_count = 1 +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Server nodes +server_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -## -- K3S token, override the programmatically generated token +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- RKE2 version +# k3s_version = "v1.28.3+k3sr2" + +## -- K3s channel +# k3s_channel = + +## -- RKE2 token, override the programmatically generated token # k3s_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- RKE2 custom config file +# k3s_config = "" + +## -- RKE2 KUBECONFIG file path +# kube_config_path = "" + +## -- RKE2 KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index 1c13e035..a999344a 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,40 +47,78 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "server_instance_count" { - type = number - description = "Number of server EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "worker_instance_count" { - type = number - description = "Number of worker EC2 instances to create" - default = null +variable "ssh_private_key_path" { + default = 
null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_public_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +# variable "vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 + + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." + } +} + +variable "worker_nodes_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "k3s_version" { type = string - description = "Kubernetes version to use for the k3s cluster" - default = null + description = "Kubernetes version to use for the RKE2 cluster" + default = "v1.28.9+k3s1" #Version compatible with Rancher v2.8.3 } variable "k3s_channel" { @@ -94,12 +128,12 @@ variable "k3s_channel" { } variable "k3s_token" { - description = "Token to use when configuring k3s nodes" + description = "Token to use when configuring RKE2 nodes" default = null } variable "k3s_config" { - description = "Additional k3s configuration to add to the config.yaml file" + description = "Additional RKE2 configuration to add to the config.yaml file" default = null } @@ -115,85 +149,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." 
} } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} - -variable "ssh_key_pair_path" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "traefik" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } From f90b4e901e50b18ac131df88420a0b4a66dd1805 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 11:27:05 +0200 Subject: [PATCH 08/35] Fixed conflicts --- modules/rancher/docs.md | 9 ++++--- modules/rancher/main.tf | 12 ++++----- modules/rancher/variables.tf | 38 ++++++++++++++++++++------- recipes/upstream/aws/rke/docs.md | 6 +++++ recipes/upstream/aws/rke/main.tf | 22 ++++++++++------ recipes/upstream/aws/rke/variables.tf | 36 +++++++++++++++++++++++++ 6 files changed, 96 insertions(+), 27 deletions(-) diff --git a/modules/rancher/docs.md b/modules/rancher/docs.md index 6eab796f..fc1980a6 100644 --- a/modules/rancher/docs.md +++ b/modules/rancher/docs.md @@ -42,18 +42,21 @@ No modules. 
| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [cacerts\_path](#input\_cacerts\_path) | Private CA certificate to use for Rancher UI/API connectivity | `string` | `null` | no | | [cert\_manager\_enable](#input\_cert\_manager\_enable) | Install cert-manager even if not needed for Rancher, useful if migrating to certificates | `string` | `false` | no | +| [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | | [cert\_manager\_namespace](#input\_cert\_manager\_namespace) | Namespace to install cert-manager | `string` | `"cert-manager"` | no | | [cert\_manager\_version](#input\_cert\_manager\_version) | Version of cert-manager to install | `string` | `"v1.11.0"` | no | | [default\_registry](#input\_default\_registry) | Default container image registry to pull images in the format of registry.domain.com:port (systemDefaultRegistry helm value) | `string` | `null` | no | | [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | -| [helm\_password](#input\_helm\_password) | Private helm repository password | `string` | `null` | no | -| [helm\_repository](#input\_helm\_repository) | Helm repository for Rancher and cert-manager charts | `string` | `null` | no | | [helm\_timeout](#input\_helm\_timeout) | Specify the timeout value in seconds for helm operation(s) | `number` | `600` | no | -| [helm\_username](#input\_helm\_username) | Private helm repository username | `string` | `null` | no | | [kubeconfig\_file](#input\_kubeconfig\_file) | The kubeconfig to use to interact with the cluster | `string` | `"~/.kube/config"` | no | | [rancher\_additional\_helm\_values](#input\_rancher\_additional\_helm\_values) | Helm options to provide to the Rancher helm chart | `list(string)` | `[]` | no | | [rancher\_antiaffinity](#input\_rancher\_antiaffinity) | Value for antiAffinity when installing the Rancher helm chart (required/preferred) | `string` | `"required"` | no | | [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | +| [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | +| [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | +| [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | | [rancher\_hostname](#input\_rancher\_hostname) | Value for hostname when installing the Rancher helm chart | `string` | n/a | yes | | [rancher\_namespace](#input\_rancher\_namespace) | The Rancher release will be deployed to this namespace | `string` | `"cattle-system"` | no | | [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | diff --git a/modules/rancher/main.tf b/modules/rancher/main.tf 
index 3f33eb7e..8b5f13f0 100644 --- a/modules/rancher/main.tf +++ b/modules/rancher/main.tf @@ -94,9 +94,9 @@ resource "helm_release" "cert_manager" { chart = "cert-manager" create_namespace = true namespace = var.cert_manager_namespace - repository = var.helm_repository != null ? var.helm_repository : "https://charts.jetstack.io" - repository_username = var.helm_username != null ? var.helm_username : null - repository_password = var.helm_password != null ? var.helm_password : null + repository = var.cert_manager_helm_repository != null ? var.cert_manager_helm_repository : "https://charts.jetstack.io" + repository_username = var.cert_manager_helm_repository_username != null ? var.cert_manager_helm_repository_username : null + repository_password = var.cert_manager_helm_repository_password != null ? var.cert_manager_helm_repository_password : null version = var.cert_manager_version wait = false @@ -121,9 +121,9 @@ resource "helm_release" "rancher" { chart = "rancher" create_namespace = true namespace = var.rancher_namespace - repository = var.helm_repository != null ? var.helm_repository : "https://releases.rancher.com/server-charts/stable" - repository_username = var.helm_username != null ? var.helm_username : null - repository_password = var.helm_password != null ? var.helm_password : null + repository = var.rancher_helm_repository != null ? var.rancher_helm_repository : "https://releases.rancher.com/server-charts/stable" + repository_username = var.rancher_helm_repository_username != null ? var.rancher_helm_repository_username : null + repository_password = var.rancher_helm_repository_password != null ? var.rancher_helm_repository_password : null version = var.rancher_version timeout = var.helm_timeout wait = true diff --git a/modules/rancher/variables.tf b/modules/rancher/variables.tf index 975c9ced..ef2f0d3d 100644 --- a/modules/rancher/variables.tf +++ b/modules/rancher/variables.tf @@ -40,12 +40,6 @@ variable "default_registry" { type = string } -variable "helm_repository" { - description = "Helm repository for Rancher and cert-manager charts" - default = null - type = string -} - variable "rancher_additional_helm_values" { description = "Helm options to provide to the Rancher helm chart" default = [] @@ -103,14 +97,38 @@ variable "rancher_version" { type = string } -variable "helm_username" { - description = "Private helm repository username" +variable "rancher_helm_repository" { + description = "Helm repository for Rancher chart" + default = null + type = string +} + +variable "rancher_helm_repository_username" { + description = "Private Rancher helm repository username" + default = null + type = string +} + +variable "rancher_helm_repository_password" { + description = "Private Rancher helm repository password" + default = null + type = string +} + +variable "cert_manager_helm_repository" { + description = "Helm repository for Cert Manager chart" + default = null + type = string +} + +variable "cert_manager_helm_repository_username" { + description = "Private Cert Manager helm repository username" default = null type = string } -variable "helm_password" { - description = "Private helm repository password" +variable "cert_manager_helm_repository_password" { + description = "Private Cert Manager helm repository password" default = null type = string } diff --git a/recipes/upstream/aws/rke/docs.md b/recipes/upstream/aws/rke/docs.md index b6f226c8..6bbae188 100644 --- a/recipes/upstream/aws/rke/docs.md +++ b/recipes/upstream/aws/rke/docs.md @@ -35,11 +35,17 @@ 
|------|-------------|------|---------|:--------:| | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | | [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | +| [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | +| [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | | [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | | [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 0e8fdac1..522547ba 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -72,14 +72,20 @@ locals { } module "rancher_install" { - source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kubeconfig_file - rancher_hostname = local.rancher_hostname - rancher_bootstrap_password = var.rancher_password - rancher_password = var.rancher_password - bootstrap_rancher = var.bootstrap_rancher - rancher_version = var.rancher_version + source = "../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_version = var.rancher_version + rancher_helm_repository = var.rancher_helm_repository + rancher_helm_repository_username = var.rancher_helm_repository_username + rancher_helm_repository_password = var.rancher_helm_repository_password + cert_manager_helm_repository = var.cert_manager_helm_repository + cert_manager_helm_repository_username = var.cert_manager_helm_repository_username + cert_manager_helm_repository_password = var.cert_manager_helm_repository_password rancher_additional_helm_values = [ "replicas: ${var.instance_count}" ] diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index ec2a12e2..8b425798 100644 --- 
a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -147,3 +147,39 @@ variable "rancher_version" { type = string default = null } + +variable "rancher_helm_repository" { + description = "Helm repository for Rancher chart" + default = null + type = string +} + +variable "rancher_helm_repository_username" { + description = "Private Rancher helm repository username" + default = null + type = string +} + +variable "rancher_helm_repository_password" { + description = "Private Rancher helm repository password" + default = null + type = string +} + +variable "cert_manager_helm_repository" { + description = "Helm repository for Cert Manager chart" + default = null + type = string +} + +variable "cert_manager_helm_repository_username" { + description = "Private Cert Manager helm repository username" + default = null + type = string +} + +variable "cert_manager_helm_repository_password" { + description = "Private Cert Manager helm repository password" + default = null + type = string +} From ea7c74188051fa05f20a875c99ce81181a3b0541 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 12:24:11 +0200 Subject: [PATCH 09/35] Reviewed tests for AWS EC2 x RKE (split-roles) --- tests/recipes/rke/split-roles/aws/README.md | 31 +++++++++++++ tests/recipes/rke/split-roles/aws/docs.md | 44 +++++++++++++++++++ tests/recipes/rke/split-roles/aws/main.tf | 30 +++++-------- tests/recipes/rke/split-roles/aws/outputs.tf | 19 ++++++++ tests/recipes/rke/split-roles/aws/provider.tf | 36 +++++++++++++++ .../recipes/rke/split-roles/aws/variables.tf | 24 ++++++---- 6 files changed, 158 insertions(+), 26 deletions(-) create mode 100644 tests/recipes/rke/split-roles/aws/README.md create mode 100644 tests/recipes/rke/split-roles/aws/docs.md create mode 100644 tests/recipes/rke/split-roles/aws/outputs.tf create mode 100644 tests/recipes/rke/split-roles/aws/provider.tf diff --git a/tests/recipes/rke/split-roles/aws/README.md b/tests/recipes/rke/split-roles/aws/README.md new file mode 100644 index 00000000..d3790e82 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/README.md @@ -0,0 +1,31 @@ +# RKE | With split roles | AWS + +This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/rke/split-roles/aws +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `server_nodes_count`, `worker_nodes_count`, and `ssh_username`). +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
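As a sketch of that edit, the same variables can also be supplied through a `terraform.tfvars` file placed next to `variables.tf`; the sketch below only uses the variables declared in this test, and every value is a placeholder:

```terraform
# Illustrative values only - adjust to your environment before running the test.
prefix             = "ec2-test"   # prefix added to the names of all AWS resources
aws_region         = "us-east-1"  # region where the test instances are created
server_nodes_count = 3            # number of master nodes
worker_nodes_count = 3            # number of worker nodes
ssh_username       = "ubuntu"     # SSH user on the instances
```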
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/rke/split-roles/aws/docs.md b/tests/recipes/rke/split-roles/aws/docs.md new file mode 100644 index 00000000..48e9c812 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/docs.md @@ -0,0 +1,44 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | n/a | `number` | `3` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `number` | `3` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/tests/recipes/rke/split-roles/aws/main.tf b/tests/recipes/rke/split-roles/aws/main.tf index ed638dd3..9f693428 100644 --- a/tests/recipes/rke/split-roles/aws/main.tf +++ b/tests/recipes/rke/split-roles/aws/main.tf @@ -1,21 +1,15 @@ -module "test1_default" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true +module "aws-ec2-upstream-master-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.server_nodes_count + ssh_username = var.ssh_username } -module "test2_pass_existing_key" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - ssh_key_pair_name = "junk" - ssh_key_pair_path = "~/somepath" +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + instance_count = var.worker_nodes_count + ssh_username = var.ssh_username } diff --git a/tests/recipes/rke/split-roles/aws/outputs.tf 
b/tests/recipes/rke/split-roles/aws/outputs.tf new file mode 100644 index 00000000..02d833fb --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/outputs.tf @@ -0,0 +1,19 @@ +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) +} + +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) +} + +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id +} + +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id +} diff --git a/tests/recipes/rke/split-roles/aws/provider.tf b/tests/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/rke/split-roles/aws/variables.tf b/tests/recipes/rke/split-roles/aws/variables.tf index 21e0b5af..382f6564 100644 --- a/tests/recipes/rke/split-roles/aws/variables.tf +++ b/tests/recipes/rke/split-roles/aws/variables.tf @@ -1,11 +1,19 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "server_nodes_count" { + default = 3 +} + +variable "worker_nodes_count" { + default = 3 +} + +variable "ssh_username" { + default = "ubuntu" } From 4c917618d20d66b0c70324ee3f5cb6520656f5be Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 17:41:01 +0200 Subject: [PATCH 10/35] Fixed AWS x K3S README.md file --- recipes/upstream/aws/k3s/README.md | 2 +- recipes/upstream/aws/k3s/docs.md | 6 +++--- recipes/upstream/aws/k3s/terraform.tfvars.example | 12 ++++++------ recipes/upstream/aws/k3s/variables.tf | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index e80dee31..11bc92ca 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -1,4 +1,4 @@ -# Upstream | AWS | EC2 x RKE2 +# Upstream | AWS | EC2 x K3S This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [K3s](https://docs.k3s.io/). 
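For context on what this recipe expects as input after the changes above, a minimal `terraform.tfvars` might look like the following sketch; every value is a placeholder, and the full list of options lives in the recipe's `terraform.tfvars.example`:

```terraform
# Minimal illustrative inputs for the AWS EC2 x K3S recipe (placeholder values).
prefix             = "myname"                  # identifiable prefix for all resources
aws_region         = "us-east-1"               # AWS region used for the cluster
server_nodes_count = 3                         # must be 1, 3 or 5 to keep the etcd quorum
worker_nodes_count = 1                         # number of worker nodes
ssh_username       = "ubuntu"                  # user used for SSH access to the instances
rancher_hostname   = "rancher"                 # prefix for the generated sslip.io hostname
rancher_password   = "at-least-12-characters"  # Rancher password, minimum 12 characters
```

With such a file in place, the `terraform init`/`terraform apply` flow shown in the recipe's README is all that is needed to bring up the cluster and the Rancher installation.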
diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 998163cb..056b8b30 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -47,9 +47,9 @@ | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | -| [k3s\_config](#input\_k3s\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | -| [k3s\_token](#input\_k3s\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | -| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `"v1.28.9+k3s1"` | no | +| [k3s\_config](#input\_k3s\_config) | Additional K3S configuration to add to the config.yaml file | `any` | `null` | no | +| [k3s\_token](#input\_k3s\_token) | Token to use when configuring K3S nodes | `any` | `null` | no | +| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the K3S cluster | `string` | `"v1.28.9+k3s1"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index 1ec29de6..fee7a5f6 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -68,22 +68,22 @@ ssh_username = "ubuntu" ## -- Waiting time (in seconds) # waiting_time = 180 -## -- RKE2 version +## -- K3S version # k3s_version = "v1.28.3+k3sr2" ## -- K3s channel # k3s_channel = -## -- RKE2 token, override the programmatically generated token +## -- K3S token, override the programmatically generated token # k3s_token = "string here" -## -- RKE2 custom config file -# k3s_config = "" +## -- K3S custom config file +# k3s_config = "" -## -- RKE2 KUBECONFIG file path +## -- K3S KUBECONFIG file path # kube_config_path = "" -## -- RKE2 KUBECONFIG file +## -- K3S KUBECONFIG file # kube_config_filename = "" ## -- Bootstrap the Rancher installation diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index a999344a..c5b932f7 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -117,7 +117,7 @@ variable "waiting_time" { variable "k3s_version" { type = string - description = "Kubernetes version to use for the RKE2 cluster" + description = "Kubernetes version to use for the K3S cluster" default = "v1.28.9+k3s1" #Version compatible with Rancher v2.8.3 } @@ -128,12 +128,12 @@ variable "k3s_channel" { } variable "k3s_token" { - description = "Token to use when configuring RKE2 nodes" + description = "Token to use when configuring K3S nodes" default = null } variable "k3s_config" { - description = "Additional RKE2 configuration to add to the config.yaml file" + description = "Additional K3S configuration to add to the config.yaml file" default = null } From 066cdaf17b572d41f0da6e45e5a4fa301858f82e Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Wed, 3 Jul 2024 18:42:36 +0200 Subject: [PATCH 
11/35] Fixed AWS x K3S README.md file --- recipes/upstream/aws/k3s/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index 11bc92ca..efc1c0a4 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -16,7 +16,7 @@ cd recipes/upstream/aws/k3s - Update the required variables: - `prefix` to give the resources an identifiable name (eg, your initials or first name) - `aws_region` to suit your region - - `server_nodes_count` to specify the number of Master nodes to create + - `server_nodes_count` to specify the number of Master nodes to create (to maintain ETCD quorum, the value must be 1, 3, or 5) - `worker_nodes_count` to specify the number of Worker nodes to create - `ssh_username` to specify the user used to create the VMs (default "ubuntu") - `rancher_hostname` in order to reach the Rancher console via DNS name @@ -31,7 +31,7 @@ terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_pr - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform state rm module.rancher_install ; terraform destroy -auto-approve ``` See full argument list for each module in use: From 70ba27ddc842899a1925b636fc6285b3b4ef3912 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Fri, 12 Jul 2024 08:59:55 +0200 Subject: [PATCH 12/35] Fixed copy/paste issue --- recipes/upstream/aws/rke/variables.tf | 36 --------------------------- 1 file changed, 36 deletions(-) diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index 04e46b93..8b425798 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -183,39 +183,3 @@ variable "cert_manager_helm_repository_password" { default = null type = string } - -variable "rancher_helm_repository" { - description = "Helm repository for Rancher chart" - default = null - type = string -} - -variable "rancher_helm_repository_username" { - description = "Private Rancher helm repository username" - default = null - type = string -} - -variable "rancher_helm_repository_password" { - description = "Private Rancher helm repository password" - default = null - type = string -} - -variable "cert_manager_helm_repository" { - description = "Helm repository for Cert Manager chart" - default = null - type = string -} - -variable "cert_manager_helm_repository_username" { - description = "Private Cert Manager helm repository username" - default = null - type = string -} - -variable "cert_manager_helm_repository_password" { - description = "Private Cert Manager helm repository password" - default = null - type = string -} From 8802eadfeca757334c25170844170bfa2f7dbd5d Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 10:46:59 +0200 Subject: [PATCH 13/35] Fixed ssh key mngt --- modules/infra/aws/ec2/data.tf | 6 ------ modules/infra/aws/ec2/docs.md | 1 - modules/infra/aws/ec2/main.tf | 2 +- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/modules/infra/aws/ec2/data.tf b/modules/infra/aws/ec2/data.tf index ba260588..ce8eb122 100644 --- a/modules/infra/aws/ec2/data.tf +++ b/modules/infra/aws/ec2/data.tf @@ -16,9 +16,3 @@ data "aws_ami" "ubuntu" { values = ["hvm"] } } - -# Save the private SSH key in the Terraform data source for later use -data "local_file" "ssh-private-key" { - depends_on = [local_file.private_key_pem] - filename = 
local.private_ssh_key_path -} diff --git a/modules/infra/aws/ec2/docs.md b/modules/infra/aws/ec2/docs.md index ce74a4eb..68e7b0ba 100644 --- a/modules/infra/aws/ec2/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -33,7 +33,6 @@ No modules. | [tls_private_key.ssh_private_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.ubuntu](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | -| [local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs diff --git a/modules/infra/aws/ec2/main.tf b/modules/infra/aws/ec2/main.tf index ebc88eb2..d58d13d3 100644 --- a/modules/infra/aws/ec2/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -158,7 +158,7 @@ resource "aws_instance" "instance" { type = "ssh" host = var.bastion_host == null ? self.public_ip : self.private_ip user = var.ssh_username - private_key = data.local_file.ssh-private-key.content + private_key = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? file("${path.cwd}/${var.prefix}-ssh_private_key.pem") : local.private_ssh_key_path.private_key_pem bastion_host = var.bastion_host != null ? var.bastion_host.address : null bastion_user = var.bastion_host != null ? var.bastion_host.user : null From 62b3416eb11d202ca2eff54d35478b87b801ac26 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 15:02:01 +0200 Subject: [PATCH 14/35] Shortened the deploy commands --- recipes/upstream/aws/k3s/README.md | 4 ++-- recipes/upstream/aws/k3s/docs.md | 2 -- recipes/upstream/aws/k3s/main.tf | 2 ++ recipes/upstream/aws/k3s/outputs.tf | 8 -------- recipes/upstream/aws/rke/README.md | 4 ++-- recipes/upstream/aws/rke2/README.md | 4 ++-- recipes/upstream/aws/rke2/docs.md | 5 ----- recipes/upstream/aws/rke2/main.tf | 1 + recipes/upstream/aws/rke2/outputs.tf | 20 -------------------- 9 files changed, 9 insertions(+), 41 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index efc1c0a4..3476c12a 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -26,12 +26,12 @@ cd recipes/upstream/aws/k3s **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_private_key.ssh_private_key -target=module.k3s-first-server.local_file.private_key_pem -target=module.k3s-first-server.local_file.public_key_pem -target=module.k3s-first-server.aws_key_pair.key_pair -target=module.k3s-first-server.aws_vpc.vpc -target=module.k3s-first-server.aws_subnet.subnet -target=module.k3s-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform state rm module.rancher_install ; terraform destroy -auto-approve +terraform state rm module.rancher_install && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 056b8b30..21be1a6b 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ 
b/recipes/upstream/aws/k3s/docs.md @@ -73,7 +73,5 @@ | Name | Description | |------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | | [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | | [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index 123b84fa..ad06514c 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -51,6 +51,7 @@ module "k3s-additional" { module "k3s-additional-servers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = "${var.prefix}-additional-server" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair @@ -71,6 +72,7 @@ module "k3s-additional-servers" { module "k3s-additional-workers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = "${var.prefix}-worker" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair diff --git a/recipes/upstream/aws/k3s/outputs.tf b/recipes/upstream/aws/k3s/outputs.tf index c21cc80f..defc2d99 100644 --- a/recipes/upstream/aws/k3s/outputs.tf +++ b/recipes/upstream/aws/k3s/outputs.tf @@ -1,11 +1,3 @@ -output "instances_private_ip" { - value = concat([module.k3s-first-server.instances_private_ip], [module.k3s-additional-servers.instances_private_ip]) -} - -output "instances_public_ip" { - value = concat([module.k3s-first-server.instances_public_ip], [module.k3s-additional-servers.instances_public_ip]) -} - output "rancher_url" { description = "Rancher URL" value = "https://${module.rancher_install.rancher_hostname}" diff --git a/recipes/upstream/aws/rke/README.md b/recipes/upstream/aws/rke/README.md index 49ff204b..02925908 100644 --- a/recipes/upstream/aws/rke/README.md +++ b/recipes/upstream/aws/rke/README.md @@ -25,12 +25,12 @@ cd recipes/upstream/aws/rke **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-cluster.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-cluster.local_file.private_key_pem -target=module.aws-ec2-upstream-cluster.local_file.public_key_pem -auto-approve ; terraform apply -target=module.aws-ec2-upstream-cluster -target=helm_release.ingress-nginx -target=module.rke -auto-approve ; terraform state rm module.rke.local_file.kube_config_yaml ; terraform apply -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform destroy -target=helm_release.ingress-nginx -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform destroy -target=helm_release.ingress-nginx -target=module.rancher_install -auto-approve && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index 811b29e6..a35b3f9b 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -25,12 +25,12 @@ cd recipes/upstream/aws/rke2 **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply 
-target=module.rke2-first-server.tls_private_key.ssh_private_key -target=module.rke2-first-server.local_file.private_key_pem -target=module.rke2-first-server.local_file.public_key_pem -target=module.rke2-first-server.aws_key_pair.key_pair -target=module.rke2-first-server.aws_vpc.vpc -target=module.rke2-first-server.aws_subnet.subnet -target=module.rke2-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ; terraform apply -target=module.rancher_install -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 0801c42e..3f128ba4 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -70,10 +70,5 @@ | Name | Description | |------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | | [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | | [rancher\_url](#output\_rancher\_url) | Rancher URL | -| [security\_group](#output\_security\_group) | n/a | -| [subnet](#output\_subnet) | n/a | -| [vpc](#output\_vpc) | n/a | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index 84ae7825..fe336082 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -49,6 +49,7 @@ module "rke2-additional" { module "rke2-additional-servers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = var.prefix aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index a85d4257..34a6f90d 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,23 +1,3 @@ -output "instances_private_ip" { - value = concat([module.rke2-first-server.instances_private_ip], [module.rke2-additional-servers.instances_private_ip]) -} - -output "instances_public_ip" { - value = concat([module.rke2-first-server.instances_public_ip], [module.rke2-additional-servers.instances_public_ip]) -} - -output "vpc" { - value = module.rke2-first-server.vpc[0].id -} - -output "subnet" { - value = module.rke2-first-server.subnet[0].id -} - -output "security_group" { - value = module.rke2-first-server.security_group[0].id -} - # Uncomment for debugging purposes #output "rke2_first_server_config_file" { # value = nonsensitive(module.rke2-first.rke2_user_data) From 7f5bb085e310f268be3eb343a5d6254330cc25ea Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 20:34:14 +0200 Subject: [PATCH 15/35] Fixed all the AWS recipes --- modules/infra/aws/ec2/docs.md | 1 + modules/infra/aws/ec2/main.tf | 12 ++++++------ modules/infra/aws/ec2/variables.tf | 6 ++++++ recipes/upstream/aws/k3s/docs.md | 1 + recipes/upstream/aws/k3s/main.tf | 6 ++++-- recipes/upstream/aws/k3s/terraform.tfvars.example | 3 +++ recipes/upstream/aws/k3s/variables.tf | 4 ++++ recipes/upstream/aws/rke/main.tf | 3 ++- recipes/upstream/aws/rke/terraform.tfvars.example | 3 +++ recipes/upstream/aws/rke/variables.tf | 2 ++ recipes/upstream/aws/rke2/docs.md | 1 + 
recipes/upstream/aws/rke2/main.tf | 4 +++- recipes/upstream/aws/rke2/terraform.tfvars.example | 3 +++ recipes/upstream/aws/rke2/variables.tf | 4 ++++ 14 files changed, 43 insertions(+), 10 deletions(-) diff --git a/modules/infra/aws/ec2/docs.md b/modules/infra/aws/ec2/docs.md index 68e7b0ba..3d8aecbb 100644 --- a/modules/infra/aws/ec2/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -42,6 +42,7 @@ No modules. | [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
<pre>object({<br>    address = string<br>    user = string<br>    ssh_key = string<br>    ssh_key_path = string<br>  })</pre>
| `null` | no | | [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | | [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | | [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | diff --git a/modules/infra/aws/ec2/main.tf b/modules/infra/aws/ec2/main.tf index d58d13d3..53c1cbbd 100644 --- a/modules/infra/aws/ec2/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -30,7 +30,7 @@ resource "aws_key_pair" "key_pair" { } resource "aws_vpc" "vpc" { - count = var.vpc_id == null ? 1 : 0 + count = var.create_vpc ? 1 : 0 cidr_block = var.vpc_ip_cidr_range tags = { @@ -39,7 +39,7 @@ resource "aws_vpc" "vpc" { } resource "aws_subnet" "subnet" { - count = var.subnet_id == null ? 1 : 0 + count = var.create_vpc ? 1 : 0 availability_zone = data.aws_availability_zones.available.names[count.index] # cidr_block = var.subnet_ip_cidr_range[count.index] cidr_block = "10.0.${count.index}.0/24" @@ -52,7 +52,7 @@ resource "aws_subnet" "subnet" { } resource "aws_internet_gateway" "internet-gateway" { - count = var.vpc_id == null ? 1 : 0 + count = var.create_vpc ? 1 : 0 vpc_id = aws_vpc.vpc[0].id tags = { @@ -61,7 +61,7 @@ resource "aws_internet_gateway" "internet-gateway" { } resource "aws_route_table" "route-table" { - count = var.vpc_id == null ? 1 : 0 + count = var.create_vpc ? 1 : 0 vpc_id = aws_vpc.vpc[0].id route { @@ -71,7 +71,7 @@ resource "aws_route_table" "route-table" { } resource "aws_route_table_association" "rt-association" { - count = var.subnet_id == null ? 1 : 0 + count = var.create_vpc ? 1 : 0 subnet_id = var.subnet_id == null ? "${aws_subnet.subnet.*.id[0]}" : var.subnet_id route_table_id = aws_route_table.route-table[0].id @@ -158,7 +158,7 @@ resource "aws_instance" "instance" { type = "ssh" host = var.bastion_host == null ? self.public_ip : self.private_ip user = var.ssh_username - private_key = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? file("${path.cwd}/${var.prefix}-ssh_private_key.pem") : local.private_ssh_key_path.private_key_pem + private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : file("${local.private_ssh_key_path}") bastion_host = var.bastion_host != null ? var.bastion_host.address : null bastion_user = var.bastion_host != null ? 
var.bastion_host.user : null diff --git a/modules/infra/aws/ec2/variables.tf b/modules/infra/aws/ec2/variables.tf index 0475b3a0..5cfb164a 100644 --- a/modules/infra/aws/ec2/variables.tf +++ b/modules/infra/aws/ec2/variables.tf @@ -68,6 +68,12 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true +} + variable "vpc_ip_cidr_range" { type = string default = "10.0.0.0/16" diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 21be1a6b..2826f4dc 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -45,6 +45,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | | [k3s\_config](#input\_k3s\_config) | Additional K3S configuration to add to the config.yaml file | `any` | `null` | no | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index ad06514c..b6b50821 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? 
false : true @@ -28,6 +29,7 @@ module "k3s-first-server" { # ssh_key_pair_name = var.ssh_key_pair_name # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id # create_security_group = var.create_security_group @@ -51,13 +53,13 @@ module "k3s-additional" { module "k3s-additional-servers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = "${var.prefix}-additional-server" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group @@ -72,13 +74,13 @@ module "k3s-additional-servers" { module "k3s-additional-workers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = "${var.prefix}-worker" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index fee7a5f6..9fd47570 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index c5b932f7..d8c11e83 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -63,6 +63,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + # variable "vpc_ip_cidr_range" {} variable "vpc_id" { diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index bde2537f..036effa1 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -6,6 +6,7 @@ module "aws-ec2-upstream-cluster" { # ssh_key_pair_name = var.ssh_key_pair_name # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id # create_security_group = var.create_security_group @@ -89,4 +90,4 @@ module "rancher_install" { rancher_additional_helm_values = [ "replicas: ${var.instance_count}" ] -} \ No newline at end of file +} diff --git a/recipes/upstream/aws/rke/terraform.tfvars.example b/recipes/upstream/aws/rke/terraform.tfvars.example index 7787da60..4a701c8f 100644 --- a/recipes/upstream/aws/rke/terraform.tfvars.example +++ b/recipes/upstream/aws/rke/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = 
false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index 8b425798..a9513485 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -61,6 +61,8 @@ variable "ssh_private_key_path" { # variable "ssh_public_key_path" {} +# variable "create_vpc" {} + # variable "vpc_ip_cidr_range" {} # variable "vpc_id" {} diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 3f128ba4..6cc9f7e4 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -44,6 +44,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index fe336082..c0d78aff 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? 
false : true @@ -27,6 +28,7 @@ module "rke2-first-server" { # ssh_key_pair_name = var.ssh_key_pair_name # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id # create_security_group = var.create_security_group @@ -49,13 +51,13 @@ module "rke2-additional" { module "rke2-additional-servers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = var.prefix aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/upstream/aws/rke2/terraform.tfvars.example b/recipes/upstream/aws/rke2/terraform.tfvars.example index 3b85cf4e..6aa5b1f3 100644 --- a/recipes/upstream/aws/rke2/terraform.tfvars.example +++ b/recipes/upstream/aws/rke2/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/upstream/aws/rke2/variables.tf b/recipes/upstream/aws/rke2/variables.tf index e4263217..c0607891 100644 --- a/recipes/upstream/aws/rke2/variables.tf +++ b/recipes/upstream/aws/rke2/variables.tf @@ -63,6 +63,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + # variable "vpc_ip_cidr_range" {} variable "vpc_id" { From f43baff44c61e2b41ad7ddd3c143c0dc57f250ce Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 21:00:58 +0200 Subject: [PATCH 16/35] Fixed RKE split-roles recipe --- recipes/rke/split-roles/aws/README.md | 4 ++-- recipes/rke/split-roles/aws/docs.md | 1 + recipes/rke/split-roles/aws/main.tf | 2 ++ recipes/rke/split-roles/aws/terraform.tfvars.example | 3 +++ recipes/rke/split-roles/aws/variables.tf | 4 ++++ 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index a7a1f8b1..6de6add0 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -26,12 +26,12 @@ cd recipes/rke/split-roles/aws **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master-nodes.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-master-nodes.local_file.private_key_pem -target=module.aws-ec2-upstream-master-nodes.local_file.public_key_pem -target=module.aws-ec2-upstream-master-nodes.aws_key_pair.key_pair -target=module.aws-ec2-upstream-master-nodes.aws_vpc.vpc -target=module.aws-ec2-upstream-master-nodes.aws_subnet.subnet -target=module.aws-ec2-upstream-master-nodes.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform destroy 
-target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index 75c5341d..8119301a 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -39,6 +39,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index 4c837074..1b02bbf5 100644 --- a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.aws-ec2-upstream-master-nodes.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.aws-ec2-upstream-master-nodes.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? 
false : true @@ -32,6 +33,7 @@ module "aws-ec2-upstream-worker-nodes" { ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example index a1847526..e4bc2f48 100644 --- a/recipes/rke/split-roles/aws/terraform.tfvars.example +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index b3017b3a..28466f01 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -57,6 +57,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + variable "vpc_id" { default = null } From fde837265cffb0ec5e4c7055e2ad193484b2dc81 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Thu, 25 Jul 2024 19:19:06 +0200 Subject: [PATCH 17/35] One-destroy command - GKE x RKE,RKE2,K3s --- modules/infra/aws/ec2/docs.md | 6 +- modules/infra/aws/ec2/main.tf | 31 +-- recipes/standalone/aws/rke/README.md | 39 ++++ recipes/standalone/aws/rke/docs.md | 70 ++++++ recipes/standalone/aws/rke/main.tf | 98 +++++++++ recipes/standalone/aws/rke/outputs.tf | 17 ++ recipes/standalone/aws/rke/provider.tf | 36 ++++ .../aws/rke/terraform.tfvars.example | 99 +++++++++ recipes/standalone/aws/rke/user_data.tmpl | 9 + recipes/standalone/aws/rke/variables.tf | 203 ++++++++++++++++++ recipes/upstream/aws/k3s/README.md | 2 +- recipes/upstream/aws/k3s/docs.md | 19 +- recipes/upstream/aws/k3s/main.tf | 78 ++++--- recipes/upstream/aws/rke2/README.md | 2 +- recipes/upstream/aws/rke2/docs.md | 19 +- recipes/upstream/aws/rke2/main.tf | 70 +++--- recipes/upstream/aws/rke2/outputs.tf | 4 +- 17 files changed, 696 insertions(+), 106 deletions(-) create mode 100644 recipes/standalone/aws/rke/README.md create mode 100644 recipes/standalone/aws/rke/docs.md create mode 100644 recipes/standalone/aws/rke/main.tf create mode 100644 recipes/standalone/aws/rke/outputs.tf create mode 100644 recipes/standalone/aws/rke/provider.tf create mode 100644 recipes/standalone/aws/rke/terraform.tfvars.example create mode 100644 recipes/standalone/aws/rke/user_data.tmpl create mode 100644 recipes/standalone/aws/rke/variables.tf diff --git a/modules/infra/aws/ec2/docs.md b/modules/infra/aws/ec2/docs.md index 3d8aecbb..d31ae970 100644 --- a/modules/infra/aws/ec2/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -21,10 +21,10 @@ No modules. 
| Name | Type | |------|------| | [aws_instance.instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | -| [aws_internet_gateway.internet-gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | +| [aws_internet_gateway.internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | | [aws_key_pair.key_pair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | -| [aws_route_table.route-table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | -| [aws_route_table_association.rt-association](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | +| [aws_route_table.route_table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.rt_association](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | | [aws_security_group.sg_allowall](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | | [aws_subnet.subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | | [aws_vpc.vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | diff --git a/modules/infra/aws/ec2/main.tf b/modules/infra/aws/ec2/main.tf index 53c1cbbd..5f5b6a6c 100644 --- a/modules/infra/aws/ec2/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -1,7 +1,6 @@ locals { - new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" - private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path - public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + private_ssh_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path } resource "tls_private_key" "ssh_private_key" { @@ -11,14 +10,14 @@ resource "tls_private_key" "ssh_private_key" { resource "local_file" "private_key_pem" { count = var.create_ssh_key_pair ? 1 : 0 - filename = local.new_key_pair_path + filename = local.private_ssh_key_path content = tls_private_key.ssh_private_key[0].private_key_openssh file_permission = "0600" } resource "local_file" "public_key_pem" { count = var.create_ssh_key_pair ? 1 : 0 - filename = var.ssh_public_key_path != null ? var.ssh_public_key_path : "${path.cwd}/${var.prefix}-ssh_public_key.pem" + filename = local.public_ssh_key_path content = tls_private_key.ssh_private_key[0].public_key_openssh file_permission = "0600" } @@ -39,6 +38,8 @@ resource "aws_vpc" "vpc" { } resource "aws_subnet" "subnet" { + depends_on = [resource.aws_route_table.route_table[0]] + count = var.create_vpc ? 
1 : 0 availability_zone = data.aws_availability_zones.available.names[count.index] # cidr_block = var.subnet_ip_cidr_range[count.index] @@ -51,7 +52,7 @@ } -resource "aws_internet_gateway" "internet-gateway" { +resource "aws_internet_gateway" "internet_gateway" { count = var.create_vpc ? 1 : 0 vpc_id = aws_vpc.vpc[0].id @@ -60,27 +61,25 @@ } } -resource "aws_route_table" "route-table" { +resource "aws_route_table" "route_table" { count = var.create_vpc ? 1 : 0 vpc_id = aws_vpc.vpc[0].id route { cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.internet-gateway[0].id + gateway_id = aws_internet_gateway.internet_gateway[0].id } } -resource "aws_route_table_association" "rt-association" { - count = var.create_vpc ? 1 : 0 - +resource "aws_route_table_association" "rt_association" { + count = var.create_vpc ? 1 : 0 subnet_id = var.subnet_id == null ? "${aws_subnet.subnet.*.id[0]}" : var.subnet_id - route_table_id = aws_route_table.route-table[0].id + route_table_id = aws_route_table.route_table[0].id } resource "aws_security_group" "sg_allowall" { - count = var.create_security_group == true ? 1 : 0 - vpc_id = aws_vpc.vpc[0].id - + count = var.create_security_group ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id name = "${var.prefix}-allow-nodes" description = "Allow traffic for nodes in the cluster" @@ -130,6 +129,8 @@ } resource "aws_instance" "instance" { + depends_on = [resource.aws_route_table_association.rt_association[0]] + count = var.instance_count ami = data.aws_ami.ubuntu.id instance_type = var.instance_type diff --git a/recipes/standalone/aws/rke/README.md new file mode 100644 index 00000000..aec12520 --- /dev/null +++ b/recipes/standalone/aws/rke/README.md @@ -0,0 +1,39 @@ +# Standalone | AWS | EC2 x RKE + +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd recipes/standalone/aws/rke +``` + +- Copy `./terraform.tfvars.example` to `./terraform.tfvars` +- Edit `./terraform.tfvars` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md).
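For orientation, a minimal `terraform.tfvars` for this standalone recipe might look like the sketch below. The values are illustrative placeholders drawn from the defaults in `terraform.tfvars.example` and `variables.tf` (the `prefix` shown is an arbitrary example), not prescribed settings.

```terraform
# Illustrative sketch only - replace every value with your own
prefix           = "myname"                  # placeholder; identifiable prefix applied to all AWS resources
aws_region       = "us-east-1"               # AWS region where the EC2 instances are created
instance_count   = 3                         # number of nodes in the RKE cluster
ssh_username     = "ubuntu"                  # user used to log into the VMs
rancher_hostname = "rancher"                 # joined with the first public IP and sslip.io to form the Rancher URL
rancher_password = "at-least-12-characters"  # must be at least 12 characters
```

The remaining inputs in `terraform.tfvars.example` are commented out and only need to be uncommented when you want to override their defaults.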
+ +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** + +```bash +terraform init -upgrade && terraform apply -auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy -auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/standalone/aws/rke/docs.md b/recipes/standalone/aws/rke/docs.md new file mode 100644 index 00000000..54845232 --- /dev/null +++ b/recipes/standalone/aws/rke/docs.md @@ -0,0 +1,70 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws\_ec2\_upstream\_cluster](#module\_aws\_ec2\_upstream\_cluster) | ../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | +| [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | + +## Resources + +| Name | Type | +|------|------| +| [null_resource.wait_docker_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | +| [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | +| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | +| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | +| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | 
`null` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | +| [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | +| [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/standalone/aws/rke/main.tf b/recipes/standalone/aws/rke/main.tf new file mode 100644 index 00000000..23190d47 --- /dev/null +++ b/recipes/standalone/aws/rke/main.tf @@ -0,0 +1,98 @@ +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" +} + +module "aws_ec2_upstream_cluster" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = templatefile("${path.module}/user_data.tmpl", + { + install_docker = var.install_docker + username = var.ssh_username + docker_version = var.docker_version + } + ) + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags +} + +resource "null_resource" "wait_docker_startup" { + depends_on = [module.aws_ec2_upstream_cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +module "rke" { + source = "../../../../modules/distribution/rke" + prefix = var.prefix + dependency = [null_resource.wait_docker_startup] + ssh_private_key_path = local.local_ssh_private_key_path + node_username = var.ssh_username + # kubernetes_version = var.kubernetes_version + + rancher_nodes = [for instance_ips in module.aws_ec2_upstream_cluster.instance_ips : + { + public_ip = instance_ips.public_ip, + private_ip = instance_ips.private_ip, + roles = ["etcd", "controlplane", "worker"], + ssh_key_path = local.local_ssh_private_key_path, + ssh_key = null, + hostname_override = null + } + ] +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws_ec2_upstream_cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws_ec2_upstream_cluster.instances_public_ip[0], "sslip.io"]) + +} + +module "rancher_install" { + source = "../../../../modules/rancher" + dependency = [null_resource.wait_k8s_services_startup] + kubeconfig_file = local.kc_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_version = var.rancher_version + rancher_helm_repository = var.rancher_helm_repository + rancher_helm_repository_username = var.rancher_helm_repository_username + rancher_helm_repository_password = var.rancher_helm_repository_password + cert_manager_helm_repository = var.cert_manager_helm_repository + cert_manager_helm_repository_username = var.cert_manager_helm_repository_username + cert_manager_helm_repository_password = var.cert_manager_helm_repository_password + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] +} diff --git a/recipes/standalone/aws/rke/outputs.tf b/recipes/standalone/aws/rke/outputs.tf new file mode 100644 index 00000000..222c31b2 --- /dev/null +++ b/recipes/standalone/aws/rke/outputs.tf @@ -0,0 +1,17 @@ +output "instances_public_ip" { + value = module.aws_ec2_upstream_cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws_ec2_upstream_cluster.instances_private_ip +} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/recipes/standalone/aws/rke/provider.tf b/recipes/standalone/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/standalone/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/standalone/aws/rke/terraform.tfvars.example b/recipes/standalone/aws/rke/terraform.tfvars.example new file mode 100644 index 00000000..4a701c8f --- /dev/null +++ b/recipes/standalone/aws/rke/terraform.tfvars.example @@ -0,0 +1,99 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. 
https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type +# instance_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/standalone/aws/rke/user_data.tmpl b/recipes/standalone/aws/rke/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/recipes/standalone/aws/rke/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/recipes/standalone/aws/rke/variables.tf b/recipes/standalone/aws/rke/variables.tf new file mode 100644 index 00000000..c8465793 --- /dev/null +++ b/recipes/standalone/aws/rke/variables.tf @@ -0,0 +1,203 @@ +variable "prefix" {} + +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} + +variable "aws_region" { + 
type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } +} + +variable "create_ssh_key_pair" { + default = null +} + +variable "ssh_key_pair_name" { + default = null +} + +variable "ssh_private_key_path" { + default = null +} + +variable "ssh_public_key_path" { + default = null +} + +variable "create_vpc" { + default = null +} + +# variable "vpc_ip_cidr_range" {} + +# variable "vpc_id" {} + +# variable "subnet_id" {} + +# variable "create_security_group" {} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +# variable "instance_security_group_id" {} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" + default = null +} + +#variable "bastion_host" { +# type = object({ +# address = string +# user = string +# ssh_key = string +# ssh_key_path = string +# }) +# default = null +# description = "Bastion host configuration to access the instances" +#} + +# variable "iam_instance_profile" {} + +# variable "tags" {} + +variable "install_docker" { + type = bool + description = "Install Docker while creating the instances" + default = true +} + +variable "docker_version" { + type = string + description = "Docker version to install on nodes" + default = "20.10" +} + +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 120 +} + +# variable "kubernetes_version" {} + +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" +} + +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true +} + +variable "rancher_hostname" {} + +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" + type = string + default = null +} + +variable "kube_config_filename" { + description = "Filename to write the kube config" + type = string + default = null +} + +variable "rancher_password" { + type = string + + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." 
+ } +} + +variable "rancher_version" { + description = "Rancher version to install" + type = string + default = null +} + +variable "rancher_helm_repository" { + description = "Helm repository for Rancher chart" + default = null + type = string +} + +variable "rancher_helm_repository_username" { + description = "Private Rancher helm repository username" + default = null + type = string +} + +variable "rancher_helm_repository_password" { + description = "Private Rancher helm repository password" + default = null + type = string +} + +variable "cert_manager_helm_repository" { + description = "Helm repository for Cert Manager chart" + default = null + type = string +} + +variable "cert_manager_helm_repository_username" { + description = "Private Cert Manager helm repository username" + default = null + type = string +} + +variable "cert_manager_helm_repository_password" { + description = "Private Cert Manager helm repository password" + default = null + type = string +} diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index 3476c12a..2161f73a 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -31,7 +31,7 @@ terraform init -upgrade && terraform apply -auto-approve - Destroy the resources when finished ```bash -terraform state rm module.rancher_install && terraform destroy -auto-approve +terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 2826f4dc..2132e2f9 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -20,22 +20,21 @@ | Name | Source | Version | |------|--------|---------| -| [k3s-additional](#module\_k3s-additional) | ../../../../modules/distribution/k3s | n/a | -| [k3s-additional-servers](#module\_k3s-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | -| [k3s-additional-workers](#module\_k3s-additional-workers) | ../../../../modules/infra/aws/ec2 | n/a | -| [k3s-first](#module\_k3s-first) | ../../../../modules/distribution/k3s | n/a | -| [k3s-first-server](#module\_k3s-first-server) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_additional](#module\_k3s\_additional) | ../../../../modules/distribution/k3s | n/a | +| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_additional\_workers](#module\_k3s\_additional\_workers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_first](#module\_k3s\_first) | ../../../../modules/distribution/k3s | n/a | +| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | -| [local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | +| 
[local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index b6b50821..b75e839f 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -4,16 +4,16 @@ locals { local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path create_vpc = var.create_vpc == null ? false : true - vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id - subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id + vpc_id = var.vpc_id == null ? module.k3s_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.k3s_first_server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true - instance_security_group_id = local.create_security_group == "true" ? null : module.k3s-first-server.security_group[0].id + instance_security_group_id = local.create_security_group == "true" ? null : module.k3s_first_server.security_group[0].id kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" } -module "k3s-first" { +module "k3s_first" { source = "../../../../modules/distribution/k3s" k3s_token = var.k3s_token k3s_version = var.k3s_version @@ -21,14 +21,14 @@ module "k3s-first" { k3s_config = var.k3s_config } -module "k3s-first-server" { +module "k3s_first_server" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair - # ssh_key_pair_name = var.ssh_key_pair_name - # ssh_private_key_path = var.ssh_private_key_path - # ssh_public_key_path = var.ssh_public_key_path + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id @@ -39,19 +39,19 @@ module "k3s-first-server" { # instance_disk_size = var.instance_disk_size # instance_security_group_id = var.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-first.k3s_server_user_data + user_data = module.k3s_first.k3s_server_user_data } -module "k3s-additional" { +module "k3s_additional" { source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s-first.k3s_token + k3s_token = module.k3s_first.k3s_token k3s_version = var.k3s_version k3s_channel = var.k3s_channel k3s_config = var.k3s_config - first_server_ip = module.k3s-first-server.instances_private_ip[0] + first_server_ip = module.k3s_first_server.instances_private_ip[0] } -module "k3s-additional-servers" { +module "k3s_additional_servers" { source = "../../../../modules/infra/aws/ec2" prefix = "${var.prefix}-additional-server" aws_region = var.aws_region @@ -69,10 +69,10 @@ module "k3s-additional-servers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-additional.k3s_server_user_data + user_data = module.k3s_additional.k3s_server_user_data } -module "k3s-additional-workers" { +module "k3s_additional_workers" { source = "../../../../modules/infra/aws/ec2" prefix = "${var.prefix}-worker" aws_region = var.aws_region @@ -90,51 +90,61 @@ module "k3s-additional-workers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-additional.k3s_worker_user_data + user_data = module.k3s_additional.k3s_worker_user_data } -data "local_file" "ssh-private-key" { - depends_on = [module.k3s-additional-workers] - filename = local.local_ssh_private_key_path +data "local_file" "ssh_private_key" { + depends_on = [module.k3s_additional_workers] + + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve-kubeconfig" { - host = module.k3s-first-server.instances_public_ip[0] +resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + + host = module.k3s_first_server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.k3s-first-server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" ] user = var.ssh_username - private_key = data.local_file.ssh-private-key.content + private_key = data.local_file.ssh_private_key.content retry_delay = "60s" } 
-resource "local_file" "kube-config-yaml" { +resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube-config-yaml-backup" { - filename = local.kc_file_backup - file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename } -resource "null_resource" "wait-k8s-services-startup" { - depends_on = [local_file.kube-config-yaml] +provider "helm" { + kubernetes { + config_path = local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + provisioner "local-exec" { command = "sleep ${var.waiting_time}" } } locals { - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kc_file + dependency = [null_resource.wait_k8s_services_startup] + kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index a35b3f9b..ccfe949e 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -30,7 +30,7 @@ terraform init -upgrade && terraform apply -auto-approve - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve +terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 6cc9f7e4..42af12a9 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -12,8 +12,8 @@ | Name | Version | |------|---------| -| [local](#provider\_local) | n/a | -| [null](#provider\_null) | n/a | +| [local](#provider\_local) | 2.5.1 | +| [null](#provider\_null) | 3.2.2 | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -21,19 +21,18 @@ | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke2-additional](#module\_rke2-additional) | ../../../../modules/distribution/rke2 | n/a | -| [rke2-additional-servers](#module\_rke2-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | -| [rke2-first](#module\_rke2-first) | ../../../../modules/distribution/rke2 | n/a | -| [rke2-first-server](#module\_rke2-first-server) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2\_additional](#module\_rke2\_additional) | ../../../../modules/distribution/rke2 | n/a | +| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2\_first](#module\_rke2\_first) | 
../../../../modules/distribution/rke2 | n/a | +| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index c0d78aff..438745ff 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -4,30 +4,30 @@ locals { local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path create_vpc = var.create_vpc == null ? false : true - vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id - subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id + vpc_id = var.vpc_id == null ? module.rke2_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.rke2_first_server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true - instance_security_group_id = local.create_security_group == "true" ? null : module.rke2-first-server.security_group[0].id + instance_security_group_id = local.create_security_group == "true" ? null : module.rke2_first_server.security_group[0].id kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" } -module "rke2-first" { +module "rke2_first" { source = "../../../../modules/distribution/rke2" rke2_token = var.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config } -module "rke2-first-server" { +module "rke2_first_server" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair - # ssh_key_pair_name = var.ssh_key_pair_name - # ssh_private_key_path = var.ssh_private_key_path - # ssh_public_key_path = var.ssh_public_key_path + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id @@ -38,18 +38,18 @@ module "rke2-first-server" { # instance_disk_size = var.instance_disk_size # instance_security_group_id = var.instance_security_group_id ssh_username = var.ssh_username - user_data = module.rke2-first.rke2_user_data + user_data = module.rke2_first.rke2_user_data } -module "rke2-additional" { +module "rke2_additional" { source = "../../../../modules/distribution/rke2" - rke2_token = module.rke2-first.rke2_token + rke2_token = module.rke2_first.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config - first_server_ip = module.rke2-first-server.instances_private_ip[0] + first_server_ip = module.rke2_first_server.instances_private_ip[0] } -module "rke2-additional-servers" { +module "rke2_additional_servers" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region @@ -67,50 +67,60 @@ module "rke2-additional-servers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.rke2-additional.rke2_user_data + user_data = module.rke2_additional.rke2_user_data } data "local_file" "ssh_private_key" { - depends_on = [module.rke2-first-server] - filename = local.local_ssh_private_key_path + depends_on = [module.rke2_additional_servers] + + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve-kubeconfig" { - host = module.rke2-first-server.instances_public_ip[0] +resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + + host = module.rke2_first_server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.rke2-first-server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" + "sudo sed 's/127.0.0.1/${module.rke2_first_server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" ] user = var.ssh_username private_key = data.local_file.ssh_private_key.content } -resource "local_file" "kube-config-yaml" { +resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube-config-yaml-backup" { - filename = local.kc_file_backup - file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename } -resource "null_resource" "wait-k8s-services-startup" { - depends_on = [module.rke2-additional-servers] +provider "helm" { + kubernetes { + config_path 
= local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + provisioner "local-exec" { command = "sleep ${var.waiting_time}" } } locals { - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kc_file + dependency = [null_resource.wait_k8s_services_startup] + kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index 34a6f90d..34d05284 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,11 +1,11 @@ # Uncomment for debugging purposes #output "rke2_first_server_config_file" { -# value = nonsensitive(module.rke2-first.rke2_user_data) +# value = nonsensitive(module.rke2_first.rke2_user_data) #} # Uncomment for debugging purposes #output "rke2_additional_servers_config_file" { -# value = nonsensitive(module.rke2-additional.rke2_user_data) +# value = nonsensitive(module.rke2_additional.rke2_user_data) #} output "rancher_url" { From 0dd6d5b6f00264a7654163e90f13882441a8ac80 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Thu, 25 Jul 2024 23:46:07 +0200 Subject: [PATCH 18/35] Revised standalone RKE module for AWS --- recipes/standalone/aws/rke/README.md | 2 +- recipes/standalone/aws/rke/docs.md | 15 +- recipes/standalone/aws/rke/main.tf | 25 ---- recipes/standalone/aws/rke/outputs.tf | 10 +- recipes/standalone/aws/rke/variables.tf | 64 --------- recipes/upstream/aws/rke/README.md | 2 +- recipes/upstream/aws/rke/docs.md | 44 ++++-- recipes/upstream/aws/rke/main.tf | 67 ++------- recipes/upstream/aws/rke/outputs.tf | 4 +- recipes/upstream/aws/rke/variables.tf | 175 ++++++++++++++++++------ 10 files changed, 185 insertions(+), 223 deletions(-) diff --git a/recipes/standalone/aws/rke/README.md b/recipes/standalone/aws/rke/README.md index aec12520..1ba56645 100644 --- a/recipes/standalone/aws/rke/README.md +++ b/recipes/standalone/aws/rke/README.md @@ -1,4 +1,4 @@ -# Upstream | AWS | EC2 x RKE +# Upstream | AWS standalone | EC2 x RKE This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). 
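For orientation while reviewing this patch: the standalone AWS RKE recipe described above is consumed by the upstream AWS RKE recipe later in this patch as a nested module (see the `module "rke_cluster"` block in recipes/upstream/aws/rke/main.tf). A minimal sketch of such a call, using only argument names and outputs that appear in that block and illustrative placeholder values, might look like:

```terraform
# Illustrative sketch only: wraps the standalone recipe as a module, the way
# the upstream AWS RKE recipe in this patch does. All values are placeholders.
module "rke_cluster" {
  source = "../../../../recipes/standalone/aws/rke"

  prefix         = "example-rke"
  aws_region     = "us-east-1"
  instance_count = 3
  ssh_username   = "ubuntu"
  install_docker = true
  docker_version = "20.10"
}

# The recipe exposes the node IPs and the path of the generated kubeconfig.
output "instances_public_ip" {
  value = module.rke_cluster.instances_public_ip
}

output "kube_config_path" {
  value = module.rke_cluster.kube_config_path
}
```
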
diff --git a/recipes/standalone/aws/rke/docs.md b/recipes/standalone/aws/rke/docs.md index 54845232..4ad01afa 100644 --- a/recipes/standalone/aws/rke/docs.md +++ b/recipes/standalone/aws/rke/docs.md @@ -19,7 +19,6 @@ | Name | Source | Version | |------|--------|---------| | [aws\_ec2\_upstream\_cluster](#module\_aws\_ec2\_upstream\_cluster) | ../../../../modules/infra/aws/ec2 | n/a | -| [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | ## Resources @@ -34,25 +33,14 @@ | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | -| [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | -| [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | -| [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | | [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | | [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | -| [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | -| [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | -| [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | -| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | -| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | -| [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | | [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | | [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | | [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | @@ -66,5 +54,4 @@ |------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | -| [rancher\_url](#output\_rancher\_url) | Rancher URL 
| +| [kube\_config\_path](#output\_kube\_config\_path) | n/a | diff --git a/recipes/standalone/aws/rke/main.tf b/recipes/standalone/aws/rke/main.tf index 23190d47..4d7dea06 100644 --- a/recipes/standalone/aws/rke/main.tf +++ b/recipes/standalone/aws/rke/main.tf @@ -71,28 +71,3 @@ resource "null_resource" "wait_k8s_services_startup" { command = "sleep ${var.waiting_time}" } } - -locals { - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.aws_ec2_upstream_cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws_ec2_upstream_cluster.instances_public_ip[0], "sslip.io"]) - -} - -module "rancher_install" { - source = "../../../../modules/rancher" - dependency = [null_resource.wait_k8s_services_startup] - kubeconfig_file = local.kc_file - rancher_hostname = local.rancher_hostname - rancher_bootstrap_password = var.rancher_password - rancher_password = var.rancher_password - bootstrap_rancher = var.bootstrap_rancher - rancher_version = var.rancher_version - rancher_helm_repository = var.rancher_helm_repository - rancher_helm_repository_username = var.rancher_helm_repository_username - rancher_helm_repository_password = var.rancher_helm_repository_password - cert_manager_helm_repository = var.cert_manager_helm_repository - cert_manager_helm_repository_username = var.cert_manager_helm_repository_username - cert_manager_helm_repository_password = var.cert_manager_helm_repository_password - rancher_additional_helm_values = [ - "replicas: ${var.instance_count}" - ] -} diff --git a/recipes/standalone/aws/rke/outputs.tf b/recipes/standalone/aws/rke/outputs.tf index 222c31b2..3ce0f7d3 100644 --- a/recipes/standalone/aws/rke/outputs.tf +++ b/recipes/standalone/aws/rke/outputs.tf @@ -6,12 +6,6 @@ output "instances_private_ip" { value = module.aws_ec2_upstream_cluster.instances_private_ip } -output "rancher_url" { - description = "Rancher URL" - value = "https://${module.rancher_install.rancher_hostname}" -} - -output "rancher_password" { - description = "Rancher Initial Custom Password" - value = var.rancher_password +output "kube_config_path" { + value = local.kc_file } diff --git a/recipes/standalone/aws/rke/variables.tf b/recipes/standalone/aws/rke/variables.tf index c8465793..3d7afebd 100644 --- a/recipes/standalone/aws/rke/variables.tf +++ b/recipes/standalone/aws/rke/variables.tf @@ -126,19 +126,6 @@ variable "waiting_time" { # variable "kubernetes_version" {} -variable "ingress_provider" { - description = "Ingress controller provider" - default = "nginx" -} - -variable "bootstrap_rancher" { - description = "Bootstrap the Rancher installation" - type = bool - default = true -} - -variable "rancher_hostname" {} - variable "kube_config_path" { description = "The path to write the kubeconfig for the RKE cluster" type = string @@ -150,54 +137,3 @@ variable "kube_config_filename" { type = string default = null } - -variable "rancher_password" { - type = string - - validation { - condition = length(var.rancher_password) >= 12 - error_message = "The password must be at least 12 characters." 
- } -} - -variable "rancher_version" { - description = "Rancher version to install" - type = string - default = null -} - -variable "rancher_helm_repository" { - description = "Helm repository for Rancher chart" - default = null - type = string -} - -variable "rancher_helm_repository_username" { - description = "Private Rancher helm repository username" - default = null - type = string -} - -variable "rancher_helm_repository_password" { - description = "Private Rancher helm repository password" - default = null - type = string -} - -variable "cert_manager_helm_repository" { - description = "Helm repository for Cert Manager chart" - default = null - type = string -} - -variable "cert_manager_helm_repository_username" { - description = "Private Cert Manager helm repository username" - default = null - type = string -} - -variable "cert_manager_helm_repository_password" { - description = "Private Cert Manager helm repository password" - default = null - type = string -} diff --git a/recipes/upstream/aws/rke/README.md b/recipes/upstream/aws/rke/README.md index 02925908..aec12520 100644 --- a/recipes/upstream/aws/rke/README.md +++ b/recipes/upstream/aws/rke/README.md @@ -30,7 +30,7 @@ terraform init -upgrade && terraform apply -auto-approve - Destroy the resources when finished ```bash -terraform destroy -target=helm_release.ingress-nginx -target=module.rancher_install -auto-approve && terraform destroy -auto-approve +terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke/docs.md b/recipes/upstream/aws/rke/docs.md index 6bbae188..63db4c1f 100644 --- a/recipes/upstream/aws/rke/docs.md +++ b/recipes/upstream/aws/rke/docs.md @@ -10,49 +10,63 @@ ## Providers -| Name | Version | -|------|---------| -| [null](#provider\_null) | n/a | +No providers. ## Modules | Name | Source | Version | |------|--------|---------| -| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | +| [rke\_cluster](#module\_rke\_cluster) | ../../../../recipes/standalone/aws/rke | n/a | ## Resources -| Name | Type | -|------|------| -| [null_resource.wait-docker-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +No resources. ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| +| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | +| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | +| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
object({
address = string
user = string
ssh_key = string
ssh_key_path = string
})
| `null` | no | | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | | [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | | [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | +| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | +| [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | -| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | +| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | +| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | +| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | +| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | +| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | | [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | | [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | | [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | -| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | Hostname to set when installing Rancher | `string` | `null` | no | | [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present 
the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set | `any` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | +| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | +| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | | [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | -| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | ## Outputs diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 036effa1..f0df9804 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -1,5 +1,5 @@ -module "aws-ec2-upstream-cluster" { - source = "../../../../modules/infra/aws/ec2" +module "rke_cluster" { + source = "../../../../recipes/standalone/aws/rke" prefix = var.prefix aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair @@ -11,71 +11,30 @@ module "aws-ec2-upstream-cluster" { # subnet_id = var.subnet_id # create_security_group = var.create_security_group instance_count = var.instance_count - # instance_type = var.instance_type - # spot_instances = var.spot_instances - # instance_disk_size = var.instance_disk_size + #instance_type = var.instance_type + #spot_instances = var.spot_instances + #instance_disk_size = var.instance_disk_size # instance_security_group_id = var.instance_security_group_id - ssh_username = var.ssh_username - user_data = templatefile("${path.module}/user_data.tmpl", - { - install_docker = var.install_docker - username = var.ssh_username - docker_version = var.docker_version - } - ) + ssh_username = var.ssh_username + install_docker = var.install_docker + docker_version = var.docker_version # bastion_host = var.bastion_host # iam_instance_profile = var.iam_instance_profile # tags = var.tags -} - -resource "null_resource" "wait-docker-startup" { - depends_on = [module.aws-ec2-upstream-cluster.instances_public_ip] - provisioner "local-exec" { - command = "sleep ${var.waiting_time}" - } -} - -locals { - ssh_private_key_path = var.ssh_private_key_path != null ? 
var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" -} - -module "rke" { - source = "../../../../modules/distribution/rke" - prefix = var.prefix - dependency = [resource.null_resource.wait-docker-startup] - ssh_private_key_path = local.ssh_private_key_path - node_username = var.ssh_username # kubernetes_version = var.kubernetes_version - - rancher_nodes = [for instance_ips in module.aws-ec2-upstream-cluster.instance_ips : - { - public_ip = instance_ips.public_ip, - private_ip = instance_ips.private_ip, - roles = ["etcd", "controlplane", "worker"], - ssh_key_path = local.ssh_private_key_path, - ssh_key = null, - hostname_override = null - } - ] -} - -resource "null_resource" "wait-k8s-services-startup" { - depends_on = [module.rke] - provisioner "local-exec" { - command = "sleep ${var.waiting_time}" - } + kube_config_path = var.kube_config_path + kube_config_filename = var.kube_config_filename } locals { - kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke_cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke_cluster.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kubeconfig_file + dependency = [module.rke_cluster] + kubeconfig_file = module.rke_cluster.kube_config_path rancher_hostname = local.rancher_hostname rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password diff --git a/recipes/upstream/aws/rke/outputs.tf b/recipes/upstream/aws/rke/outputs.tf index 3f0a3cc5..25550770 100644 --- a/recipes/upstream/aws/rke/outputs.tf +++ b/recipes/upstream/aws/rke/outputs.tf @@ -1,9 +1,9 @@ output "instances_public_ip" { - value = module.aws-ec2-upstream-cluster.instances_public_ip + value = module.rke_cluster.instances_public_ip } output "instances_private_ip" { - value = module.aws-ec2-upstream-cluster.instances_private_ip + value = module.rke_cluster.instances_private_ip } output "rancher_url" { diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index a9513485..d471b68c 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -1,10 +1,20 @@ -variable "prefix" {} - -# variable "aws_access_key" {} +variable "prefix" { + type = string + description = "Prefix added to names of all resources" + default = null +} -# variable "aws_secret_key" {} +variable "aws_access_key" { + type = string + description = "AWS access key used to create infrastructure" + default = null +} -# variable "aws_session_token" {} +variable "aws_secret_key" { + type = string + description = "AWS secret key used to create AWS infrastructure" + default = null +} variable "aws_region" { type = string @@ -47,61 +57,128 @@ variable "aws_region" { } } -# variable "create_ssh_key_pair" { -# description = "Specify if a new SSH key pair needs to be created for the instances" -# default = true -#} +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true +} -# variable "ssh_key_pair_name" {} 
+variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" + default = null +} variable "ssh_private_key_path" { - description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + type = string + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" default = null } -# variable "ssh_public_key_path" {} - -# variable "create_vpc" {} +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" + default = null +} -# variable "vpc_ip_cidr_range" {} +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true +} -# variable "vpc_id" {} +variable "vpc_id" { + type = string + description = "VPC ID to create the instance(s) in" + default = null +} -# variable "subnet_id" {} +variable "subnet_id" { + type = string + description = "VPC Subnet ID to create the instance(s) in" + default = null +} -# variable "create_security_group" {} +variable "create_security_group" { + type = bool + description = "Should create the security group associated with the instance(s)" + default = true + nullable = false +} -variable "instance_count" {} +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false +} -# variable "instance_type" {} +variable "instance_type" { + type = string + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false +} -# variable "spot_instances" {} +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false +} -# variable "instance_disk_size" {} +variable "instance_disk_size" { + type = string + description = "Specify root disk size (GB)" + default = "80" + nullable = false +} -# variable "instance_security_group_id" {} +variable "instance_security_group_id" { + type = string + description = "Provide a pre-existing security group ID" + default = null +} -variable "ssh_username" {} +variable "ssh_username" { + type = string + description = "Username used for SSH with sudo access" + default = "ubuntu" + nullable = false +} variable "user_data" { description = "User data content for EC2 instance(s)" default = null } -#variable "bastion_host" { -# type = object({ -# address = string -# user = string -# ssh_key = string -# ssh_key_path = string -# }) -# default = null -# description = "Bastion host configuration to access the instances" -#} +variable "bastion_host" { + type = object({ + address = string + user = string + ssh_key = string + ssh_key_path = string + }) + default = null + description = "Bastion host configuration to access the instances" +} -# variable "iam_instance_profile" {} +variable "iam_instance_profile" { + type = string + description = "Specify IAM Instance Profile to assign to the instances/nodes" + default = null +} -# variable "tags" {} +variable "tag_begin" { + type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 +} + +variable "tags" { + description = "User-provided tags for the resources" + type = map(string) + default = {} +} variable "install_docker" { type = bool @@ -117,10 +194,14 @@ variable "docker_version" { variable "waiting_time" { 
description = "Waiting time (in seconds)" - default = 180 + default = 120 } -# variable "kubernetes_version" {} +variable "kubernetes_version" { + type = string + description = "Kubernetes version to use for the RKE cluster" + default = null +} variable "ingress_provider" { description = "Ingress controller provider" @@ -133,7 +214,23 @@ variable "bootstrap_rancher" { default = true } -variable "rancher_hostname" {} +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" + type = string + default = null +} + +variable "kube_config_filename" { + description = "Filename to write the kube config" + type = string + default = null +} + +variable "rancher_hostname" { + description = "Hostname to set when installing Rancher" + type = string + default = null +} variable "rancher_password" { type = string From 7884538fb0afec0ae33db5fa5d8f1f8fe694f41f Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Fri, 26 Jul 2024 10:37:31 +0200 Subject: [PATCH 19/35] Fixed recipes/standalone/aws/rke/README.md file --- recipes/standalone/aws/rke/README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/recipes/standalone/aws/rke/README.md b/recipes/standalone/aws/rke/README.md index 1ba56645..71e33587 100644 --- a/recipes/standalone/aws/rke/README.md +++ b/recipes/standalone/aws/rke/README.md @@ -18,8 +18,6 @@ cd recipes/upstream/aws/rke - `aws_region` to suit your region - `instance_count` to specify the number of instances to create - `ssh_username` to specify the user used to create the VMs (default "ubuntu") - - `rancher_hostname` in order to reach the Rancher console via DNS name - - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) - Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). 
**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** From 0f244db8ce9c3146c42e6fc1e3e1f97431b67a95 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 30 Jul 2024 08:28:59 +0200 Subject: [PATCH 20/35] Rebase --- modules/infra/aws/README.md | 102 +++++--- modules/infra/aws/ec2/README.md | 217 ++++++++++++++++++ modules/infra/aws/{ => ec2}/data.tf | 2 + modules/infra/aws/{ => ec2}/docs.md | 33 +-- modules/infra/aws/{ => ec2}/main.tf | 75 +++++- modules/infra/aws/ec2/outputs.tf | 30 +++ modules/infra/aws/{ => ec2}/variables.tf | 121 +++++----- modules/infra/aws/{ => ec2}/versions.tf | 0 modules/infra/aws/outputs.tf | 42 ---- modules/infra/aws/provider.tf | 5 - recipes/upstream/aws/k3s/main.tf | 8 + recipes/upstream/aws/k3s/main.tf_bkp | 121 ++++++++++ recipes/upstream/aws/rke/README.md | 95 ++------ recipes/upstream/aws/rke/docs.md | 54 +++-- recipes/upstream/aws/rke/main.tf | 66 +++--- recipes/upstream/aws/rke/outputs.tf | 22 +- recipes/upstream/aws/rke/provider.tf | 36 +++ .../upstream/aws/rke/terraform.tfvars.example | 115 +++++++--- recipes/upstream/aws/rke/variables.tf | 188 +++++++++------ 19 files changed, 912 insertions(+), 420 deletions(-) create mode 100644 modules/infra/aws/ec2/README.md rename modules/infra/aws/{ => ec2}/data.tf (88%) rename modules/infra/aws/{ => ec2}/docs.md (64%) rename modules/infra/aws/{ => ec2}/main.tf (57%) create mode 100644 modules/infra/aws/ec2/outputs.tf rename modules/infra/aws/{ => ec2}/variables.tf (75%) rename modules/infra/aws/{ => ec2}/versions.tf (100%) delete mode 100644 modules/infra/aws/outputs.tf delete mode 100644 modules/infra/aws/provider.tf create mode 100644 recipes/upstream/aws/k3s/main.tf_bkp create mode 100644 recipes/upstream/aws/rke/provider.tf diff --git a/modules/infra/aws/README.md b/modules/infra/aws/README.md index 768097e3..58da987e 100644 --- a/modules/infra/aws/README.md +++ b/modules/infra/aws/README.md @@ -1,49 +1,79 @@ -# Terraform | AWS Infrastructure +# Terraform | AWS - Preparatory steps -Terraform module to provide AWS nodes prepared for creating a kubernetes cluster. +In order for Terraform to run operations on your behalf, you must [install and configure the AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions). -Basic infrastructure options are provided to be coupled with other modules for example environments. +## Example -Documentation can be found [here](./docs.md). 
+#### macOS installation and setup for all users -## Examples +```console +curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg" +``` + +```console +sudo installer -pkg AWSCLIV2.pkg -target / +``` + +#### Verify installation + +```console +$ which aws +/usr/local/bin/aws +``` + +```console +$ aws --version +aws-cli/2.13.33 Python/3.11.6 Darwin/23.1.0 exe/x86_64 prompt/off +``` -#### Launch a single instance, create a keypair +#### Setup credentials and configuration -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - create_ssh_key_pair = true - user_data = | - echo "hello world" -} +##### Option 1 - AWS CLI + +```console +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_DEFAULT_REGION= +export AWS_DEFAULT_OUTPUT=text ``` -#### Provide an existing SSH key and Security Group +##### Option 2 - Manually creating credential files -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - ssh_key_pair_name = "rancher-ssh" - instance_security_group = "sg-xxxxx" -} +```console +mkdir ~/.aws ``` -#### Provide an existing VPC and Subnet +```console +cd ~/.aws +``` + +```console +cat > credentials << EOL +[default] +aws_access_key_id = +aws_secret_access_key = +EOL +``` + +```console +cat > config << EOL +[default] +region = +output = text +EOL +``` + +##### Option 3 - IAM Identity Center credentials + +```console +aws configure sso +``` + +```console +export AWS_PROFILE= +``` -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - vpc_id = "vpc-xxxxx" - subnet_id = "subnet-xxxxxx" -} +##### Verify credentials +```console +aws sts get-caller-identity ``` diff --git a/modules/infra/aws/ec2/README.md b/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b383150d --- /dev/null +++ b/modules/infra/aws/ec2/README.md @@ -0,0 +1,217 @@ +# Terraform | AWS EC2 + +Terraform modules to provide VM instances - AWS EC2. + +Documentation can be found [here](./docs.md). 
+ +## Example + +#### Launch three identical VM instances + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "vpc_id" {} + +variable "subnet_id" {} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + vpc_id = var.vpc_id + subnet_id = var.subnet_id + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` + +#### Launch two identical VM instances and a dedicated new VPC/Subnet + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" 
+ } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` diff --git a/modules/infra/aws/data.tf b/modules/infra/aws/ec2/data.tf similarity index 88% rename from modules/infra/aws/data.tf rename to modules/infra/aws/ec2/data.tf index 03859e23..ce8eb122 100644 --- a/modules/infra/aws/data.tf +++ b/modules/infra/aws/ec2/data.tf @@ -1,3 +1,5 @@ +data "aws_availability_zones" "available" {} + # TODO: Make the Ubuntu OS version configurable # TODO: Add support for ARM architecture data "aws_ami" "ubuntu" { diff --git a/modules/infra/aws/docs.md b/modules/infra/aws/ec2/docs.md similarity index 64% rename from modules/infra/aws/docs.md rename to modules/infra/aws/ec2/docs.md index 9a4a1c4a..d31ae970 100644 --- a/modules/infra/aws/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -21,50 +21,53 @@ No modules. | Name | Type | |------|------| | [aws_instance.instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | +| [aws_internet_gateway.internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | | [aws_key_pair.key_pair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | +| [aws_route_table.route_table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.rt_association](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | | [aws_security_group.sg_allowall](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_subnet.subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_vpc.vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | | [local_file.private_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.public_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [tls_private_key.ssh_private_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.ubuntu](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS 
region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | | [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
<pre>object({<br>    address = string<br>    user = string<br>    ssh_key = string<br>    ssh_key_path = string<br>  })</pre>
| `null` | no | | [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | | [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | | [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | | [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `"rancher-terraform"` | no | | [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. | `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | Path to write the generated SSH private key | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | | [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | | [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tag\_begin](#input\_tag\_begin) | When module is being called mode than once, begin tagging from this number | `number` | `1` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | | [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | | [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | | [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [vpc\_ip\_cidr\_range](#input\_vpc\_ip\_cidr\_range) | Range of private IPs available for the AWS VPC | `string` | 
`"10.0.0.0/16"` | no | ## Outputs | Name | Description | |------|-------------| -| [dependency](#output\_dependency) | n/a | | [instance\_ips](#output\_instance\_ips) | n/a | | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [node\_username](#output\_node\_username) | n/a | -| [sg-id](#output\_sg-id) | n/a | -| [ssh\_key](#output\_ssh\_key) | n/a | -| [ssh\_key\_pair\_name](#output\_ssh\_key\_pair\_name) | n/a | -| [ssh\_key\_path](#output\_ssh\_key\_path) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/modules/infra/aws/main.tf b/modules/infra/aws/ec2/main.tf similarity index 57% rename from modules/infra/aws/main.tf rename to modules/infra/aws/ec2/main.tf index 724f3814..5f5b6a6c 100644 --- a/modules/infra/aws/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -1,6 +1,6 @@ -# Condition to use an existing keypair if a keypair name and file is also provided locals { - new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + private_ssh_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path } resource "tls_private_key" "ssh_private_key" { @@ -10,21 +10,76 @@ resource "tls_private_key" "ssh_private_key" { resource "local_file" "private_key_pem" { count = var.create_ssh_key_pair ? 1 : 0 - filename = local.new_key_pair_path + filename = local.private_ssh_key_path content = tls_private_key.ssh_private_key[0].private_key_openssh file_permission = "0600" } +resource "local_file" "public_key_pem" { + count = var.create_ssh_key_pair ? 1 : 0 + filename = local.public_ssh_key_path + content = tls_private_key.ssh_private_key[0].public_key_openssh + file_permission = "0600" +} + resource "aws_key_pair" "key_pair" { count = var.create_ssh_key_pair ? 1 : 0 key_name = "tf-rancher-up-${var.prefix}" public_key = tls_private_key.ssh_private_key[0].public_key_openssh } -resource "aws_security_group" "sg_allowall" { - count = var.create_security_group ? 1 : 0 - vpc_id = var.vpc_id +resource "aws_vpc" "vpc" { + count = var.create_vpc ? 1 : 0 + cidr_block = var.vpc_ip_cidr_range + + tags = { + Name = "${var.prefix}-vpc" + } +} + +resource "aws_subnet" "subnet" { + depends_on = [resource.aws_route_table.route_table[0]] + + count = var.create_vpc ? 1 : 0 + availability_zone = data.aws_availability_zones.available.names[count.index] + # cidr_block = var.subnet_ip_cidr_range[count.index] + cidr_block = "10.0.${count.index}.0/24" + map_public_ip_on_launch = true + vpc_id = var.vpc_id == null ? aws_vpc.vpc[0].id : var.vpc_id + + tags = { + Name = "${var.prefix}-subnet" + } +} + +resource "aws_internet_gateway" "internet_gateway" { + count = var.create_vpc ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + + tags = { + Name = "${var.prefix}-ig" + } +} + +resource "aws_route_table" "route_table" { + count = var.create_vpc ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.internet_gateway[0].id + } +} + +resource "aws_route_table_association" "rt_association" { + count = var.create_vpc ? 1 : 0 + subnet_id = var.subnet_id == null ? 
"${aws_subnet.subnet.*.id[0]}" : var.subnet_id + route_table_id = aws_route_table.route_table[0].id +} + +resource "aws_security_group" "sg_allowall" { + count = var.create_security_group ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id name = "${var.prefix}-allow-nodes" description = "Allow traffic for nodes in the cluster" @@ -74,13 +129,15 @@ resource "aws_security_group" "sg_allowall" { } resource "aws_instance" "instance" { + depends_on = [resource.aws_route_table_association.rt_association[0]] + count = var.instance_count ami = data.aws_ami.ubuntu.id instance_type = var.instance_type - subnet_id = var.subnet_id + subnet_id = var.subnet_id == null ? "${aws_subnet.subnet.*.id[0]}" : var.subnet_id key_name = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name - vpc_security_group_ids = [var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group] + vpc_security_group_ids = [var.create_security_group == true ? aws_security_group.sg_allowall[0].id : var.instance_security_group_id] user_data = var.user_data root_block_device { @@ -102,7 +159,7 @@ resource "aws_instance" "instance" { type = "ssh" host = var.bastion_host == null ? self.public_ip : self.private_ip user = var.ssh_username - private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_pem : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) + private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : file("${local.private_ssh_key_path}") bastion_host = var.bastion_host != null ? var.bastion_host.address : null bastion_user = var.bastion_host != null ? var.bastion_host.user : null diff --git a/modules/infra/aws/ec2/outputs.tf b/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..32aebcb7 --- /dev/null +++ b/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,30 @@ +output "instances_public_ip" { + value = aws_instance.instance.*.public_ip +} + +output "instances_private_ip" { + value = aws_instance.instance.*.private_ip +} + +output "instance_ips" { + value = [ + for i in aws_instance.instance[*] : + { + public_ip = i.public_ip + private_ip = i.private_ip + private_dns = i.private_dns + } + ] +} + +output "vpc" { + value = aws_vpc.vpc +} + +output "subnet" { + value = aws_subnet.subnet +} + +output "security_group" { + value = aws_security_group.sg_allowall +} diff --git a/modules/infra/aws/variables.tf b/modules/infra/aws/ec2/variables.tf similarity index 75% rename from modules/infra/aws/variables.tf rename to modules/infra/aws/ec2/variables.tf index ea4dc590..5cfb164a 100644 --- a/modules/infra/aws/variables.tf +++ b/modules/infra/aws/ec2/variables.tf @@ -1,13 +1,7 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} - -variable "aws_secret_key" { +variable "prefix" { type = string - description = "AWS secret key used to create AWS infrastructure" - default = null + description = "Prefix added to names of all resources" + default = "rancher-terraform" } variable "aws_region" { @@ -51,37 +45,39 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = "rancher-terraform" +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true } -variable "tag_begin" { - type = number - description = "When module is being called mode 
than once, begin tagging from this number" - default = 1 +variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" + default = null } -variable "instance_type" { +variable "ssh_private_key_path" { type = string - description = "Instance type used for all EC2 instances" - default = "t3.medium" - nullable = false + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" - default = "80" - nullable = false +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = 3 - nullable = false +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true +} + +variable "vpc_ip_cidr_range" { + type = string + default = "10.0.0.0/16" + description = "Range of private IPs available for the AWS VPC" } variable "vpc_id" { @@ -96,50 +92,42 @@ variable "subnet_id" { default = null } -variable "create_ssh_key_pair" { +variable "create_security_group" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false + description = "Should create the security group associated with the instance(s)" + default = true nullable = false } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false } -variable "ssh_key_pair_path" { +variable "instance_type" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false } -# Used in CI/CD as we don't store the SSH key local. It would read from a secret and -# the contents are passed on directly. Used when create_ssh_key_pair is false and -# ssh_key_pair_name is null -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." 
- default = null - sensitive = true +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false } -variable "ssh_private_key_path" { +variable "instance_disk_size" { type = string - description = "Path to write the generated SSH private key" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true + description = "Specify root disk size (GB)" + default = "80" nullable = false } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "instance_security_group_id" { type = string description = "Provide a pre-existing security group ID" default = null @@ -152,13 +140,6 @@ variable "ssh_username" { nullable = false } -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = false - nullable = false -} - variable "user_data" { description = "User data content for EC2 instance(s)" default = null @@ -181,6 +162,12 @@ variable "iam_instance_profile" { default = null } +variable "tag_begin" { + type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 +} + variable "tags" { description = "User-provided tags for the resources" type = map(string) diff --git a/modules/infra/aws/versions.tf b/modules/infra/aws/ec2/versions.tf similarity index 100% rename from modules/infra/aws/versions.tf rename to modules/infra/aws/ec2/versions.tf diff --git a/modules/infra/aws/outputs.tf b/modules/infra/aws/outputs.tf deleted file mode 100644 index e638dc76..00000000 --- a/modules/infra/aws/outputs.tf +++ /dev/null @@ -1,42 +0,0 @@ -output "dependency" { - value = var.instance_count != 0 ? aws_instance.instance[0].arn : null -} - -output "instances_public_ip" { - value = aws_instance.instance.*.public_ip -} - -output "instances_private_ip" { - value = aws_instance.instance.*.private_ip -} - -output "instance_ips" { - value = [ - for i in aws_instance.instance[*] : - { - public_ip = i.public_ip - private_ip = i.private_ip - private_dns = i.private_dns - } - ] -} - -output "node_username" { - value = var.ssh_username -} - -output "ssh_key" { - value = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) -} - -output "ssh_key_path" { - value = var.create_ssh_key_pair ? local_file.private_key_pem[0].filename : var.ssh_key_pair_path -} - -output "ssh_key_pair_name" { - value = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name -} - -output "sg-id" { - value = var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group -} \ No newline at end of file diff --git a/modules/infra/aws/provider.tf b/modules/infra/aws/provider.tf deleted file mode 100644 index f14e1d72..00000000 --- a/modules/infra/aws/provider.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - access_key = var.aws_access_key != null ? var.aws_access_key : null - secret_key = var.aws_secret_key != null ? var.aws_secret_key : null - region = var.aws_region -} \ No newline at end of file diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index c30b9afa..009517a1 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -1,4 +1,12 @@ +# Setup local variables locals { + vpc = var.vpc == null ? 
"${var.prefix}-vpc" : var.vpc + subnet = var.subnet == null ? "${var.prefix}-subnet" : var.subnet + create_firewall = var.create_firewall == null ? false : true + private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path +} + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" diff --git a/recipes/upstream/aws/k3s/main.tf_bkp b/recipes/upstream/aws/k3s/main.tf_bkp new file mode 100644 index 00000000..c30b9afa --- /dev/null +++ b/recipes/upstream/aws/k3s/main.tf_bkp @@ -0,0 +1,121 @@ +locals { + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" +} + +module "k3s_first" { + source = "../../../../modules/distribution/k3s" + k3s_token = var.k3s_token + k3s_version = var.k3s_version + k3s_channel = var.k3s_channel + k3s_config = var.k3s_config +} + +module "k3s_first_server" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = 1 + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = var.ssh_key_pair_name + ssh_key_pair_path = var.ssh_key_pair_path + ssh_username = var.ssh_username + spot_instances = var.spot_instances + aws_region = var.aws_region + create_security_group = var.create_security_group + instance_security_group = var.instance_security_group + subnet_id = var.subnet_id + user_data = module.k3s_first.k3s_server_user_data +} + +module "k3s_additional" { + source = "../../../../modules/distribution/k3s" + k3s_token = module.k3s_first.k3s_token + k3s_version = var.k3s_version + k3s_channel = var.k3s_channel + k3s_config = var.k3s_config + first_server_ip = module.k3s_first_server.instances_private_ip[0] +} + +module "k3s_additional_servers" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = var.server_instance_count - 1 + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = false + ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name + ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) + ssh_username = var.ssh_username + spot_instances = var.spot_instances + tag_begin = 2 + aws_region = var.aws_region + create_security_group = false + instance_security_group = module.k3s_first_server.sg-id + subnet_id = var.subnet_id + user_data = module.k3s_additional.k3s_server_user_data +} + + +module "k3s_workers" { + source = "../../../../modules/infra/aws" + prefix = var.prefix + instance_count = var.worker_instance_count + instance_type = var.instance_type + instance_disk_size = var.instance_disk_size + create_ssh_key_pair = false + ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name + ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) + ssh_username = var.ssh_username + spot_instances = var.spot_instances + aws_region = var.aws_region + create_security_group = 
false + instance_security_group = module.k3s_first_server.sg-id + subnet_id = var.subnet_id + user_data = module.k3s_additional.k3s_worker_user_data +} + + +data "local_file" "ssh_private_key" { + depends_on = [module.k3s_first_server] + filename = pathexpand(module.k3s_first_server.ssh_key_path) +} + +resource "ssh_resource" "retrieve_kubeconfig" { + host = module.k3s_first_server.instances_public_ip[0] + commands = [ + "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + ] + user = var.ssh_username + private_key = data.local_file.ssh_private_key.content +} + +resource "local_file" "kube_config_yaml" { + filename = local.kc_file + content = ssh_resource.retrieve_kubeconfig.result + file_permission = "0600" +} + +resource "local_file" "kube_config_yaml_backup" { + filename = local.kc_file_backup + content = ssh_resource.retrieve_kubeconfig.result + file_permission = "0600" +} + +locals { + rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) +} + +module "rancher_install" { + source = "../../../../modules/rancher" + dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency + kubeconfig_file = local_file.kube_config_yaml.filename + rancher_hostname = local.rancher_hostname + rancher_replicas = min(var.rancher_replicas, var.server_instance_count) + rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_password = var.rancher_password + rancher_version = var.rancher_version + wait = var.wait +} diff --git a/recipes/upstream/aws/rke/README.md b/recipes/upstream/aws/rke/README.md index ae7ac039..aec12520 100644 --- a/recipes/upstream/aws/rke/README.md +++ b/recipes/upstream/aws/rke/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE +# Upstream | AWS | EC2 x RKE -This module is used to establish a Rancher (local) management cluster using AWS and RKE. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). Documentation can be found [here](./docs.md). @@ -11,90 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. 
- -```bash -terraform init -terraform plan -terraform apply -``` - -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Advanced - -Target a specific resource/module to action the changes only for that resource/module - -For example, target only the `rke_cluster` resource to re-run the equivalent of `rke up` - -```bash -terraform apply -target module.rke.rke_cluster.this -target module.rke.local_file.kube_config_yaml -``` - -This also updates the kube_config generated by RKE. - -### Notes - -A log file for the RKE provisioning is written to `rke.log` - See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE](../../../../modules/distribution/rke) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful [(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke/docs.md b/recipes/upstream/aws/rke/docs.md index 15a21777..63db4c1f 100644 --- a/recipes/upstream/aws/rke/docs.md +++ b/recipes/upstream/aws/rke/docs.md @@ -1,6 +1,12 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -11,7 +17,7 @@ No providers. | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke](#module\_rke) | ../../../../recipes/standalone/aws/rke | n/a | +| [rke\_cluster](#module\_rke\_cluster) | ../../../../recipes/standalone/aws/rke | n/a | ## Resources @@ -24,35 +30,43 @@ No resources. | [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | | [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | +| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
<pre>object({<br>    address = string<br>    user = string<br>    ssh_key = string<br>    ssh_key_path = string<br>  })</pre>
| `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | | [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | | [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | +| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | +| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | | [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| 
[rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | | [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | | [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | | [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | Hostname to set when installing Rancher | `string` | `null` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | +| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | | [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | | [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | +| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | ## Outputs @@ -60,7 +74,5 @@ No resources. 
|------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 5def4ea9..f0df9804 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -1,52 +1,52 @@ -module "rke" { - source = "../../../../recipes/standalone/aws/rke" - - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = var.aws_region - - dependency = var.dependency - prefix = var.prefix - instance_count = var.instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - spot_instances = var.spot_instances - install_docker = var.install_docker - docker_version = var.docker_version - - subnet_id = var.subnet_id - create_ssh_key_pair = var.create_ssh_key_pair - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - - ssh_username = var.ssh_username - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - +module "rke_cluster" { + source = "../../../../recipes/standalone/aws/rke" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + #instance_type = var.instance_type + #spot_instances = var.spot_instances + #instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + install_docker = var.install_docker + docker_version = var.docker_version + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags + # kubernetes_version = var.kubernetes_version kube_config_path = var.kube_config_path kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version - } locals { - rancher_hostname = join(".", ["rancher", module.rke.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.rke_cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke_cluster.instances_public_ip[0], "sslip.io"]) + } module "rancher_install" { source = "../../../../modules/rancher" - dependency = module.rke.dependency - kubeconfig_file = module.rke.kubeconfig_filename + dependency = [module.rke_cluster] + kubeconfig_file = module.rke_cluster.kube_config_path rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait rancher_helm_repository = var.rancher_helm_repository rancher_helm_repository_username = var.rancher_helm_repository_username rancher_helm_repository_password = var.rancher_helm_repository_password cert_manager_helm_repository = var.cert_manager_helm_repository cert_manager_helm_repository_username = var.cert_manager_helm_repository_username cert_manager_helm_repository_password = var.cert_manager_helm_repository_password + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/recipes/upstream/aws/rke/outputs.tf b/recipes/upstream/aws/rke/outputs.tf index c4b35f64..25550770 100644 --- a/recipes/upstream/aws/rke/outputs.tf +++ b/recipes/upstream/aws/rke/outputs.tf @@ -1,25 +1,17 @@ output "instances_public_ip" { - value = module.rke.instances_public_ip + value = module.rke_cluster.instances_public_ip } output "instances_private_ip" { - value = module.rke.instances_private_ip -} - -output "rancher_hostname" { - value = local.rancher_hostname + value = module.rke_cluster.instances_private_ip } output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/rke/provider.tf b/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke/terraform.tfvars.example b/recipes/upstream/aws/rke/terraform.tfvars.example index c155c6ef..4a701c8f 100644 --- a/recipes/upstream/aws/rke/terraform.tfvars.example +++ b/recipes/upstream/aws/rke/terraform.tfvars.example @@ -1,48 +1,99 @@ ###### !! Required variables !! 
###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null -## -- Override the default k8s version used by RKE -# kubernetes_version = "v1.24.10-rancher4-1" +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Script that will run when the VMs start +# user_data = "" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" \ No newline at end of file +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index a31f6f05..d471b68c 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -1,5 +1,6 @@ -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" +variable "prefix" { + type = string + description = "Prefix added to names of all resources" default = null } @@ -56,141 +57,194 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" +variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" default = null } -variable "instance_type" { +variable "ssh_private_key_path" { type = string - description = "Instance type used for all EC2 instances" + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size 
(GB)" +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" default = null } -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true } -variable "kube_config_filename" { - description = "Filename to write the kube config" +variable "vpc_id" { type = string + description = "VPC ID to create the instance(s) in" default = null } -variable "kubernetes_version" { +variable "subnet_id" { type = string - description = "Kubernetes version to use for the RKE cluster" + description = "VPC Subnet ID to create the instance(s) in" default = null } -variable "install_docker" { +variable "create_security_group" { type = bool - description = "Should install docker while creating the instance" + description = "Should create the security group associated with the instance(s)" default = true + nullable = false } -variable "docker_version" { +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false +} + +variable "instance_type" { type = string - description = "Docker version to install on nodes" - default = "20.10" + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false +} + +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" +variable "instance_disk_size" { type = string + description = "Specify root disk size (GB)" + default = "80" + nullable = false } -variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" +variable "instance_security_group_id" { + type = string + description = "Provide a pre-existing security group ID" default = null +} + +variable "ssh_username" { type = string + description = "Username used for SSH with sudo access" + default = "ubuntu" + nullable = false +} - validation { - condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" - } +variable "user_data" { + description = "User data content for EC2 instance(s)" + default = null } -variable "rancher_version" { - description = "Rancher version to install" +variable "bastion_host" { + type = object({ + address = string + user = string + ssh_key = string + ssh_key_path = string + }) default = null + description = "Bastion host configuration to access the instances" +} + +variable "iam_instance_profile" { type = string + description = "Specify IAM Instance Profile to assign to the instances/nodes" + default = null } -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 +variable "tag_begin" { type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 } -variable "create_ssh_key_pair" { +variable "tags" { + description = "User-provided tags for the resources" + type = map(string) + default = {} +} + +variable "install_docker" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null 
+ description = "Install Docker while creating the instances" + default = true } -variable "ssh_key_pair_name" { +variable "docker_version" { type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null + description = "Docker version to install on nodes" + default = "20.10" +} + +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 120 } -variable "ssh_key_pair_path" { +variable "kubernetes_version" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" + description = "Kubernetes version to use for the RKE cluster" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "spot_instances" { +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" type = bool - description = "Use spot instances" - default = null + default = true } -variable "subnet_id" { +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" type = string - description = "VPC Subnet ID to create the instance(s) in" default = null } -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" +variable "kube_config_filename" { + description = "Filename to write the kube config" + type = string default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "rancher_hostname" { + description = "Hostname to set when installing Rancher" type = string - description = "Provide a pre-existing security group ID" default = null } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_password" { + type = string + + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." 
+ } +} + +variable "rancher_version" { + description = "Rancher version to install" + type = string + default = null } variable "rancher_helm_repository" { From 65beba84d2f4f2aa6543ceee42b1b082c728a454 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 25 Jun 2024 16:37:12 +0200 Subject: [PATCH 21/35] Reviewed tests for AWS EC2 and AWS EC2 x RKE x Rancher --- tests/modules/infra/aws/README.md | 1 - tests/modules/infra/aws/ec2/README.md | 32 +++++++ tests/modules/infra/aws/ec2/docs.md | 40 ++++++++ tests/modules/infra/aws/ec2/main.tf | 7 ++ tests/modules/infra/aws/ec2/outputs.tf | 7 ++ tests/modules/infra/aws/ec2/provider.tf | 36 +++++++ .../infra/aws/ec2/terraform.tfvars.example | 20 ++++ tests/modules/infra/aws/ec2/user_data.tmpl | 9 ++ tests/modules/infra/aws/ec2/variables.tf | 19 ++++ tests/modules/infra/aws/main.tf | 29 ------ tests/recipes/upstream/aws/rke/README.md | 31 ++++++ tests/recipes/upstream/aws/rke/docs.md | 57 +++++++++++ tests/recipes/upstream/aws/rke/main.tf | 79 +++++++++++++-- tests/recipes/upstream/aws/rke/outputs.tf | 17 ++++ tests/recipes/upstream/aws/rke/provider.tf | 36 +++++++ .../upstream/aws/rke/terraform.tfvars.example | 96 +++++++++++++++++++ tests/recipes/upstream/aws/rke/user_data.tmpl | 9 ++ tests/recipes/upstream/aws/rke/variables.tf | 59 ++++++++++-- 18 files changed, 536 insertions(+), 48 deletions(-) delete mode 100644 tests/modules/infra/aws/README.md create mode 100644 tests/modules/infra/aws/ec2/README.md create mode 100644 tests/modules/infra/aws/ec2/docs.md create mode 100644 tests/modules/infra/aws/ec2/main.tf create mode 100644 tests/modules/infra/aws/ec2/outputs.tf create mode 100644 tests/modules/infra/aws/ec2/provider.tf create mode 100644 tests/modules/infra/aws/ec2/terraform.tfvars.example create mode 100644 tests/modules/infra/aws/ec2/user_data.tmpl create mode 100644 tests/modules/infra/aws/ec2/variables.tf delete mode 100644 tests/modules/infra/aws/main.tf create mode 100644 tests/recipes/upstream/aws/rke/README.md create mode 100644 tests/recipes/upstream/aws/rke/docs.md create mode 100644 tests/recipes/upstream/aws/rke/outputs.tf create mode 100644 tests/recipes/upstream/aws/rke/provider.tf create mode 100644 tests/recipes/upstream/aws/rke/terraform.tfvars.example create mode 100644 tests/recipes/upstream/aws/rke/user_data.tmpl diff --git a/tests/modules/infra/aws/README.md b/tests/modules/infra/aws/README.md deleted file mode 100644 index bb1fa9d2..00000000 --- a/tests/modules/infra/aws/README.md +++ /dev/null @@ -1 +0,0 @@ -This directory has code to test the aws infra [module](../../../../modules/infra/aws). diff --git a/tests/modules/infra/aws/ec2/README.md b/tests/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b27c012d --- /dev/null +++ b/tests/modules/infra/aws/ec2/README.md @@ -0,0 +1,32 @@ +# TEST - AWS EC2 instances deploy + +This directory has code to test the AWS EC2 [module](../../../../../modules/infra/aws/ec2). + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd test/modules/infra/aws/ec2 +``` + +- Edit `./variables.tf` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") +- Make sure you are logged into your AWS Account from your local Terminal. 
See the preparatory steps [here](../../../../../modules/infra/aws/README.md). + +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 diff --git a/tests/modules/infra/aws/ec2/docs.md b/tests/modules/infra/aws/ec2/docs.md new file mode 100644 index 00000000..42859dea --- /dev/null +++ b/tests/modules/infra/aws/ec2/docs.md @@ -0,0 +1,40 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | diff --git a/tests/modules/infra/aws/ec2/main.tf b/tests/modules/infra/aws/ec2/main.tf new file mode 100644 index 00000000..f3f90174 --- /dev/null +++ b/tests/modules/infra/aws/ec2/main.tf @@ -0,0 +1,7 @@ +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} diff --git a/tests/modules/infra/aws/ec2/outputs.tf b/tests/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..28474230 --- /dev/null +++ b/tests/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,7 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} diff --git a/tests/modules/infra/aws/ec2/provider.tf b/tests/modules/infra/aws/ec2/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/modules/infra/aws/ec2/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region 
+} diff --git a/tests/modules/infra/aws/ec2/terraform.tfvars.example b/tests/modules/infra/aws/ec2/terraform.tfvars.example new file mode 100644 index 00000000..f5d4fd1e --- /dev/null +++ b/tests/modules/infra/aws/ec2/terraform.tfvars.example @@ -0,0 +1,20 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- The number of nodes +instance_count = 1 + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" diff --git a/tests/modules/infra/aws/ec2/user_data.tmpl b/tests/modules/infra/aws/ec2/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/modules/infra/aws/ec2/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/modules/infra/aws/ec2/variables.tf b/tests/modules/infra/aws/ec2/variables.tf new file mode 100644 index 00000000..efdb192c --- /dev/null +++ b/tests/modules/infra/aws/ec2/variables.tf @@ -0,0 +1,19 @@ +variable "prefix" { + default = "ec2-test" +} + +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} diff --git a/tests/modules/infra/aws/main.tf b/tests/modules/infra/aws/main.tf deleted file mode 100644 index ab6acd57..00000000 --- a/tests/modules/infra/aws/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "test1_all_defaults" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -module "test2_specify_sg" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -resource "aws_vpc" "for_test3" { - -} - -module "test3_specify_dynamic_vpc" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_ssh_key_pair = true - vpc_id = aws_vpc.for_test3.id -} diff --git a/tests/recipes/upstream/aws/rke/README.md b/tests/recipes/upstream/aws/rke/README.md new file mode 100644 index 00000000..d06f70fe --- /dev/null +++ b/tests/recipes/upstream/aws/rke/README.md @@ -0,0 +1,31 @@ +# Upstream | AWS | EC2 x RKE + +This directory contains the code for testing the AWS EC2 x RKE x Rancher modules. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/upstream/aws/rke +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `ssh_private_key_path`, `instance_count`, `ssh_username`, `user_data`, `install_docker`, `docker_version`, `waiting_time`, `ingress_provider`, `bootstrap_rancher`, `rancher_hostname`, and `rancher_password`). 
+- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). + +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/upstream/aws/rke/docs.md b/tests/recipes/upstream/aws/rke/docs.md new file mode 100644 index 00000000..5d51fd1b --- /dev/null +++ b/tests/recipes/upstream/aws/rke/docs.md @@ -0,0 +1,57 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../../modules/rancher | n/a | +| [rke](#module\_rke) | ../../../../../modules/distribution/rke | n/a | + +## Resources + +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | n/a | `bool` | `true` | no | +| [docker\_version](#input\_docker\_version) | n/a | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | n/a | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | n/a | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `string` | `"rancher"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | `"at-least-12-characters"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [user\_data](#input\_user\_data) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | n/a | `number` | `180` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/tests/recipes/upstream/aws/rke/main.tf b/tests/recipes/upstream/aws/rke/main.tf index 5491f35e..b870cd4f 100644 --- 
a/tests/recipes/upstream/aws/rke/main.tf +++ b/tests/recipes/upstream/aws/rke/main.tf @@ -1,11 +1,70 @@ -module "test1_default" { - source = "../../../../../recipes/upstream/aws/rke" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - rancher_password = "this-is-an-insecure-password" - instance_count = 1 +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username + user_data = templatefile("${path.module}/user_data.tmpl", + { + install_docker = var.install_docker + username = var.ssh_username + docker_version = var.docker_version + } + ) +} + +resource "null_resource" "wait-docker-startup" { + depends_on = [module.aws-ec2-upstream-cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + ssh_private_key_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" +} + +module "rke" { + source = "../../../../../modules/distribution/rke" + prefix = var.prefix + dependency = [resource.null_resource.wait-docker-startup] + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username + + rancher_nodes = [for instance_ips in module.aws-ec2-upstream-cluster.instance_ips : + { + public_ip = instance_ips.public_ip, + private_ip = instance_ips.private_ip, + roles = ["etcd", "controlplane", "worker"], + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, + hostname_override = null + } + ] +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) + +} + +module "rancher_install" { + source = "../../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/tests/recipes/upstream/aws/rke/outputs.tf b/tests/recipes/upstream/aws/rke/outputs.tf new file mode 100644 index 00000000..3f0a3cc5 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/outputs.tf @@ -0,0 +1,17 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/tests/recipes/upstream/aws/rke/provider.tf b/tests/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/upstream/aws/rke/terraform.tfvars.example b/tests/recipes/upstream/aws/rke/terraform.tfvars.example new file mode 100644 index 00000000..7787da60 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/terraform.tfvars.example @@ -0,0 +1,96 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type +# instance_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/tests/recipes/upstream/aws/rke/user_data.tmpl b/tests/recipes/upstream/aws/rke/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/recipes/upstream/aws/rke/variables.tf b/tests/recipes/upstream/aws/rke/variables.tf index 21e0b5af..bca65038 100644 --- a/tests/recipes/upstream/aws/rke/variables.tf +++ b/tests/recipes/upstream/aws/rke/variables.tf @@ -1,11 +1,54 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} + 
+variable "user_data" { + default = null +} + +variable "install_docker" { + type = bool + default = true +} + +variable "docker_version" { + type = string + default = "20.10" +} + +variable "waiting_time" { + default = 180 +} + +variable "ingress_provider" { + default = "nginx" +} + +variable "bootstrap_rancher" { + type = bool + default = true +} + +variable "rancher_hostname" { + default = "rancher" +} + +variable "rancher_password" { + default = "at-least-12-characters" } From 990b1baafca565de1d65271309adbe87fda8efcd Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 30 Jul 2024 08:32:49 +0200 Subject: [PATCH 22/35] Rebase --- recipes/rke/split-roles/aws/README.md | 35 +++ recipes/rke/split-roles/aws/docs.md | 88 +++---- recipes/rke/split-roles/aws/main.tf | 116 ++++----- recipes/rke/split-roles/aws/outputs.tf | 31 ++- recipes/rke/split-roles/aws/provider.tf | 36 +++ .../split-roles/aws/terraform.tfvars.example | 108 +++++++++ recipes/rke/split-roles/aws/variables.tf | 227 ++++++------------ 7 files changed, 386 insertions(+), 255 deletions(-) create mode 100644 recipes/rke/split-roles/aws/provider.tf create mode 100644 recipes/rke/split-roles/aws/terraform.tfvars.example diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index ff4456c4..ba37426a 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -3,3 +3,38 @@ This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd recipes/rke/split-roles/aws +``` + +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `master_node_count` to specify the number of Master nodes to create + - `worker_node_count` to specify the number of Worker nodes to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). 
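+
+As a reference, a minimal `terraform.tfvars` for this recipe could look like the sketch below; every value is a placeholder to adapt to your environment, and the password must be at least 12 characters:
+
+```hcl
+prefix             = "my-name"
+aws_region         = "us-east-1"
+master_nodes_count = 1
+worker_nodes_count = 1
+ssh_username       = "ubuntu"
+rancher_hostname   = "rancher"
+rancher_password   = "at-least-12-characters"
+```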
+ +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** + +```bash +terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master-nodes.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-master-nodes.local_file.private_key_pem -target=module.aws-ec2-upstream-master-nodes.local_file.public_key_pem -target=module.aws-ec2-upstream-master-nodes.aws_key_pair.key_pair -target=module.aws-ec2-upstream-master-nodes.aws_vpc.vpc -target=module.aws-ec2-upstream-master-nodes.aws_subnet.subnet -target=module.aws-ec2-upstream-master-nodes.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy -auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index 8d44c5e5..ed7c80b4 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -1,66 +1,70 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [master\_nodes](#module\_master\_nodes) | ../../../../modules/infra/aws | n/a | +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../modules/infra/aws/ec2 | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | -| [worker\_nodes](#module\_worker\_nodes) | ../../../../modules/infra/aws | n/a | ## Resources -No resources. +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup-m](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-docker-startup-w](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | Enter your AWS access key | `string` | n/a | yes | -| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | n/a | yes | -| [aws\_secret\_key](#input\_aws\_secret\_key) | Enter your AWS secret key | `string` | n/a | yes | -| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the RKE nodes |
<pre>object({<br>    address      = string<br>    user         = string<br>    ssh_key_path = string<br>    ssh_key      = string<br>  })</pre>
| `null` | no | -| [cloud\_provider](#input\_cloud\_provider) | Specify the cloud provider name | `string` | `null` | no | -| [create\_kubeconfig\_file](#input\_create\_kubeconfig\_file) | Boolean flag to generate a kubeconfig file (mostly used for dev only) | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | -| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"23.0.6"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_security\_group\_name](#input\_instance\_security\_group\_name) | Provide a pre-existing security group name | `string` | `null` | no | -| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | -| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [master\_nodes\_count](#input\_master\_nodes\_count) | Number of master nodes to create | `number` | `1` | no | -| [master\_nodes\_iam\_instance\_profile](#input\_master\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to master nodes | `string` | `null` | no | -| [master\_nodes\_instance\_disk\_size](#input\_master\_nodes\_instance\_disk\_size) | Disk size used for all master nodes (in GB) | `string` | `"80"` | no | -| [master\_nodes\_instance\_type](#input\_master\_nodes\_instance\_type) | Instance type used for all master nodes | `string` | `"t3.medium"` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | n/a | yes | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. 
| `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | -| [vpc\_zone](#input\_vpc\_zone) | VPC zone | `string` | `null` | no | -| [worker\_nodes\_count](#input\_worker\_nodes\_count) | Number of worker nodes to create | `number` | `1` | no | -| [worker\_nodes\_iam\_instance\_profile](#input\_worker\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to worker nodes | `string` | `null` | no | -| [worker\_nodes\_instance\_disk\_size](#input\_worker\_nodes\_instance\_disk\_size) | Disk size used for all worker nodes (in GB) | `string` | `"80"` | no | -| [worker\_nodes\_instance\_type](#input\_worker\_nodes\_instance\_type) | Instance type used for all worker nodes | `string` | `"t3.large"` | no | +| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | +| [master\_nodes\_count](#input\_master\_nodes\_count) | n/a | `any` | n/a | yes | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs | Name | Description | |------|-------------| -| [credentials](#output\_credentials) | n/a | -| 
[dependency](#output\_dependency) | n/a | -| [kube\_config\_yaml](#output\_kube\_config\_yaml) | n/a | -| [kubeconfig\_file](#output\_kubeconfig\_file) | n/a | +| [master\_instances\_private\_ip](#output\_master\_instances\_private\_ip) | n/a | +| [master\_instances\_public\_ip](#output\_master\_instances\_public\_ip) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | +| [worker\_instances\_private\_ip](#output\_worker\_instances\_private\_ip) | n/a | +| [worker\_instances\_public\_ip](#output\_worker\_instances\_public\_ip) | n/a | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index f7ab3d8c..7691fa2a 100644 --- a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -1,20 +1,20 @@ -module "master_nodes" { - source = "../../../../modules/infra/aws" +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.aws-ec2-upstream-master-nodes.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.aws-ec2-upstream-master-nodes.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.aws-ec2-upstream-master-nodes.security_group[0].id +} - prefix = "${var.prefix}-m" - instance_count = var.master_nodes_count - instance_type = var.master_nodes_instance_type - instance_disk_size = var.master_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-master-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.master_nodes_count + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -22,27 +22,22 @@ module "master_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.master_nodes_iam_instance_profile != null ? 
var.master_nodes_iam_instance_profile : null - tags = var.tags } -module "worker_nodes" { - source = "../../../../modules/infra/aws" - - prefix = "${var.prefix}-w" - instance_count = var.worker_nodes_count - instance_type = var.worker_nodes_instance_type - instance_disk_size = var.worker_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -50,45 +45,58 @@ module "worker_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.worker_nodes_iam_instance_profile != null ? var.worker_nodes_iam_instance_profile : null - tags = var.tags +} + +resource "null_resource" "wait-docker-startup-m" { + depends_on = [module.aws-ec2-upstream-master-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +resource "null_resource" "wait-docker-startup-w" { + depends_on = [module.aws-ec2-upstream-worker-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - master_nodes = [for instance_ips in module.master_nodes.instance_ips : + ssh_private_key_path = var.ssh_private_key_path != null ? 
var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + master_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.master_nodes.node_username, + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, hostname_override = instance_ips.private_dns } ] - worker_nodes = [for instance_ips in module.worker_nodes.instance_ips : + worker_nodes = [for instance_ips in module.aws-ec2-upstream-worker-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["worker"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.worker_nodes.node_username + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, hostname_override = instance_ips.private_dns } ] } module "rke" { - source = "../../../../modules/distribution/rke" - prefix = var.prefix - node_username = var.ssh_username - create_kubeconfig_file = var.create_kubeconfig_file - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version - bastion_host = var.bastion_host - cloud_provider = var.cloud_provider + source = "../../../../modules/distribution/rke" + prefix = var.prefix + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username rancher_nodes = concat(local.master_nodes, local.worker_nodes) } + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} diff --git a/recipes/rke/split-roles/aws/outputs.tf b/recipes/rke/split-roles/aws/outputs.tf index a974b706..b8272b66 100644 --- a/recipes/rke/split-roles/aws/outputs.tf +++ b/recipes/rke/split-roles/aws/outputs.tf @@ -1,18 +1,27 @@ -output "dependency" { - value = [ - var.master_nodes_count != 0 ? module.master_nodes[*].instance_ips : null, - var.worker_nodes_count != 0 ? 
module.worker_nodes[*].instance_ips : null - ] +output "master_instances_public_ip" { + value = module.aws-ec2-upstream-master-nodes.instances_public_ip } -output "kubeconfig_file" { - value = module.rke.rke_kubeconfig_filename +output "master_instances_private_ip" { + value = module.aws-ec2-upstream-master-nodes.instances_private_ip } -output "kube_config_yaml" { - value = module.rke.kube_config_yaml +output "worker_instances_public_ip" { + value = module.aws-ec2-upstream-worker-nodes.instances_public_ip } -output "credentials" { - value = module.rke.credentials +output "worker_instances_private_ip" { + value = module.aws-ec2-upstream-worker-nodes.instances_private_ip +} + +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id +} + +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id } diff --git a/recipes/rke/split-roles/aws/provider.tf b/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example new file mode 100644 index 00000000..d0527b51 --- /dev/null +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -0,0 +1,108 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Master nodes +master_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- Master nodes type +# master_nodes_type = "t3.medium" + +## -- Worker nodes type +# worker_nodes_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Master nodes disk size (GB) +# master_nodes_disk_size = 80 + +## -- Worker nodes disk size (GB) +# worker_nodes_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the Master nodes +# master_nodes_iam_instance_profile = null + +## -- IAM Instance Profile to assign to the Worker nodes +# worker_nodes_iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index dabc8251..0aaf7084 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -1,199 +1,130 @@ -variable "aws_access_key" { - type = string - description = "Enter your AWS access key" -} - -variable "aws_secret_key" { - type = string - description = "Enter your AWS secret key" - sensitive = true -} +variable "prefix" {} variable "aws_region" { type = string description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + 
"eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } } -variable "vpc_zone" { - type = string - description = "VPC zone" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "vpc_id" { - type = string - description = "VPC ID to create the instance(s) in" - default = null +variable "ssh_private_key_path" { + default = null } -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true +variable "ssh_public_key_path" { + default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "vpc_id" { + default = null } -variable "instance_security_group_name" { - type = string - description = "Provide a pre-existing security group name" - default = null +variable "subnet_id" { + default = null } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" +variable "create_security_group" { + default = null } -variable "master_nodes_count" { - type = number - description = "Number of master nodes to create" - default = 1 -} +variable "master_nodes_count" {} -variable "worker_nodes_count" { - type = number - description = "Number of worker nodes to create" - default = 1 -} +variable "worker_nodes_count" {} -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "instance_security_group_id" { + default = null } -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null -} +variable "ssh_username" {} -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } variable "install_docker" { type = bool - description = "Should install docker while creating the instance" + description = "Install Docker while creating the instances" default = true } variable "docker_version" { type = string description = "Docker version to install on nodes" - default = "23.0.6" + default = "20.10" } -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." 
- default = null - sensitive = true -} - -variable "bastion_host" { - type = object({ - address = string - user = string - ssh_key_path = string - ssh_key = string - }) - default = null - description = "Bastion host configuration to access the RKE nodes" -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "master_nodes_instance_type" { - type = string - description = "Instance type used for all master nodes" - default = "t3.medium" -} - -variable "master_nodes_instance_disk_size" { - type = string - description = "Disk size used for all master nodes (in GB)" - default = "80" -} - -variable "worker_nodes_instance_type" { - type = string - description = "Instance type used for all worker nodes" - default = "t3.large" +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } -variable "worker_nodes_instance_disk_size" { - type = string - description = "Disk size used for all worker nodes (in GB)" - default = "80" -} +variable "rancher_hostname" {} -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" - default = null -} +variable "rancher_password" { + type = string -variable "master_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to master nodes" - default = null - type = string + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." + } } -variable "worker_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to worker nodes" - default = null - type = string -} - -variable "tags" { - description = "User-provided tags for the resources" - type = map(string) - default = {} -} - -variable "cloud_provider" { - description = "Specify the cloud provider name" +variable "rancher_version" { + description = "Rancher version to install" type = string default = null } - -variable "create_kubeconfig_file" { - description = "Boolean flag to generate a kubeconfig file (mostly used for dev only)" - default = true -} From 5b51d4c3ca41aeef6293867be0910a740c5f6a7e Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Thu, 27 Jun 2024 18:24:56 +0200 Subject: [PATCH 23/35] Reviewed tests for AWS EC2 and AWS EC2 x RKE2 x Rancher --- recipes/upstream/aws/rke2/README.md | 81 ++------- recipes/upstream/aws/rke2/docs.md | 65 ++++---- recipes/upstream/aws/rke2/main.tf | 132 ++++++++------- recipes/upstream/aws/rke2/outputs.tf | 40 +++-- recipes/upstream/aws/rke2/provider.tf | 30 +++- .../aws/rke2/terraform.tfvars.example | 112 +++++++++---- recipes/upstream/aws/rke2/variables.tf | 154 ++++++++---------- 7 files changed, 331 insertions(+), 283 deletions(-) diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index 4dd089e5..811b29e6 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE2 +# Upstream | AWS | EC2 x RKE2 -This module is used to establish a Rancher (local) management cluster using AWS and RKE2. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE2](https://docs.rke2.io/). Documentation can be found [here](./docs.md). 
@@ -11,76 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke2 ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade ; terraform apply -target=module.rke2-first-server.tls_private_key.ssh_private_key -target=module.rke2-first-server.local_file.private_key_pem -target=module.rke2-first-server.local_file.public_key_pem -target=module.rke2-first-server.aws_key_pair.key_pair -target=module.rke2-first-server.aws_vpc.vpc -target=module.rke2-first-server.aws_subnet.subnet -target=module.rke2-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ; terraform apply -target=module.rancher_install -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. 
- -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE2](../../../../modules/distribution/rke2) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun terraform apply again, and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. - -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE2: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke2 + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 36b24c76..0801c42e 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -2,6 +2,10 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -9,6 +13,7 @@ | Name | Version | |------|---------| | [local](#provider\_local) | n/a | +| [null](#provider\_null) | n/a | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -16,49 +21,50 @@ | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke2\_additional](#module\_rke2\_additional) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws | n/a | -| [rke2\_first](#module\_rke2\_first) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws | n/a | +| [rke2-additional](#module\_rke2-additional) | ../../../../modules/distribution/rke2 | n/a | +| [rke2-additional-servers](#module\_rke2-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2-first](#module\_rke2-first) | ../../../../modules/distribution/rke2 | n/a | +| [rke2-first-server](#module\_rke2-first-server) | ../../../../modules/infra/aws/ec2 | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| 
[local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| 
[rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName value | `string` | `"nginx"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | | [rke2\_config](#input\_rke2\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | | [rke2\_token](#input\_rke2\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | | [rke2\_version](#input\_rke2\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | ## Outputs @@ -66,7 +72,8 @@ |------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_password](#output\_rancher\_password) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index b4c6cc0a..be1aa4bc 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -1,99 +1,121 @@ locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" + create_ssh_key_pair = var.create_ssh_key_pair == null ? 
false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.rke2-first-server.security_group[0].id + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } -module "rke2_first" { +module "rke2-first" { source = "../../../../modules/distribution/rke2" rke2_token = var.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config } -module "rke2_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.ssh_key_pair_name - subnet_id = var.subnet_id - user_data = module.rke2_first.rke2_user_data +module "rke2-first-server" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2-first.rke2_user_data } -module "rke2_additional" { +module "rke2-additional" { source = "../../../../modules/distribution/rke2" - rke2_token = module.rke2_first.rke2_token + rke2_token = module.rke2-first.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config - first_server_ip = module.rke2_first_server.instances_private_ip[0] + first_server_ip = module.rke2-first-server.instances_private_ip[0] } -module "rke2_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.rke2_first_server.ssh_key_pair_name - ssh_key_pair_path = module.rke2_first_server.ssh_key_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.rke2_first_server.sg-id - 
subnet_id = var.subnet_id - user_data = module.rke2_additional.rke2_user_data +module "rke2-additional-servers" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2-additional.rke2_user_data } data "local_file" "ssh_private_key" { - depends_on = [module.rke2_first_server] - filename = module.rke2_first_server.ssh_key_path + depends_on = [module.rke2-first-server] + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.rke2_first_server.instances_public_ip[0] +resource "ssh_resource" "retrieve-kubeconfig" { + host = module.rke2-first-server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.rke2_first_server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" + "sudo sed 's/127.0.0.1/${module.rke2-first-server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" ] user = var.ssh_username private_key = data.local_file.ssh_private_key.content } -resource "local_file" "kube_config_yaml" { +resource "local_file" "kube-config-yaml" { filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { +resource "local_file" "kube-config-yaml-backup" { filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke2-additional-servers] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.instance_count > 1 ? 
module.rke2_additional_servers.dependency : module.rke2_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kc_file rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index 25659cfc..a85d4257 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,25 +1,39 @@ +output "instances_private_ip" { + value = concat([module.rke2-first-server.instances_private_ip], [module.rke2-additional-servers.instances_private_ip]) +} + output "instances_public_ip" { - value = concat([module.rke2_first_server.instances_public_ip], [module.rke2_additional_servers.instances_public_ip]) + value = concat([module.rke2-first-server.instances_public_ip], [module.rke2-additional-servers.instances_public_ip]) } -output "instances_private_ip" { - value = concat([module.rke2_first_server.instances_private_ip], [module.rke2_additional_servers.instances_private_ip]) +output "vpc" { + value = module.rke2-first-server.vpc[0].id } -output "rancher_hostname" { - value = local.rancher_hostname +output "subnet" { + value = module.rke2-first-server.subnet[0].id } -output "rancher_url" { - value = "https://${local.rancher_hostname}" +output "security_group" { + value = module.rke2-first-server.security_group[0].id } -output "rancher_password" { - value = var.rancher_bootstrap_password +# Uncomment for debugging purposes +#output "rke2_first_server_config_file" { +# value = nonsensitive(module.rke2-first.rke2_user_data) +#} + +# Uncomment for debugging purposes +#output "rke2_additional_servers_config_file" { +# value = nonsensitive(module.rke2-additional.rke2_user_data) +#} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/rke2/provider.tf b/recipes/upstream/aws/rke2/provider.tf index 6997a762..8e915083 100644 --- a/recipes/upstream/aws/rke2/provider.tf +++ b/recipes/upstream/aws/rke2/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # 
shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke2/terraform.tfvars.example b/recipes/upstream/aws/rke2/terraform.tfvars.example index f084ca75..3b85cf4e 100644 --- a/recipes/upstream/aws/rke2/terraform.tfvars.example +++ b/recipes/upstream/aws/rke2/terraform.tfvars.example @@ -1,51 +1,99 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Override the default k8s version used by RKE2 -# rke2_version = "v1.25.10+rke2r1" +## -- AWS VPC used for all resources +# vpc_id = null -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- RKE2 version +# rke2_version = "v1.28.3+rke2r2" + ## -- RKE2 token, override the programmatically generated token # rke2_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- RKE2 custom config file +# rke2_config = "" + +## -- RKE2 KUBECONFIG file path +# kube_config_path = "" + +## -- RKE2 KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/rke2/variables.tf b/recipes/upstream/aws/rke2/variables.tf index 3e27b687..e4263217 100644 --- a/recipes/upstream/aws/rke2/variables.tf +++ b/recipes/upstream/aws/rke2/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,30 +47,60 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +variable "ssh_public_key_path" { + default = null +} + +# variable 
"vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "rke2_version" { type = string description = "Kubernetes version to use for the RKE2 cluster" @@ -103,85 +129,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." } } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} - -variable "ssh_key_pair_path" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "nginx" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } From 307a94f4465a174d427612626d409f92003c5f86 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Fri, 28 Jun 2024 17:08:02 +0200 Subject: [PATCH 24/35] Fixed code in path recipes/rke/split-roles/aws - Added Rancher deployment --- recipes/rke/split-roles/aws/README.md | 2 +- recipes/rke/split-roles/aws/docs.md | 11 +++---- 
recipes/rke/split-roles/aws/main.tf | 30 +++++++++++++++---- recipes/rke/split-roles/aws/outputs.tf | 26 ++++++++-------- .../split-roles/aws/terraform.tfvars.example | 10 +++---- recipes/rke/split-roles/aws/variables.tf | 14 ++++++++- 6 files changed, 64 insertions(+), 29 deletions(-) diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index ba37426a..0a90c9b5 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -31,7 +31,7 @@ terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master - Destroy the resources when finished ```bash -terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index ed7c80b4..75c5341d 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -20,6 +20,7 @@ |------|--------|---------| | [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../modules/infra/aws/ec2 | n/a | | [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | ## Resources @@ -42,11 +43,11 @@ | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | -| [master\_nodes\_count](#input\_master\_nodes\_count) | n/a | `any` | n/a | yes | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | | [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | | [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | | [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | | [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | | [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | @@ -61,10 +62,10 @@ | Name | Description | |------|-------------| -| [master\_instances\_private\_ip](#output\_master\_instances\_private\_ip) | n/a | -| [master\_instances\_public\_ip](#output\_master\_instances\_public\_ip) | n/a | +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | | [security\_group](#output\_security\_group) | n/a | | [subnet](#output\_subnet) | n/a | | [vpc](#output\_vpc) | n/a | -| [worker\_instances\_private\_ip](#output\_worker\_instances\_private\_ip) | n/a | -| [worker\_instances\_public\_ip](#output\_worker\_instances\_public\_ip) | n/a | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index 7691fa2a..4c837074 100644 --- 
a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -13,7 +13,7 @@ module "aws-ec2-upstream-master-nodes" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region - instance_count = var.master_nodes_count + instance_count = var.server_nodes_count ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { @@ -63,14 +63,14 @@ resource "null_resource" "wait-docker-startup-w" { locals { ssh_private_key_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" - master_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : + server_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane"], ssh_key_path = local.ssh_private_key_path, ssh_key = null, - hostname_override = instance_ips.private_dns + hostname_override = null } ] worker_nodes = [for instance_ips in module.aws-ec2-upstream-worker-nodes.instance_ips : @@ -80,7 +80,7 @@ locals { roles = ["worker"], ssh_key_path = local.ssh_private_key_path, ssh_key = null, - hostname_override = instance_ips.private_dns + hostname_override = null } ] } @@ -90,8 +90,9 @@ module "rke" { prefix = var.prefix ssh_private_key_path = local.ssh_private_key_path node_username = var.ssh_username + ingress_provider = var.ingress_provider - rancher_nodes = concat(local.master_nodes, local.worker_nodes) + rancher_nodes = concat(local.server_nodes, local.worker_nodes) } resource "null_resource" "wait-k8s-services-startup" { @@ -100,3 +101,22 @@ resource "null_resource" "wait-k8s-services-startup" { command = "sleep ${var.waiting_time}" } } + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) +} + +module "rancher_install" { + source = "../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_version = var.rancher_version + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}" + ] +} diff --git a/recipes/rke/split-roles/aws/outputs.tf b/recipes/rke/split-roles/aws/outputs.tf index b8272b66..f992227d 100644 --- a/recipes/rke/split-roles/aws/outputs.tf +++ b/recipes/rke/split-roles/aws/outputs.tf @@ -1,17 +1,9 @@ -output "master_instances_public_ip" { - value = module.aws-ec2-upstream-master-nodes.instances_public_ip +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) } -output "master_instances_private_ip" { - value = module.aws-ec2-upstream-master-nodes.instances_private_ip -} - -output "worker_instances_public_ip" { - value = module.aws-ec2-upstream-worker-nodes.instances_public_ip -} - -output "worker_instances_private_ip" { - value = module.aws-ec2-upstream-worker-nodes.instances_private_ip +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) } output "vpc" { @@ -25,3 +17,13 @@ output "subnet" { output "security_group" { value = module.aws-ec2-upstream-master-nodes.security_group[0].id } + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example index d0527b51..a1847526 100644 --- a/recipes/rke/split-roles/aws/terraform.tfvars.example +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -41,14 +41,14 @@ aws_region = "" #Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html -## -- The number of Master nodes -master_nodes_count = 1 +## -- The number of Server nodes +server_nodes_count = 1 ## -- The number of Worker nodes worker_nodes_count = 1 ## -- Master nodes type -# master_nodes_type = "t3.medium" +# server_nodes_type = "t3.medium" ## -- Worker nodes type # worker_nodes_type = "t3.medium" @@ -57,7 +57,7 @@ worker_nodes_count = 1 # spot_instances = false ## -- Master nodes disk size (GB) -# master_nodes_disk_size = 80 +# server_nodes_disk_size = 80 ## -- Worker nodes disk size (GB) # worker_nodes_disk_size = 80 @@ -75,7 +75,7 @@ ssh_username = "ubuntu" # bastion_host = null ## -- IAM Instance Profile to assign to the Master nodes -# master_nodes_iam_instance_profile = null +# server_nodes_iam_instance_profile = null ## -- IAM Instance Profile to assign to the Worker nodes # worker_nodes_iam_instance_profile = null diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index 0aaf7084..b3017b3a 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -69,7 +69,19 @@ variable "create_security_group" { default = null } -variable "master_nodes_count" {} +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 + + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." + } +} variable "worker_nodes_count" {} From 00d0e7627c6fb6ac3059e0cb29f7a61c2ea4f7e7 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 1 Jul 2024 15:08:46 +0200 Subject: [PATCH 25/35] Fixed AWS x RKE2 instances count --- recipes/upstream/aws/rke2/main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index be1aa4bc..84ae7825 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -58,7 +58,7 @@ module "rke2-additional-servers" { vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group - instance_count = var.instance_count + instance_count = var.instance_count - 1 # instance_type = var.instance_type # spot_instances = var.spot_instances # instance_disk_size = var.instance_disk_size From 92a3d1fa68319604a0bcc7438423dfd3faf7af6d Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 10:36:20 +0200 Subject: [PATCH 26/35] Rewrote AWS EC2 x K3S recipe --- recipes/rke/split-roles/aws/README.md | 4 +- recipes/upstream/aws/k3s/README.md | 83 ++------ recipes/upstream/aws/k3s/docs.md | 74 +++---- recipes/upstream/aws/k3s/main.tf | 181 ++++++++++-------- recipes/upstream/aws/k3s/main.tf_bkp | 121 ------------ recipes/upstream/aws/k3s/outputs.tf | 24 +-- recipes/upstream/aws/k3s/provider.tf | 30 ++- .../upstream/aws/k3s/terraform.tfvars.example | 122 ++++++++---- recipes/upstream/aws/k3s/variables.tf | 178 ++++++++--------- 9 files changed, 362 insertions(+), 455 deletions(-) delete mode 100644 recipes/upstream/aws/k3s/main.tf_bkp diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index 0a90c9b5..a7a1f8b1 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -16,8 +16,8 @@ cd recipes/rke/split-roles/aws - Update the required variables: - `prefix` to give the resources an identifiable name (eg, your initials or 
first name)
   - `aws_region` to suit your region
-  - `master_node_count` to specify the number of Master nodes to create
-  - `worker_node_count` to specify the number of Worker nodes to create
+  - `server_nodes_count` to specify the number of server nodes to create
+  - `worker_nodes_count` to specify the number of worker nodes to create
   - `ssh_username` to specify the user used to create the VMs (default "ubuntu")
   - `rancher_hostname` in order to reach the Rancher console via DNS name
   - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters)
diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md
index 427b01bc..e80dee31 100644
--- a/recipes/upstream/aws/k3s/README.md
+++ b/recipes/upstream/aws/k3s/README.md
@@ -1,6 +1,6 @@
-# Upstream | AWS | K3S
+# Upstream | AWS | EC2 x K3s
 
-This module is used to establish a Rancher (local) management cluster using AWS and K3S.
+This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [K3s](https://docs.k3s.io/).
 
 Documentation can be found [here](./docs.md).
 
@@ -11,77 +11,30 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git
 cd recipes/upstream/aws/k3s
 ```
 
-- Copy `terraform.tfvars.example` to `terraform.tfvars`
-- Edit `terraform.tfvars`
+- Copy `./terraform.tfvars.example` to `./terraform.tfvars`
+- Edit `./terraform.tfvars` (an example set of values is sketched below)
 - Update the required variables:
-  - `aws_region` to suit your region
-  - uncomment `instance_type` and change the instance type if needed.
   - `prefix` to give the resources an identifiable name (eg, your initials or first name)
-  - Recommended: `spot_instances` can be set to `true` to use spot instances
-- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this.
-- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there.
-- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more.
-- There are more optional variables which can be tweaked under `terraform.tfvars`.
-
-**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions
-
-Execute the below commands to start deployment.
-
-```bash
-terraform init
-terraform plan -var-file terraform.tfvars
-terraform apply -var-file terraform.tfvars
-```
-The login details will be displayed in the screen once the deployment is successful. It will have the details as below.
-
-```bash
-rancher_hostname = "https://rancher..sslip.io"
-rancher_password = "initial-admin-password"
-```
+  - `aws_region` to suit your region
+  - `server_nodes_count` to specify the number of server nodes to create
+  - `worker_nodes_count` to specify the number of worker nodes to create
+  - `ssh_username` to specify the user used to create the VMs (default "ubuntu")
+  - `rancher_hostname` in order to reach the Rancher console via DNS name
+  - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters)
+- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md).
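For illustration only, a minimal `terraform.tfvars` for the K3s recipe might look like the sketch below. The values are placeholders; the variable names mirror the required variables listed above, and everything else is assumed to stay commented out.

```terraform
# Illustrative values only - adjust to your environment.
prefix             = "myname"     # identifiable prefix for all AWS resources
aws_region         = "us-east-1"  # any region enabled in your account
server_nodes_count = 3            # K3s server (control plane) nodes
worker_nodes_count = 1            # K3s worker (agent) nodes
ssh_username       = "ubuntu"     # default user of the chosen AMI
rancher_hostname   = "rancher"    # resulting URL: https://rancher.<public-ip>.sslip.io
rancher_password   = "a-password-of-at-least-12-chars"
```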
-- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -var-file terraform.tfvars -AWS_PROFILE= terraform apply -var-file terraform.tfvars +terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_private_key.ssh_private_key -target=module.k3s-first-server.local_file.private_key_pem -target=module.k3s-first-server.local_file.public_key_pem -target=module.k3s-first-server.aws_key_pair.key_pair -target=module.k3s-first-server.aws_vpc.vpc -target=module.k3s-first-server.aws_subnet.subnet -target=module.k3s-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy -var-file terraform.tfvars +terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [K3S](../../../../modules/distribution/k3s) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - K3s: https://github.com/rancherlabs/tf-rancher-up/tree/main/modules/distribution/k3s + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index d85c7101..998163cb 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -2,6 +2,10 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -9,59 +13,61 @@ | Name | Version | |------|---------| | [local](#provider\_local) | n/a | +| [null](#provider\_null) | n/a | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules | Name | Source | Version | |------|--------|---------| -| [k3s\_additional](#module\_k3s\_additional) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws | n/a | -| [k3s\_first](#module\_k3s\_first) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws | n/a | -| [k3s\_workers](#module\_k3s\_workers) | ../../../../modules/infra/aws | n/a | +| [k3s-additional](#module\_k3s-additional) | ../../../../modules/distribution/k3s | n/a | +| [k3s-additional-servers](#module\_k3s-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s-additional-workers](#module\_k3s-additional-workers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s-first](#module\_k3s-first) | ../../../../modules/distribution/k3s | n/a | +| [k3s-first-server](#module\_k3s-first-server) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | -| [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | +| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| 
[local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source |
 
 ## Inputs
 
 | Name | Description | Type | Default | Required |
 |------|-------------|------|---------|:--------:|
-| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no |
 | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no |
-| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no |
-| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no |
-| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no |
-| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no |
-| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no |
-| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no |
+| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no |
+| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no |
+| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no |
+| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no |
 | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no |
-| [k3s\_config](#input\_k3s\_config) | Additional k3s configuration to add to the config.yaml file | `any` | `null` | no |
-| [k3s\_token](#input\_k3s\_token) | Token to use when configuring k3s nodes | `any` | `null` | no |
-| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the k3s cluster | `string` | `null` | no |
+| [k3s\_config](#input\_k3s\_config) | Additional K3s configuration to add to the config.yaml file | `any` | `null` | no |
+| [k3s\_token](#input\_k3s\_token) | Token to use when configuring K3s nodes | `any` | `null` | no |
+| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the K3s cluster | `string` | `"v1.28.9+k3s1"` | no |
 | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no |
 | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no |
-| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no |
-| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no |
-| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no |
-| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no |
+| [prefix](#input\_prefix) | n/a | `any` | n/a | yes |
+| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes |
+| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName
value | `string` | `"traefik"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [server\_instance\_count](#input\_server\_instance\_count) | Number of server EC2 instances to create | `number` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | -| [worker\_instance\_count](#input\_worker\_instance\_count) | Number of worker EC2 instances to create | `number` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs @@ -69,7 +75,5 @@ |------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index 009517a1..123b84fa 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -1,18 +1,18 @@ -# Setup local variables locals { - vpc = var.vpc == null ? "${var.prefix}-vpc" : var.vpc - subnet = var.subnet == null ? "${var.prefix}-subnet" : var.subnet - create_firewall = var.create_firewall == null ? false : true - private_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_private_key.pem") ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path - public_ssh_key_path = fileexists("${path.cwd}/${var.prefix}-ssh_public_key.pem") ? 
"${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.k3s-first-server.security_group[0].id + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" -} - -module "k3s_first" { +module "k3s-first" { source = "../../../../modules/distribution/k3s" k3s_token = var.k3s_token k3s_version = var.k3s_version @@ -20,110 +20,125 @@ module "k3s_first" { k3s_config = var.k3s_config } -module "k3s_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - user_data = module.k3s_first.k3s_server_user_data +module "k3s-first-server" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-first.k3s_server_user_data } -module "k3s_additional" { +module "k3s-additional" { source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s_first.k3s_token + k3s_token = module.k3s-first.k3s_token k3s_version = var.k3s_version k3s_channel = var.k3s_channel k3s_config = var.k3s_config - first_server_ip = module.k3s_first_server.instances_private_ip[0] + first_server_ip = module.k3s-first-server.instances_private_ip[0] } -module "k3s_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 
var.server_instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_server_user_data +module "k3s-additional-servers" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-additional-server" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.server_nodes_count - 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-additional.k3s_server_user_data } - -module "k3s_workers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.worker_instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_worker_user_data +module "k3s-additional-workers" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-worker" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s-additional.k3s_worker_user_data } - -data "local_file" "ssh_private_key" { - depends_on = [module.k3s_first_server] - filename = pathexpand(module.k3s_first_server.ssh_key_path) +data "local_file" "ssh-private-key" { + depends_on = [module.k3s-additional-workers] + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.k3s_first_server.instances_public_ip[0] +resource "ssh_resource" "retrieve-kubeconfig" { + host = module.k3s-first-server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + "sudo sed 's/127.0.0.1/${module.k3s-first-server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" ] user = 
var.ssh_username - private_key = data.local_file.ssh_private_key.content + private_key = data.local_file.ssh-private-key.content + retry_delay = "60s" } -resource "local_file" "kube_config_yaml" { +resource "local_file" "kube-config-yaml" { filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { +resource "local_file" "kube-config-yaml-backup" { filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve-kubeconfig.result +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [local_file.kube-config-yaml] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kc_file rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.server_instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/k3s/main.tf_bkp b/recipes/upstream/aws/k3s/main.tf_bkp deleted file mode 100644 index c30b9afa..00000000 --- a/recipes/upstream/aws/k3s/main.tf_bkp +++ /dev/null @@ -1,121 +0,0 @@ -locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" -} - -module "k3s_first" { - source = "../../../../modules/distribution/k3s" - k3s_token = var.k3s_token - k3s_version = var.k3s_version - k3s_channel = var.k3s_channel - k3s_config = var.k3s_config -} - -module "k3s_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - user_data = module.k3s_first.k3s_server_user_data -} - -module "k3s_additional" { - source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s_first.k3s_token - k3s_version = var.k3s_version - k3s_channel = var.k3s_channel - k3s_config = var.k3s_config - first_server_ip = module.k3s_first_server.instances_private_ip[0] -} - -module "k3s_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.server_instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_server_user_data -} - - -module "k3s_workers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.worker_instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_worker_user_data -} - - -data "local_file" "ssh_private_key" { - depends_on = [module.k3s_first_server] - filename = pathexpand(module.k3s_first_server.ssh_key_path) -} - -resource "ssh_resource" "retrieve_kubeconfig" { - host = module.k3s_first_server.instances_public_ip[0] - commands = [ - "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" - ] - user = var.ssh_username - private_key = data.local_file.ssh_private_key.content -} - -resource "local_file" "kube_config_yaml" { - filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" -} - -resource "local_file" "kube_config_yaml_backup" { - filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" -} - -locals { - rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) -} - -module "rancher_install" { 
- source = "../../../../modules/rancher" - dependency = var.server_instance_count > 1 ? module.k3s_additional_servers.dependency : module.k3s_first_server.dependency - kubeconfig_file = local_file.kube_config_yaml.filename - rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.server_instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password - rancher_password = var.rancher_password - rancher_version = var.rancher_version - wait = var.wait -} diff --git a/recipes/upstream/aws/k3s/outputs.tf b/recipes/upstream/aws/k3s/outputs.tf index 5dd2766a..c21cc80f 100644 --- a/recipes/upstream/aws/k3s/outputs.tf +++ b/recipes/upstream/aws/k3s/outputs.tf @@ -1,25 +1,17 @@ -output "instances_public_ip" { - value = concat([module.k3s_first_server.instances_public_ip], [module.k3s_additional_servers.instances_public_ip]) -} - output "instances_private_ip" { - value = concat([module.k3s_first_server.instances_private_ip], [module.k3s_additional_servers.instances_private_ip]) + value = concat([module.k3s-first-server.instances_private_ip], [module.k3s-additional-servers.instances_private_ip]) } -output "rancher_hostname" { - value = local.rancher_hostname +output "instances_public_ip" { + value = concat([module.k3s-first-server.instances_public_ip], [module.k3s-additional-servers.instances_public_ip]) } output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/k3s/provider.tf b/recipes/upstream/aws/k3s/provider.tf index 6997a762..8e915083 100644 --- a/recipes/upstream/aws/k3s/provider.tf +++ b/recipes/upstream/aws/k3s/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index c73ad2a8..1ec29de6 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -1,53 +1,105 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. 
-# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null -## -- Override the default k8s version or channel used by K3S -# k3s_version = "v1.24.14+k3s1" -k3s_channel = "v1.25" +## -- AWS Subnet used for all resources +# subnet_id = null -## -- Number and type of EC2 instances to launch -server_instance_count = 1 -worker_instance_count = 1 +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Server nodes +server_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -## -- K3S token, override the programmatically generated token +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- RKE2 version +# k3s_version = "v1.28.3+k3sr2" + +## -- K3s channel +# k3s_channel = + +## -- RKE2 token, override the programmatically generated token # k3s_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- RKE2 custom config file +# k3s_config = "" + +## -- RKE2 KUBECONFIG file path +# kube_config_path = "" + +## -- RKE2 KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index 1c13e035..a999344a 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,40 +47,78 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "server_instance_count" { - type = number - description = "Number of server EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "worker_instance_count" { - type = number - description = "Number of worker EC2 instances to create" - default = null +variable "ssh_private_key_path" { + default = 
null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_public_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +# variable "vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 + + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." + } +} + +variable "worker_nodes_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "k3s_version" { type = string - description = "Kubernetes version to use for the k3s cluster" - default = null + description = "Kubernetes version to use for the RKE2 cluster" + default = "v1.28.9+k3s1" #Version compatible with Rancher v2.8.3 } variable "k3s_channel" { @@ -94,12 +128,12 @@ variable "k3s_channel" { } variable "k3s_token" { - description = "Token to use when configuring k3s nodes" + description = "Token to use when configuring RKE2 nodes" default = null } variable "k3s_config" { - description = "Additional k3s configuration to add to the config.yaml file" + description = "Additional RKE2 configuration to add to the config.yaml file" default = null } @@ -115,85 +149,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." 
} } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} - -variable "ssh_key_pair_path" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "traefik" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } From a2e05767c796da95811adaec85d32bc0cd69518e Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 12:24:11 +0200 Subject: [PATCH 27/35] Reviewed tests for AWS EC2 x RKE (split-roles) --- tests/recipes/rke/split-roles/aws/README.md | 31 +++++++++++++ tests/recipes/rke/split-roles/aws/docs.md | 44 +++++++++++++++++++ tests/recipes/rke/split-roles/aws/main.tf | 30 +++++-------- tests/recipes/rke/split-roles/aws/outputs.tf | 19 ++++++++ tests/recipes/rke/split-roles/aws/provider.tf | 36 +++++++++++++++ .../recipes/rke/split-roles/aws/variables.tf | 24 ++++++---- 6 files changed, 158 insertions(+), 26 deletions(-) create mode 100644 tests/recipes/rke/split-roles/aws/README.md create mode 100644 tests/recipes/rke/split-roles/aws/docs.md create mode 100644 tests/recipes/rke/split-roles/aws/outputs.tf create mode 100644 tests/recipes/rke/split-roles/aws/provider.tf diff --git a/tests/recipes/rke/split-roles/aws/README.md b/tests/recipes/rke/split-roles/aws/README.md new file mode 100644 index 00000000..d3790e82 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/README.md @@ -0,0 +1,31 @@ +# RKE | With split roles | AWS + +This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/rke/split-roles/aws +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `server_nodes_count`, `worker_nodes_count`, and `ssh_username`). +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/rke/split-roles/aws/docs.md b/tests/recipes/rke/split-roles/aws/docs.md new file mode 100644 index 00000000..48e9c812 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/docs.md @@ -0,0 +1,44 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | n/a | `number` | `3` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `number` | `3` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/tests/recipes/rke/split-roles/aws/main.tf b/tests/recipes/rke/split-roles/aws/main.tf index ed638dd3..9f693428 100644 --- a/tests/recipes/rke/split-roles/aws/main.tf +++ b/tests/recipes/rke/split-roles/aws/main.tf @@ -1,21 +1,15 @@ -module "test1_default" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true +module "aws-ec2-upstream-master-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.server_nodes_count + ssh_username = var.ssh_username } -module "test2_pass_existing_key" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - ssh_key_pair_name = "junk" - ssh_key_pair_path = "~/somepath" +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + instance_count = var.worker_nodes_count + ssh_username = var.ssh_username } diff --git a/tests/recipes/rke/split-roles/aws/outputs.tf 
b/tests/recipes/rke/split-roles/aws/outputs.tf new file mode 100644 index 00000000..02d833fb --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/outputs.tf @@ -0,0 +1,19 @@ +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) +} + +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) +} + +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id +} + +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id +} diff --git a/tests/recipes/rke/split-roles/aws/provider.tf b/tests/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/rke/split-roles/aws/variables.tf b/tests/recipes/rke/split-roles/aws/variables.tf index 21e0b5af..382f6564 100644 --- a/tests/recipes/rke/split-roles/aws/variables.tf +++ b/tests/recipes/rke/split-roles/aws/variables.tf @@ -1,11 +1,19 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "server_nodes_count" { + default = 3 +} + +variable "worker_nodes_count" { + default = 3 +} + +variable "ssh_username" { + default = "ubuntu" } From e73ffd198372d6d7b52ae54a4b13b51e1b0ac4bb Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 2 Jul 2024 17:41:01 +0200 Subject: [PATCH 28/35] Fixed AWS x K3S README.md file --- recipes/upstream/aws/k3s/README.md | 2 +- recipes/upstream/aws/k3s/docs.md | 6 +++--- recipes/upstream/aws/k3s/terraform.tfvars.example | 12 ++++++------ recipes/upstream/aws/k3s/variables.tf | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index e80dee31..11bc92ca 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -1,4 +1,4 @@ -# Upstream | AWS | EC2 x RKE2 +# Upstream | AWS | EC2 x K3S This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [K3s](https://docs.k3s.io/). 
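
For the split-roles test recipe introduced above, a minimal end-to-end run might look like the sketch below. The variable and output names (`prefix`, `aws_region`, `server_nodes_count`, `worker_nodes_count`, `ssh_username`, `vpc`, `subnet`, `security_group`) are taken from the test's `variables.tf` and `outputs.tf`; the values shown are placeholders, since the defaults already work out of the box.

```bash
# Illustrative only: override the test defaults at apply time and
# read back the IDs of the network objects created by the EC2 module.
terraform init -upgrade
terraform apply -auto-approve \
  -var="prefix=ec2-test" \
  -var="aws_region=us-east-1" \
  -var="server_nodes_count=3" \
  -var="worker_nodes_count=3" \
  -var="ssh_username=ubuntu"
terraform output vpc
terraform output subnet
terraform output security_group
```
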
diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 998163cb..056b8b30 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -47,9 +47,9 @@ | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | -| [k3s\_config](#input\_k3s\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | -| [k3s\_token](#input\_k3s\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | -| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `"v1.28.9+k3s1"` | no | +| [k3s\_config](#input\_k3s\_config) | Additional K3S configuration to add to the config.yaml file | `any` | `null` | no | +| [k3s\_token](#input\_k3s\_token) | Token to use when configuring K3S nodes | `any` | `null` | no | +| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the K3S cluster | `string` | `"v1.28.9+k3s1"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | | [prefix](#input\_prefix) | n/a | `any` | n/a | yes | diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index 1ec29de6..fee7a5f6 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -68,22 +68,22 @@ ssh_username = "ubuntu" ## -- Waiting time (in seconds) # waiting_time = 180 -## -- RKE2 version +## -- K3S version # k3s_version = "v1.28.3+k3sr2" ## -- K3s channel # k3s_channel = -## -- RKE2 token, override the programmatically generated token +## -- K3S token, override the programmatically generated token # k3s_token = "string here" -## -- RKE2 custom config file -# k3s_config = "" +## -- K3S custom config file +# k3s_config = "" -## -- RKE2 KUBECONFIG file path +## -- K3S KUBECONFIG file path # kube_config_path = "" -## -- RKE2 KUBECONFIG file +## -- K3S KUBECONFIG file # kube_config_filename = "" ## -- Bootstrap the Rancher installation diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index a999344a..c5b932f7 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -117,7 +117,7 @@ variable "waiting_time" { variable "k3s_version" { type = string - description = "Kubernetes version to use for the RKE2 cluster" + description = "Kubernetes version to use for the K3S cluster" default = "v1.28.9+k3s1" #Version compatible with Rancher v2.8.3 } @@ -128,12 +128,12 @@ variable "k3s_channel" { } variable "k3s_token" { - description = "Token to use when configuring RKE2 nodes" + description = "Token to use when configuring K3S nodes" default = null } variable "k3s_config" { - description = "Additional RKE2 configuration to add to the config.yaml file" + description = "Additional K3S configuration to add to the config.yaml file" default = null } From 559084233644a99c199476e2dce5fe3dbd54993a Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Wed, 3 Jul 2024 18:42:36 +0200 Subject: [PATCH 
29/35] Fixed AWS x K3S README.md file --- recipes/upstream/aws/k3s/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index 11bc92ca..efc1c0a4 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -16,7 +16,7 @@ cd recipes/upstream/aws/k3s - Update the required variables: - `prefix` to give the resources an identifiable name (eg, your initials or first name) - `aws_region` to suit your region - - `server_nodes_count` to specify the number of Master nodes to create + - `server_nodes_count` to specify the number of Master nodes to create (to maintain ETCD quorum, the value must be 1, 3, or 5) - `worker_nodes_count` to specify the number of Worker nodes to create - `ssh_username` to specify the user used to create the VMs (default "ubuntu") - `rancher_hostname` in order to reach the Rancher console via DNS name @@ -31,7 +31,7 @@ terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_pr - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform state rm module.rancher_install ; terraform destroy -auto-approve ``` See full argument list for each module in use: From 9e4b6588b4fd110ee130bfdfa4e8dd74ce2ac907 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Fri, 12 Jul 2024 08:59:55 +0200 Subject: [PATCH 30/35] Fixed copy/paste issue From 203c579fd36d2c09d978f979b5248986217e9843 Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 30 Jul 2024 08:34:50 +0200 Subject: [PATCH 31/35] Rebase --- recipes/upstream/aws/k3s/README.md | 4 ++-- recipes/upstream/aws/k3s/docs.md | 2 -- recipes/upstream/aws/k3s/main.tf | 2 ++ recipes/upstream/aws/k3s/outputs.tf | 8 -------- recipes/upstream/aws/rke2/README.md | 4 ++-- recipes/upstream/aws/rke2/docs.md | 5 ----- recipes/upstream/aws/rke2/main.tf | 1 + recipes/upstream/aws/rke2/outputs.tf | 20 -------------------- 8 files changed, 7 insertions(+), 39 deletions(-) diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index efc1c0a4..3476c12a 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -26,12 +26,12 @@ cd recipes/upstream/aws/k3s **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.k3s-first-server.tls_private_key.ssh_private_key -target=module.k3s-first-server.local_file.private_key_pem -target=module.k3s-first-server.local_file.public_key_pem -target=module.k3s-first-server.aws_key_pair.key_pair -target=module.k3s-first-server.aws_vpc.vpc -target=module.k3s-first-server.aws_subnet.subnet -target=module.k3s-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform state rm module.rancher_install ; terraform destroy -auto-approve +terraform state rm module.rancher_install && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 056b8b30..21be1a6b 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -73,7 +73,5 @@ | Name | Description | 
|------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | | [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | | [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index 123b84fa..ad06514c 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -51,6 +51,7 @@ module "k3s-additional" { module "k3s-additional-servers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = "${var.prefix}-additional-server" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair @@ -71,6 +72,7 @@ module "k3s-additional-servers" { module "k3s-additional-workers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = "${var.prefix}-worker" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair diff --git a/recipes/upstream/aws/k3s/outputs.tf b/recipes/upstream/aws/k3s/outputs.tf index c21cc80f..defc2d99 100644 --- a/recipes/upstream/aws/k3s/outputs.tf +++ b/recipes/upstream/aws/k3s/outputs.tf @@ -1,11 +1,3 @@ -output "instances_private_ip" { - value = concat([module.k3s-first-server.instances_private_ip], [module.k3s-additional-servers.instances_private_ip]) -} - -output "instances_public_ip" { - value = concat([module.k3s-first-server.instances_public_ip], [module.k3s-additional-servers.instances_public_ip]) -} - output "rancher_url" { description = "Rancher URL" value = "https://${module.rancher_install.rancher_hostname}" diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index 811b29e6..a35b3f9b 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -25,12 +25,12 @@ cd recipes/upstream/aws/rke2 **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.rke2-first-server.tls_private_key.ssh_private_key -target=module.rke2-first-server.local_file.private_key_pem -target=module.rke2-first-server.local_file.public_key_pem -target=module.rke2-first-server.aws_key_pair.key_pair -target=module.rke2-first-server.aws_vpc.vpc -target=module.rke2-first-server.aws_subnet.subnet -target=module.rke2-first-server.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve ; terraform apply -target=module.rancher_install -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 0801c42e..3f128ba4 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -70,10 +70,5 @@ | Name | Description | |------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | | [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | | [rancher\_url](#output\_rancher\_url) | Rancher URL | -| 
[security\_group](#output\_security\_group) | n/a | -| [subnet](#output\_subnet) | n/a | -| [vpc](#output\_vpc) | n/a | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index 84ae7825..fe336082 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -49,6 +49,7 @@ module "rke2-additional" { module "rke2-additional-servers" { source = "../../../../modules/infra/aws/ec2" + count = 0 prefix = var.prefix aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index a85d4257..34a6f90d 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,23 +1,3 @@ -output "instances_private_ip" { - value = concat([module.rke2-first-server.instances_private_ip], [module.rke2-additional-servers.instances_private_ip]) -} - -output "instances_public_ip" { - value = concat([module.rke2-first-server.instances_public_ip], [module.rke2-additional-servers.instances_public_ip]) -} - -output "vpc" { - value = module.rke2-first-server.vpc[0].id -} - -output "subnet" { - value = module.rke2-first-server.subnet[0].id -} - -output "security_group" { - value = module.rke2-first-server.security_group[0].id -} - # Uncomment for debugging purposes #output "rke2_first_server_config_file" { # value = nonsensitive(module.rke2-first.rke2_user_data) From d61897982af2899a2787278337a8393d1e1e8c5f Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 20:34:14 +0200 Subject: [PATCH 32/35] Fixed all the AWS recipes --- recipes/upstream/aws/k3s/docs.md | 1 + recipes/upstream/aws/k3s/main.tf | 6 ++++-- recipes/upstream/aws/k3s/terraform.tfvars.example | 3 +++ recipes/upstream/aws/k3s/variables.tf | 4 ++++ recipes/upstream/aws/rke2/docs.md | 1 + recipes/upstream/aws/rke2/main.tf | 4 +++- recipes/upstream/aws/rke2/terraform.tfvars.example | 3 +++ recipes/upstream/aws/rke2/variables.tf | 4 ++++ 8 files changed, 23 insertions(+), 3 deletions(-) diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 21be1a6b..2826f4dc 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -45,6 +45,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | | [k3s\_config](#input\_k3s\_config) | Additional K3S configuration to add to the config.yaml file | `any` | `null` | no | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index ad06514c..b6b50821 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? 
"${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true @@ -28,6 +29,7 @@ module "k3s-first-server" { # ssh_key_pair_name = var.ssh_key_pair_name # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id # create_security_group = var.create_security_group @@ -51,13 +53,13 @@ module "k3s-additional" { module "k3s-additional-servers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = "${var.prefix}-additional-server" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group @@ -72,13 +74,13 @@ module "k3s-additional-servers" { module "k3s-additional-workers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = "${var.prefix}-worker" aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index fee7a5f6..9fd47570 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index c5b932f7..d8c11e83 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -63,6 +63,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + # variable "vpc_ip_cidr_range" {} variable "vpc_id" { diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 3f128ba4..6cc9f7e4 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -44,6 +44,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | 
`null` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index fe336082..c0d78aff 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true @@ -27,6 +28,7 @@ module "rke2-first-server" { # ssh_key_pair_name = var.ssh_key_pair_name # ssh_private_key_path = var.ssh_private_key_path # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id # create_security_group = var.create_security_group @@ -49,13 +51,13 @@ module "rke2-additional" { module "rke2-additional-servers" { source = "../../../../modules/infra/aws/ec2" - count = 0 prefix = var.prefix aws_region = var.aws_region create_ssh_key_pair = local.create_ssh_key_pair ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/upstream/aws/rke2/terraform.tfvars.example b/recipes/upstream/aws/rke2/terraform.tfvars.example index 3b85cf4e..6aa5b1f3 100644 --- a/recipes/upstream/aws/rke2/terraform.tfvars.example +++ b/recipes/upstream/aws/rke2/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/upstream/aws/rke2/variables.tf b/recipes/upstream/aws/rke2/variables.tf index e4263217..c0607891 100644 --- a/recipes/upstream/aws/rke2/variables.tf +++ b/recipes/upstream/aws/rke2/variables.tf @@ -63,6 +63,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + # variable "vpc_ip_cidr_range" {} variable "vpc_id" { From 9223f88aae1ff0e49fa3610404fbdeec58509bda Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Mon, 15 Jul 2024 21:00:58 +0200 Subject: [PATCH 33/35] Fixed RKE split-roles recipe --- recipes/rke/split-roles/aws/README.md | 4 ++-- recipes/rke/split-roles/aws/docs.md | 1 + recipes/rke/split-roles/aws/main.tf | 2 ++ recipes/rke/split-roles/aws/terraform.tfvars.example | 3 +++ recipes/rke/split-roles/aws/variables.tf | 4 ++++ 5 files changed, 12 insertions(+), 2 deletions(-) diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index a7a1f8b1..6de6add0 100644 --- 
a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -26,12 +26,12 @@ cd recipes/rke/split-roles/aws **NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -terraform init -upgrade ; terraform apply -target=module.aws-ec2-upstream-master-nodes.tls_private_key.ssh_private_key -target=module.aws-ec2-upstream-master-nodes.local_file.private_key_pem -target=module.aws-ec2-upstream-master-nodes.local_file.public_key_pem -target=module.aws-ec2-upstream-master-nodes.aws_key_pair.key_pair -target=module.aws-ec2-upstream-master-nodes.aws_vpc.vpc -target=module.aws-ec2-upstream-master-nodes.aws_subnet.subnet -target=module.aws-ec2-upstream-master-nodes.aws_security_group.sg_allowall -auto-approve ; terraform apply -auto-approve +terraform init -upgrade && terraform apply -auto-approve ``` - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve ; terraform destroy -auto-approve +terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index 75c5341d..8119301a 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -39,6 +39,7 @@ | [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | | [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | | [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | | [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index 4c837074..1b02bbf5 100644 --- a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -3,6 +3,7 @@ locals { ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true vpc_id = var.vpc_id == null ? module.aws-ec2-upstream-master-nodes.vpc[0].id : var.vpc_id subnet_id = var.subnet_id == null ? module.aws-ec2-upstream-master-nodes.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? 
false : true @@ -32,6 +33,7 @@ module "aws-ec2-upstream-worker-nodes" { ssh_key_pair_name = local.ssh_key_pair_name ssh_private_key_path = local.local_ssh_private_key_path ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc vpc_id = local.vpc_id subnet_id = local.subnet_id create_security_group = local.create_security_group diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example index a1847526..e4bc2f48 100644 --- a/recipes/rke/split-roles/aws/terraform.tfvars.example +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -27,6 +27,9 @@ aws_region = "" ## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set # ssh_public_key_path = null +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + ## -- Range of private IPs available for the AWS VPC # vpc_ip_cidr_range = "10.0.0.0/16" diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index b3017b3a..28466f01 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -57,6 +57,10 @@ variable "ssh_public_key_path" { default = null } +variable "create_vpc" { + default = null +} + variable "vpc_id" { default = null } From 35f71067767a78881736c2a64db938f05b549b5f Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Thu, 25 Jul 2024 19:19:06 +0200 Subject: [PATCH 34/35] One-destroy command - GKE x RKE,RKE2,K3s --- recipes/standalone/aws/rke/README.md | 93 ++---------- recipes/standalone/aws/rke/docs.md | 51 ++++--- recipes/standalone/aws/rke/main.tf | 75 +++++++--- recipes/standalone/aws/rke/outputs.tf | 16 +- recipes/standalone/aws/rke/provider.tf | 36 +++++ .../aws/rke/terraform.tfvars.example | 115 ++++++++++---- recipes/standalone/aws/rke/variables.tf | 141 ++++++++---------- recipes/upstream/aws/k3s/README.md | 2 +- recipes/upstream/aws/k3s/docs.md | 19 ++- recipes/upstream/aws/k3s/main.tf | 78 +++++----- recipes/upstream/aws/rke2/README.md | 2 +- recipes/upstream/aws/rke2/docs.md | 19 ++- recipes/upstream/aws/rke2/main.tf | 70 +++++---- recipes/upstream/aws/rke2/outputs.tf | 4 +- 14 files changed, 382 insertions(+), 339 deletions(-) create mode 100644 recipes/standalone/aws/rke/provider.tf diff --git a/recipes/standalone/aws/rke/README.md b/recipes/standalone/aws/rke/README.md index ae7ac039..71e33587 100644 --- a/recipes/standalone/aws/rke/README.md +++ b/recipes/standalone/aws/rke/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE +# Upstream | AWS standalone | EC2 x RKE -This module is used to establish a Rancher (local) management cluster using AWS and RKE. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). Documentation can be found [here](./docs.md). 
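
As a quick reference for the standalone recipe described above: once an apply completes, the connection details can be read back from the Terraform state. This is only a sketch and assumes the recipe exposes the same `rancher_url` and `rancher_password` outputs as the other AWS recipes in this series.

```bash
# Assumption: rancher_url / rancher_password outputs exist for this recipe,
# matching the outputs shown for the K3s and RKE2 recipes earlier in the series.
terraform output rancher_url
terraform output rancher_password
```
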
@@ -11,90 +11,27 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` - -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Advanced - -Target a specific resource/module to action the changes only for that resource/module - -For example, target only the `rke_cluster` resource to re-run the equivalent of `rke up` - -```bash -terraform apply -target module.rke.rke_cluster.this -target module.rke.local_file.kube_config_yaml -``` - -This also updates the kube_config generated by RKE. 
- -### Notes - -A log file for the RKE provisioning is written to `rke.log` - See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE](../../../../modules/distribution/rke) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful [(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. - -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/standalone/aws/rke/docs.md b/recipes/standalone/aws/rke/docs.md index 1494e0c9..4ad01afa 100644 --- a/recipes/standalone/aws/rke/docs.md +++ b/recipes/standalone/aws/rke/docs.md @@ -1,54 +1,57 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [cluster-nodes](#module\_cluster-nodes) | ../../../../modules/infra/aws | n/a | +| [aws\_ec2\_upstream\_cluster](#module\_aws\_ec2\_upstream\_cluster) | ../../../../modules/infra/aws/ec2 | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | ## Resources -No resources. 
+| Name | Type | +|------|------| +| [null_resource.wait_docker_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| 
[ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | ## Outputs | Name | Description | |------|-------------| -| [dependency](#output\_dependency) | n/a | | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [kubeconfig\_filename](#output\_kubeconfig\_filename) | n/a | -| [kubeconfig\_yaml](#output\_kubeconfig\_yaml) | n/a | +| [kube\_config\_path](#output\_kube\_config\_path) | n/a | diff --git a/recipes/standalone/aws/rke/main.tf b/recipes/standalone/aws/rke/main.tf index e586988b..4d7dea06 100644 --- a/recipes/standalone/aws/rke/main.tf +++ b/recipes/standalone/aws/rke/main.tf @@ -1,18 +1,31 @@ -module "cluster-nodes" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" +} + +module "aws_ec2_upstream_cluster" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -20,27 +33,41 @@ module "cluster-nodes" { docker_version = var.docker_version } ) + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags +} + +resource "null_resource" "wait_docker_startup" { + depends_on = [module.aws_ec2_upstream_cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } module "rke" { source = "../../../../modules/distribution/rke" prefix = var.prefix - dependency = module.cluster-nodes.dependency - ssh_private_key_path = module.cluster-nodes.ssh_key_path + dependency = [null_resource.wait_docker_startup] + ssh_private_key_path = local.local_ssh_private_key_path node_username = var.ssh_username - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version + # kubernetes_version = var.kubernetes_version - rancher_nodes = [for instance_ips in module.cluster-nodes.instance_ips : + rancher_nodes = [for instance_ips in module.aws_ec2_upstream_cluster.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane", "worker"], - ssh_key_path = module.cluster-nodes.ssh_key_path - ssh_key = null - node_username = module.cluster-nodes.node_username + ssh_key_path = local.local_ssh_private_key_path, + ssh_key = null, hostname_override = null } ] } + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} diff --git a/recipes/standalone/aws/rke/outputs.tf b/recipes/standalone/aws/rke/outputs.tf index 8cc5675c..3ce0f7d3 100644 --- a/recipes/standalone/aws/rke/outputs.tf +++ b/recipes/standalone/aws/rke/outputs.tf @@ -1,19 +1,11 @@ output "instances_public_ip" { - value = module.cluster-nodes.instances_public_ip + value = module.aws_ec2_upstream_cluster.instances_public_ip } output "instances_private_ip" { - value = module.cluster-nodes.instances_private_ip + value = module.aws_ec2_upstream_cluster.instances_private_ip } -output "dependency" { - value = module.rke.dependency -} - -output "kubeconfig_filename" { - value = module.rke.rke_kubeconfig_filename -} - -output "kubeconfig_yaml" { - value = module.rke.kube_config_yaml +output "kube_config_path" { + value = local.kc_file } diff --git a/recipes/standalone/aws/rke/provider.tf b/recipes/standalone/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/standalone/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + 
required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/standalone/aws/rke/terraform.tfvars.example b/recipes/standalone/aws/rke/terraform.tfvars.example index c155c6ef..4a701c8f 100644 --- a/recipes/standalone/aws/rke/terraform.tfvars.example +++ b/recipes/standalone/aws/rke/terraform.tfvars.example @@ -1,48 +1,99 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null -## -- Override the default k8s version used by RKE -# kubernetes_version = "v1.24.10-rancher4-1" +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Script that will run when the VMs start +# user_data = "" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" \ No newline at end of file +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/standalone/aws/rke/variables.tf b/recipes/standalone/aws/rke/variables.tf index fc4c46dc..3d7afebd 100644 --- a/recipes/standalone/aws/rke/variables.tf +++ b/recipes/standalone/aws/rke/variables.tf @@ -1,19 +1,10 @@ -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" - default = null -} +variable "prefix" {} -variable "aws_access_key" { - type = 
string - description = "AWS access key used to create infrastructure" - default = null -} +# variable "aws_access_key" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -56,51 +47,69 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" - default = null +variable "ssh_public_key_path" { + default = null } -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "create_vpc" { + default = null } -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null -} +# variable "vpc_ip_cidr_range" {} -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" +# variable "vpc_id" {} + +# variable "subnet_id" {} + +# variable "create_security_group" {} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +# variable "instance_security_group_id" {} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +#variable "bastion_host" { +# type = object({ +# address = string +# user = string +# ssh_key = string +# ssh_key_path = string +# }) +# default = null +# description = "Bastion host configuration to access the instances" +#} + +# variable "iam_instance_profile" {} + +# variable "tags" {} + variable "install_docker" { type = bool - description = "Should install docker while creating the instance" + description = "Install Docker while creating the instances" default = true } @@ -110,51 +119,21 @@ variable "docker_version" { default = "20.10" } -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 120 } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} +# variable "kubernetes_version" {} -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable 
"create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "kube_config_filename" { + description = "Filename to write the kube config" type = string - description = "Provide a pre-existing security group ID" default = null } diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index 3476c12a..2161f73a 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -31,7 +31,7 @@ terraform init -upgrade && terraform apply -auto-approve - Destroy the resources when finished ```bash -terraform state rm module.rancher_install && terraform destroy -auto-approve +terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index 2826f4dc..2132e2f9 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -20,22 +20,21 @@ | Name | Source | Version | |------|--------|---------| -| [k3s-additional](#module\_k3s-additional) | ../../../../modules/distribution/k3s | n/a | -| [k3s-additional-servers](#module\_k3s-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | -| [k3s-additional-workers](#module\_k3s-additional-workers) | ../../../../modules/infra/aws/ec2 | n/a | -| [k3s-first](#module\_k3s-first) | ../../../../modules/distribution/k3s | n/a | -| [k3s-first-server](#module\_k3s-first-server) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_additional](#module\_k3s\_additional) | ../../../../modules/distribution/k3s | n/a | +| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_additional\_workers](#module\_k3s\_additional\_workers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_first](#module\_k3s\_first) | ../../../../modules/distribution/k3s | n/a | +| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | -| [local_file.ssh-private-key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | +| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs diff --git a/recipes/upstream/aws/k3s/main.tf 
b/recipes/upstream/aws/k3s/main.tf index b6b50821..b75e839f 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -4,16 +4,16 @@ locals { local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path create_vpc = var.create_vpc == null ? false : true - vpc_id = var.vpc_id == null ? module.k3s-first-server.vpc[0].id : var.vpc_id - subnet_id = var.subnet_id == null ? module.k3s-first-server.subnet[0].id : var.subnet_id + vpc_id = var.vpc_id == null ? module.k3s_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.k3s_first_server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true - instance_security_group_id = local.create_security_group == "true" ? null : module.k3s-first-server.security_group[0].id + instance_security_group_id = local.create_security_group == "true" ? null : module.k3s_first_server.security_group[0].id kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" } -module "k3s-first" { +module "k3s_first" { source = "../../../../modules/distribution/k3s" k3s_token = var.k3s_token k3s_version = var.k3s_version @@ -21,14 +21,14 @@ module "k3s-first" { k3s_config = var.k3s_config } -module "k3s-first-server" { +module "k3s_first_server" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair - # ssh_key_pair_name = var.ssh_key_pair_name - # ssh_private_key_path = var.ssh_private_key_path - # ssh_public_key_path = var.ssh_public_key_path + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id @@ -39,19 +39,19 @@ module "k3s-first-server" { # instance_disk_size = var.instance_disk_size # instance_security_group_id = var.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-first.k3s_server_user_data + user_data = module.k3s_first.k3s_server_user_data } -module "k3s-additional" { +module "k3s_additional" { source = "../../../../modules/distribution/k3s" - k3s_token = module.k3s-first.k3s_token + k3s_token = module.k3s_first.k3s_token k3s_version = var.k3s_version k3s_channel = var.k3s_channel k3s_config = var.k3s_config - first_server_ip = module.k3s-first-server.instances_private_ip[0] + first_server_ip = module.k3s_first_server.instances_private_ip[0] } -module "k3s-additional-servers" { +module "k3s_additional_servers" { source = "../../../../modules/infra/aws/ec2" prefix = "${var.prefix}-additional-server" aws_region = var.aws_region @@ -69,10 +69,10 @@ module "k3s-additional-servers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-additional.k3s_server_user_data + user_data = module.k3s_additional.k3s_server_user_data } -module "k3s-additional-workers" { +module "k3s_additional_workers" { source = "../../../../modules/infra/aws/ec2" prefix = 
"${var.prefix}-worker" aws_region = var.aws_region @@ -90,51 +90,61 @@ module "k3s-additional-workers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.k3s-additional.k3s_worker_user_data + user_data = module.k3s_additional.k3s_worker_user_data } -data "local_file" "ssh-private-key" { - depends_on = [module.k3s-additional-workers] - filename = local.local_ssh_private_key_path +data "local_file" "ssh_private_key" { + depends_on = [module.k3s_additional_workers] + + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve-kubeconfig" { - host = module.k3s-first-server.instances_public_ip[0] +resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + + host = module.k3s_first_server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.k3s-first-server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" + "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" ] user = var.ssh_username - private_key = data.local_file.ssh-private-key.content + private_key = data.local_file.ssh_private_key.content retry_delay = "60s" } -resource "local_file" "kube-config-yaml" { +resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube-config-yaml-backup" { - filename = local.kc_file_backup - file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename } -resource "null_resource" "wait-k8s-services-startup" { - depends_on = [local_file.kube-config-yaml] +provider "helm" { + kubernetes { + config_path = local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + provisioner "local-exec" { command = "sleep ${var.waiting_time}" } } locals { - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s-first-server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kc_file + dependency = [null_resource.wait_k8s_services_startup] + kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index a35b3f9b..ccfe949e 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -30,7 +30,7 @@ terraform init -upgrade && terraform apply -auto-approve - Destroy the resources when finished ```bash -terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve +terraform destroy -auto-approve ``` See full argument list for each module in use: diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 6cc9f7e4..42af12a9 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -12,8 +12,8 @@ | Name | Version | |------|---------| -| [local](#provider\_local) | n/a | -| [null](#provider\_null) | n/a | +| [local](#provider\_local) | 2.5.1 | +| [null](#provider\_null) | 3.2.2 | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -21,19 +21,18 @@ | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke2-additional](#module\_rke2-additional) | ../../../../modules/distribution/rke2 | n/a | -| [rke2-additional-servers](#module\_rke2-additional-servers) | ../../../../modules/infra/aws/ec2 | n/a | -| [rke2-first](#module\_rke2-first) | ../../../../modules/distribution/rke2 | n/a | -| [rke2-first-server](#module\_rke2-first-server) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2\_additional](#module\_rke2\_additional) | ../../../../modules/distribution/rke2 | n/a | +| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [rke2\_first](#module\_rke2\_first) | ../../../../modules/distribution/rke2 | n/a | +| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | ## Resources | Name | Type | |------|------| -| [local_file.kube-config-yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube-config-yaml-backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | -| [ssh_resource.retrieve-kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | +| [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | 
[local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | ## Inputs diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index c0d78aff..438745ff 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -4,30 +4,30 @@ locals { local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path create_vpc = var.create_vpc == null ? false : true - vpc_id = var.vpc_id == null ? module.rke2-first-server.vpc[0].id : var.vpc_id - subnet_id = var.subnet_id == null ? module.rke2-first-server.subnet[0].id : var.subnet_id + vpc_id = var.vpc_id == null ? module.rke2_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.rke2_first_server.subnet[0].id : var.subnet_id create_security_group = var.create_security_group == null ? false : true - instance_security_group_id = local.create_security_group == "true" ? null : module.rke2-first-server.security_group[0].id + instance_security_group_id = local.create_security_group == "true" ? null : module.rke2_first_server.security_group[0].id kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" kc_file_backup = "${local.kc_file}.backup" } -module "rke2-first" { +module "rke2_first" { source = "../../../../modules/distribution/rke2" rke2_token = var.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config } -module "rke2-first-server" { +module "rke2_first_server" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region # create_ssh_key_pair = var.create_ssh_key_pair - # ssh_key_pair_name = var.ssh_key_pair_name - # ssh_private_key_path = var.ssh_private_key_path - # ssh_public_key_path = var.ssh_public_key_path + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path # create_vpc = var.create_vpc # vpc_id = var.vpc_id # subnet_id = var.subnet_id @@ -38,18 +38,18 @@ module "rke2-first-server" { # instance_disk_size = var.instance_disk_size # instance_security_group_id = var.instance_security_group_id ssh_username = var.ssh_username - user_data = module.rke2-first.rke2_user_data + user_data = module.rke2_first.rke2_user_data } -module "rke2-additional" { +module "rke2_additional" { source = "../../../../modules/distribution/rke2" - rke2_token = module.rke2-first.rke2_token + rke2_token = module.rke2_first.rke2_token rke2_version = var.rke2_version rke2_config = var.rke2_config - first_server_ip = module.rke2-first-server.instances_private_ip[0] + first_server_ip = module.rke2_first_server.instances_private_ip[0] } -module "rke2-additional-servers" { +module "rke2_additional_servers" { source = "../../../../modules/infra/aws/ec2" prefix = var.prefix aws_region = var.aws_region @@ -67,50 +67,60 @@ module "rke2-additional-servers" { # instance_disk_size = var.instance_disk_size instance_security_group_id = local.instance_security_group_id ssh_username = var.ssh_username - user_data = module.rke2-additional.rke2_user_data + user_data = module.rke2_additional.rke2_user_data } data "local_file" 
"ssh_private_key" { - depends_on = [module.rke2-first-server] - filename = local.local_ssh_private_key_path + depends_on = [module.rke2_additional_servers] + + filename = local.local_ssh_private_key_path } -resource "ssh_resource" "retrieve-kubeconfig" { - host = module.rke2-first-server.instances_public_ip[0] +resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + + host = module.rke2_first_server.instances_public_ip[0] commands = [ - "sudo sed 's/127.0.0.1/${module.rke2-first-server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" + "sudo sed 's/127.0.0.1/${module.rke2_first_server.instances_public_ip[0]}/g' /etc/rancher/rke2/rke2.yaml" ] user = var.ssh_username private_key = data.local_file.ssh_private_key.content } -resource "local_file" "kube-config-yaml" { +resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube-config-yaml-backup" { - filename = local.kc_file_backup - file_permission = "0600" - content = ssh_resource.retrieve-kubeconfig.result +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename } -resource "null_resource" "wait-k8s-services-startup" { - depends_on = [module.rke2-additional-servers] +provider "helm" { + kubernetes { + config_path = local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + provisioner "local-exec" { command = "sleep ${var.waiting_time}" } } locals { - rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2-first-server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = [null_resource.wait-k8s-services-startup] - kubeconfig_file = local.kc_file + dependency = [null_resource.wait_k8s_services_startup] + kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index 34a6f90d..34d05284 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,11 +1,11 @@ # Uncomment for debugging purposes #output "rke2_first_server_config_file" { -# value = nonsensitive(module.rke2-first.rke2_user_data) +# value = nonsensitive(module.rke2_first.rke2_user_data) #} # Uncomment for debugging purposes #output "rke2_additional_servers_config_file" { -# value = nonsensitive(module.rke2-additional.rke2_user_data) +# value = nonsensitive(module.rke2_additional.rke2_user_data) #} output "rancher_url" { From 6670f1e85d4b609f64760083752698ac7fd145fb Mon Sep 17 00:00:00 2001 From: Giovanni Lo Vecchio Date: Tue, 30 Jul 2024 08:28:59 +0200 Subject: [PATCH 35/35] Rebase Reviewed tests for AWS EC2 and AWS EC2 x RKE x Rancher Rebase Reviewed tests for AWS EC2 and AWS EC2 x RKE2 x Rancher Fixed code in path recipes/rke/split-roles/aws - Added Rancher deployment Fixed AWS x RKE2 instances count Rewrote AWS EC2 x K3S recipe Reviewed tests for AWS EC2 x RKE (split-roles) Fixed AWS x K3S README.md file Fixed AWS x K3S README.md file Fixed copy/paste issue Rebase Fixed all the AWS recipes Fixed RKE split-roles recipe One-destroy command - GKE x RKE,RKE2,K3s Reviewed tests for AWS EC2 and AWS EC2 x RKE x Rancher Fixed AWS x RKE2 instances count Reviewed tests for AWS EC2 x RKE (split-roles) Fixed AWS x K3S README.md file Fixed copy/paste issue Fixed RKE split-roles recipe Revised standalone RKE module for AWS Fixed recipes/standalone/aws/rke/README.md file --- modules/infra/aws/README.md | 102 +++++--- modules/infra/aws/ec2/README.md | 217 ++++++++++++++++ modules/infra/aws/{ => ec2}/data.tf | 2 + modules/infra/aws/{ => ec2}/docs.md | 33 +-- modules/infra/aws/{ => ec2}/main.tf | 75 +++++- modules/infra/aws/ec2/outputs.tf | 30 +++ modules/infra/aws/{ => ec2}/variables.tf | 121 ++++----- modules/infra/aws/{ => ec2}/versions.tf | 0 modules/infra/aws/outputs.tf | 42 --- modules/infra/aws/provider.tf | 5 - recipes/rke/split-roles/aws/README.md | 35 +++ recipes/rke/split-roles/aws/docs.md | 90 ++++--- recipes/rke/split-roles/aws/main.tf | 144 ++++++----- recipes/rke/split-roles/aws/outputs.tf | 33 ++- recipes/rke/split-roles/aws/provider.tf | 36 +++ .../split-roles/aws/terraform.tfvars.example | 111 ++++++++ recipes/rke/split-roles/aws/variables.tf | 239 +++++++----------- recipes/standalone/aws/rke/README.md | 93 ++----- recipes/standalone/aws/rke/docs.md | 51 ++-- recipes/standalone/aws/rke/main.tf | 75 ++++-- recipes/standalone/aws/rke/outputs.tf | 16 +- recipes/standalone/aws/rke/provider.tf | 36 +++ .../aws/rke/terraform.tfvars.example | 115 ++++++--- recipes/standalone/aws/rke/variables.tf | 141 +++++------ recipes/upstream/aws/k3s/README.md | 83 ++---- recipes/upstream/aws/k3s/docs.md | 66 ++--- recipes/upstream/aws/k3s/main.tf | 165 +++++++----- 
recipes/upstream/aws/k3s/outputs.tf | 26 +- recipes/upstream/aws/k3s/provider.tf | 30 ++- .../upstream/aws/k3s/terraform.tfvars.example | 123 ++++++--- recipes/upstream/aws/k3s/variables.tf | 182 +++++++------ recipes/upstream/aws/rke/README.md | 95 ++----- recipes/upstream/aws/rke/docs.md | 54 ++-- recipes/upstream/aws/rke/main.tf | 66 ++--- recipes/upstream/aws/rke/outputs.tf | 22 +- recipes/upstream/aws/rke/provider.tf | 36 +++ .../upstream/aws/rke/terraform.tfvars.example | 115 ++++++--- recipes/upstream/aws/rke/variables.tf | 188 +++++++++----- recipes/upstream/aws/rke2/README.md | 81 ++---- recipes/upstream/aws/rke2/docs.md | 58 +++-- recipes/upstream/aws/rke2/main.tf | 127 ++++++---- recipes/upstream/aws/rke2/outputs.tf | 30 +-- recipes/upstream/aws/rke2/provider.tf | 30 ++- .../aws/rke2/terraform.tfvars.example | 115 ++++++--- recipes/upstream/aws/rke2/variables.tf | 158 +++++------- tests/modules/infra/aws/README.md | 1 - tests/modules/infra/aws/ec2/README.md | 32 +++ tests/modules/infra/aws/ec2/docs.md | 40 +++ tests/modules/infra/aws/ec2/main.tf | 7 + tests/modules/infra/aws/ec2/outputs.tf | 7 + tests/modules/infra/aws/ec2/provider.tf | 36 +++ .../infra/aws/ec2/terraform.tfvars.example | 20 ++ tests/modules/infra/aws/ec2/user_data.tmpl | 9 + tests/modules/infra/aws/ec2/variables.tf | 19 ++ tests/modules/infra/aws/main.tf | 29 --- tests/recipes/rke/split-roles/aws/README.md | 31 +++ tests/recipes/rke/split-roles/aws/docs.md | 44 ++++ tests/recipes/rke/split-roles/aws/main.tf | 30 +-- tests/recipes/rke/split-roles/aws/outputs.tf | 19 ++ tests/recipes/rke/split-roles/aws/provider.tf | 36 +++ .../recipes/rke/split-roles/aws/variables.tf | 24 +- tests/recipes/upstream/aws/rke/README.md | 31 +++ tests/recipes/upstream/aws/rke/docs.md | 57 +++++ tests/recipes/upstream/aws/rke/main.tf | 79 +++++- tests/recipes/upstream/aws/rke/outputs.tf | 17 ++ tests/recipes/upstream/aws/rke/provider.tf | 36 +++ .../upstream/aws/rke/terraform.tfvars.example | 96 +++++++ tests/recipes/upstream/aws/rke/user_data.tmpl | 9 + tests/recipes/upstream/aws/rke/variables.tf | 59 ++++- 69 files changed, 2867 insertions(+), 1593 deletions(-) create mode 100644 modules/infra/aws/ec2/README.md rename modules/infra/aws/{ => ec2}/data.tf (88%) rename modules/infra/aws/{ => ec2}/docs.md (64%) rename modules/infra/aws/{ => ec2}/main.tf (57%) create mode 100644 modules/infra/aws/ec2/outputs.tf rename modules/infra/aws/{ => ec2}/variables.tf (75%) rename modules/infra/aws/{ => ec2}/versions.tf (100%) delete mode 100644 modules/infra/aws/outputs.tf delete mode 100644 modules/infra/aws/provider.tf create mode 100644 recipes/rke/split-roles/aws/provider.tf create mode 100644 recipes/rke/split-roles/aws/terraform.tfvars.example create mode 100644 recipes/standalone/aws/rke/provider.tf create mode 100644 recipes/upstream/aws/rke/provider.tf delete mode 100644 tests/modules/infra/aws/README.md create mode 100644 tests/modules/infra/aws/ec2/README.md create mode 100644 tests/modules/infra/aws/ec2/docs.md create mode 100644 tests/modules/infra/aws/ec2/main.tf create mode 100644 tests/modules/infra/aws/ec2/outputs.tf create mode 100644 tests/modules/infra/aws/ec2/provider.tf create mode 100644 tests/modules/infra/aws/ec2/terraform.tfvars.example create mode 100644 tests/modules/infra/aws/ec2/user_data.tmpl create mode 100644 tests/modules/infra/aws/ec2/variables.tf delete mode 100644 tests/modules/infra/aws/main.tf create mode 100644 tests/recipes/rke/split-roles/aws/README.md create mode 100644 
tests/recipes/rke/split-roles/aws/docs.md create mode 100644 tests/recipes/rke/split-roles/aws/outputs.tf create mode 100644 tests/recipes/rke/split-roles/aws/provider.tf create mode 100644 tests/recipes/upstream/aws/rke/README.md create mode 100644 tests/recipes/upstream/aws/rke/docs.md create mode 100644 tests/recipes/upstream/aws/rke/outputs.tf create mode 100644 tests/recipes/upstream/aws/rke/provider.tf create mode 100644 tests/recipes/upstream/aws/rke/terraform.tfvars.example create mode 100644 tests/recipes/upstream/aws/rke/user_data.tmpl diff --git a/modules/infra/aws/README.md b/modules/infra/aws/README.md index 768097e3..58da987e 100644 --- a/modules/infra/aws/README.md +++ b/modules/infra/aws/README.md @@ -1,49 +1,79 @@ -# Terraform | AWS Infrastructure +# Terraform | AWS - Preparatory steps -Terraform module to provide AWS nodes prepared for creating a kubernetes cluster. +In order for Terraform to run operations on your behalf, you must [install and configure the AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html#getting-started-install-instructions). -Basic infrastructure options are provided to be coupled with other modules for example environments. +## Example -Documentation can be found [here](./docs.md). +#### macOS installation and setup for all users -## Examples +```console +curl "https://awscli.amazonaws.com/AWSCLIV2.pkg" -o "AWSCLIV2.pkg" +``` + +```console +sudo installer -pkg AWSCLIV2.pkg -target / +``` + +#### Verify installation + +```console +$ which aws +/usr/local/bin/aws +``` + +```console +$ aws --version +aws-cli/2.13.33 Python/3.11.6 Darwin/23.1.0 exe/x86_64 prompt/off +``` -#### Launch a single instance, create a keypair +#### Setup credentials and configuration -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - create_ssh_key_pair = true - user_data = | - echo "hello world" -} +##### Option 1 - AWS CLI + +```console +export AWS_ACCESS_KEY_ID= +export AWS_SECRET_ACCESS_KEY= +export AWS_DEFAULT_REGION= +export AWS_DEFAULT_OUTPUT=text ``` -#### Provide an existing SSH key and Security Group +##### Option 2 - Manually creating credential files -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - ssh_key_pair_name = "rancher-ssh" - instance_security_group = "sg-xxxxx" -} +```console +mkdir ~/.aws ``` -#### Provide an existing VPC and Subnet +```console +cd ~/.aws +``` + +```console +cat > credentials << EOL +[default] +aws_access_key_id = +aws_secret_access_key = +EOL +``` + +```console +cat > config << EOL +[default] +region = +output = text +EOL +``` + +##### Option 3 - IAM Identity Center credentials + +```console +aws configure sso +``` + +```console +export AWS_PROFILE= +``` -```terraform -module "upstream_cluster" { - source = "git::https://github.com/rancherlabs/tf-rancher-up.git//modules/infra/aws" - aws_region = "us-east-1" - prefix = "example-rancher" - instance_count = 1 - vpc_id = "vpc-xxxxx" - subnet_id = "subnet-xxxxxx" -} +##### Verify credentials +```console +aws sts get-caller-identity ``` diff --git a/modules/infra/aws/ec2/README.md b/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b383150d --- /dev/null +++ b/modules/infra/aws/ec2/README.md @@ -0,0 +1,217 @@ +# Terraform | 
AWS EC2 + +Terraform modules to provide VM instances - AWS EC2. + +Documentation can be found [here](./docs.md). + +## Example + +#### Launch three identical VM instances + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "vpc_id" {} + +variable "subnet_id" {} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + vpc_id = var.vpc_id + subnet_id = var.subnet_id + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` + +#### Launch two identical VM instances and a dedicated new VPC/Subnet + +```terraform +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + region = var.aws_region +} + +variable "prefix" {} + +variable "aws_region" { + type = string + description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + "ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" 
+ } +} + +variable "ssh_private_key_path" { + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if `create_ssh_key_pair = false` this variable must be set" + default = null +} + +variable "instance_count" {} + +variable "ssh_username" {} + +module "aws-ec2-upstream-cluster" { + source = "git::https://github.com/rancher/tf-rancher-up.git//modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} + +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} +``` diff --git a/modules/infra/aws/data.tf b/modules/infra/aws/ec2/data.tf similarity index 88% rename from modules/infra/aws/data.tf rename to modules/infra/aws/ec2/data.tf index 03859e23..ce8eb122 100644 --- a/modules/infra/aws/data.tf +++ b/modules/infra/aws/ec2/data.tf @@ -1,3 +1,5 @@ +data "aws_availability_zones" "available" {} + # TODO: Make the Ubuntu OS version configurable # TODO: Add support for ARM architecture data "aws_ami" "ubuntu" { diff --git a/modules/infra/aws/docs.md b/modules/infra/aws/ec2/docs.md similarity index 64% rename from modules/infra/aws/docs.md rename to modules/infra/aws/ec2/docs.md index 9a4a1c4a..d31ae970 100644 --- a/modules/infra/aws/docs.md +++ b/modules/infra/aws/ec2/docs.md @@ -21,50 +21,53 @@ No modules. | Name | Type | |------|------| | [aws_instance.instance](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance) | resource | +| [aws_internet_gateway.internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway) | resource | | [aws_key_pair.key_pair](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource | +| [aws_route_table.route_table](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table) | resource | +| [aws_route_table_association.rt_association](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/route_table_association) | resource | | [aws_security_group.sg_allowall](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_subnet.subnet](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/subnet) | resource | +| [aws_vpc.vpc](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) | resource | | [local_file.private_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [local_file.public_key_pem](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | | [tls_private_key.ssh_private_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | | [aws_ami.ubuntu](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | +| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS 
region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | | [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
object({ address = string, user = string, ssh_key = string, ssh_key_path = string })
| `null` | no | | [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | | [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | | [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | | [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | | [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `"rancher-terraform"` | no | | [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. | `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | Path to write the generated SSH private key | `string` | `null` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | | [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | | [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tag\_begin](#input\_tag\_begin) | When module is being called mode than once, begin tagging from this number | `number` | `1` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | | [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | | [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | | [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [vpc\_ip\_cidr\_range](#input\_vpc\_ip\_cidr\_range) | Range of private IPs available for the AWS VPC | `string` | 
`"10.0.0.0/16"` | no | ## Outputs | Name | Description | |------|-------------| -| [dependency](#output\_dependency) | n/a | | [instance\_ips](#output\_instance\_ips) | n/a | | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [node\_username](#output\_node\_username) | n/a | -| [sg-id](#output\_sg-id) | n/a | -| [ssh\_key](#output\_ssh\_key) | n/a | -| [ssh\_key\_pair\_name](#output\_ssh\_key\_pair\_name) | n/a | -| [ssh\_key\_path](#output\_ssh\_key\_path) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/modules/infra/aws/main.tf b/modules/infra/aws/ec2/main.tf similarity index 57% rename from modules/infra/aws/main.tf rename to modules/infra/aws/ec2/main.tf index 724f3814..5f5b6a6c 100644 --- a/modules/infra/aws/main.tf +++ b/modules/infra/aws/ec2/main.tf @@ -1,6 +1,6 @@ -# Condition to use an existing keypair if a keypair name and file is also provided locals { - new_key_pair_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + private_ssh_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + public_ssh_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path } resource "tls_private_key" "ssh_private_key" { @@ -10,21 +10,76 @@ resource "tls_private_key" "ssh_private_key" { resource "local_file" "private_key_pem" { count = var.create_ssh_key_pair ? 1 : 0 - filename = local.new_key_pair_path + filename = local.private_ssh_key_path content = tls_private_key.ssh_private_key[0].private_key_openssh file_permission = "0600" } +resource "local_file" "public_key_pem" { + count = var.create_ssh_key_pair ? 1 : 0 + filename = local.public_ssh_key_path + content = tls_private_key.ssh_private_key[0].public_key_openssh + file_permission = "0600" +} + resource "aws_key_pair" "key_pair" { count = var.create_ssh_key_pair ? 1 : 0 key_name = "tf-rancher-up-${var.prefix}" public_key = tls_private_key.ssh_private_key[0].public_key_openssh } -resource "aws_security_group" "sg_allowall" { - count = var.create_security_group ? 1 : 0 - vpc_id = var.vpc_id +resource "aws_vpc" "vpc" { + count = var.create_vpc ? 1 : 0 + cidr_block = var.vpc_ip_cidr_range + + tags = { + Name = "${var.prefix}-vpc" + } +} + +resource "aws_subnet" "subnet" { + depends_on = [resource.aws_route_table.route_table[0]] + + count = var.create_vpc ? 1 : 0 + availability_zone = data.aws_availability_zones.available.names[count.index] + # cidr_block = var.subnet_ip_cidr_range[count.index] + cidr_block = "10.0.${count.index}.0/24" + map_public_ip_on_launch = true + vpc_id = var.vpc_id == null ? aws_vpc.vpc[0].id : var.vpc_id + + tags = { + Name = "${var.prefix}-subnet" + } +} + +resource "aws_internet_gateway" "internet_gateway" { + count = var.create_vpc ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + + tags = { + Name = "${var.prefix}-ig" + } +} + +resource "aws_route_table" "route_table" { + count = var.create_vpc ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id + route { + cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.internet_gateway[0].id + } +} + +resource "aws_route_table_association" "rt_association" { + count = var.create_vpc ? 1 : 0 + subnet_id = var.subnet_id == null ? 
"${aws_subnet.subnet.*.id[0]}" : var.subnet_id + route_table_id = aws_route_table.route_table[0].id +} + +resource "aws_security_group" "sg_allowall" { + count = var.create_security_group ? 1 : 0 + vpc_id = aws_vpc.vpc[0].id name = "${var.prefix}-allow-nodes" description = "Allow traffic for nodes in the cluster" @@ -74,13 +129,15 @@ resource "aws_security_group" "sg_allowall" { } resource "aws_instance" "instance" { + depends_on = [resource.aws_route_table_association.rt_association[0]] + count = var.instance_count ami = data.aws_ami.ubuntu.id instance_type = var.instance_type - subnet_id = var.subnet_id + subnet_id = var.subnet_id == null ? "${aws_subnet.subnet.*.id[0]}" : var.subnet_id key_name = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name - vpc_security_group_ids = [var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group] + vpc_security_group_ids = [var.create_security_group == true ? aws_security_group.sg_allowall[0].id : var.instance_security_group_id] user_data = var.user_data root_block_device { @@ -102,7 +159,7 @@ resource "aws_instance" "instance" { type = "ssh" host = var.bastion_host == null ? self.public_ip : self.private_ip user = var.ssh_username - private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_pem : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) + private_key = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : file("${local.private_ssh_key_path}") bastion_host = var.bastion_host != null ? var.bastion_host.address : null bastion_user = var.bastion_host != null ? var.bastion_host.user : null diff --git a/modules/infra/aws/ec2/outputs.tf b/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..32aebcb7 --- /dev/null +++ b/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,30 @@ +output "instances_public_ip" { + value = aws_instance.instance.*.public_ip +} + +output "instances_private_ip" { + value = aws_instance.instance.*.private_ip +} + +output "instance_ips" { + value = [ + for i in aws_instance.instance[*] : + { + public_ip = i.public_ip + private_ip = i.private_ip + private_dns = i.private_dns + } + ] +} + +output "vpc" { + value = aws_vpc.vpc +} + +output "subnet" { + value = aws_subnet.subnet +} + +output "security_group" { + value = aws_security_group.sg_allowall +} diff --git a/modules/infra/aws/variables.tf b/modules/infra/aws/ec2/variables.tf similarity index 75% rename from modules/infra/aws/variables.tf rename to modules/infra/aws/ec2/variables.tf index ea4dc590..5cfb164a 100644 --- a/modules/infra/aws/variables.tf +++ b/modules/infra/aws/ec2/variables.tf @@ -1,13 +1,7 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} - -variable "aws_secret_key" { +variable "prefix" { type = string - description = "AWS secret key used to create AWS infrastructure" - default = null + description = "Prefix added to names of all resources" + default = "rancher-terraform" } variable "aws_region" { @@ -51,37 +45,39 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = "rancher-terraform" +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true } -variable "tag_begin" { - type = number - description = "When module is being called mode 
than once, begin tagging from this number" - default = 1 +variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" + default = null } -variable "instance_type" { +variable "ssh_private_key_path" { type = string - description = "Instance type used for all EC2 instances" - default = "t3.medium" - nullable = false + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" - default = "80" - nullable = false +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = 3 - nullable = false +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true +} + +variable "vpc_ip_cidr_range" { + type = string + default = "10.0.0.0/16" + description = "Range of private IPs available for the AWS VPC" } variable "vpc_id" { @@ -96,50 +92,42 @@ variable "subnet_id" { default = null } -variable "create_ssh_key_pair" { +variable "create_security_group" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false + description = "Should create the security group associated with the instance(s)" + default = true nullable = false } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false } -variable "ssh_key_pair_path" { +variable "instance_type" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false } -# Used in CI/CD as we don't store the SSH key local. It would read from a secret and -# the contents are passed on directly. Used when create_ssh_key_pair is false and -# ssh_key_pair_name is null -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." 
- default = null - sensitive = true +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false } -variable "ssh_private_key_path" { +variable "instance_disk_size" { type = string - description = "Path to write the generated SSH private key" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true + description = "Specify root disk size (GB)" + default = "80" nullable = false } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "instance_security_group_id" { type = string description = "Provide a pre-existing security group ID" default = null @@ -152,13 +140,6 @@ variable "ssh_username" { nullable = false } -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = false - nullable = false -} - variable "user_data" { description = "User data content for EC2 instance(s)" default = null @@ -181,6 +162,12 @@ variable "iam_instance_profile" { default = null } +variable "tag_begin" { + type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 +} + variable "tags" { description = "User-provided tags for the resources" type = map(string) diff --git a/modules/infra/aws/versions.tf b/modules/infra/aws/ec2/versions.tf similarity index 100% rename from modules/infra/aws/versions.tf rename to modules/infra/aws/ec2/versions.tf diff --git a/modules/infra/aws/outputs.tf b/modules/infra/aws/outputs.tf deleted file mode 100644 index e638dc76..00000000 --- a/modules/infra/aws/outputs.tf +++ /dev/null @@ -1,42 +0,0 @@ -output "dependency" { - value = var.instance_count != 0 ? aws_instance.instance[0].arn : null -} - -output "instances_public_ip" { - value = aws_instance.instance.*.public_ip -} - -output "instances_private_ip" { - value = aws_instance.instance.*.private_ip -} - -output "instance_ips" { - value = [ - for i in aws_instance.instance[*] : - { - public_ip = i.public_ip - private_ip = i.private_ip - private_dns = i.private_dns - } - ] -} - -output "node_username" { - value = var.ssh_username -} - -output "ssh_key" { - value = var.create_ssh_key_pair ? tls_private_key.ssh_private_key[0].private_key_openssh : (var.ssh_key_pair_path != null ? file(pathexpand(var.ssh_key_pair_path)) : var.ssh_key) -} - -output "ssh_key_path" { - value = var.create_ssh_key_pair ? local_file.private_key_pem[0].filename : var.ssh_key_pair_path -} - -output "ssh_key_pair_name" { - value = var.create_ssh_key_pair ? aws_key_pair.key_pair[0].key_name : var.ssh_key_pair_name -} - -output "sg-id" { - value = var.create_security_group ? aws_security_group.sg_allowall[0].id : var.instance_security_group -} \ No newline at end of file diff --git a/modules/infra/aws/provider.tf b/modules/infra/aws/provider.tf deleted file mode 100644 index f14e1d72..00000000 --- a/modules/infra/aws/provider.tf +++ /dev/null @@ -1,5 +0,0 @@ -provider "aws" { - access_key = var.aws_access_key != null ? var.aws_access_key : null - secret_key = var.aws_secret_key != null ? 
var.aws_secret_key : null - region = var.aws_region -} \ No newline at end of file diff --git a/recipes/rke/split-roles/aws/README.md b/recipes/rke/split-roles/aws/README.md index ff4456c4..6de6add0 100644 --- a/recipes/rke/split-roles/aws/README.md +++ b/recipes/rke/split-roles/aws/README.md @@ -3,3 +3,38 @@ This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd recipes/rke/split-roles/aws +``` + +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `server_nodes_count` to specify the number of Master nodes to create + - `worker_nodes_count` to specify the number of Worker nodes to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). + +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** + +```bash +terraform init -upgrade && terraform apply -auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy -target=module.rancher_install -auto-approve && terraform destroy -auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/rke/split-roles/aws/docs.md b/recipes/rke/split-roles/aws/docs.md index 8d44c5e5..8119301a 100644 --- a/recipes/rke/split-roles/aws/docs.md +++ b/recipes/rke/split-roles/aws/docs.md @@ -1,66 +1,72 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [master\_nodes](#module\_master\_nodes) | ../../../../modules/infra/aws | n/a | +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | -| [worker\_nodes](#module\_worker\_nodes) | ../../../../modules/infra/aws | n/a | ## Resources -No resources. 
+| Name | Type | +|------|------| +| [null_resource.wait-docker-startup-m](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-docker-startup-w](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | Enter your AWS access key | `string` | n/a | yes | -| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | n/a | yes | -| [aws\_secret\_key](#input\_aws\_secret\_key) | Enter your AWS secret key | `string` | n/a | yes | -| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the RKE nodes |
object({ address = string, user = string, ssh_key_path = string, ssh_key = string })
| `null` | no | -| [cloud\_provider](#input\_cloud\_provider) | Specify the cloud provider name | `string` | `null` | no | -| [create\_kubeconfig\_file](#input\_create\_kubeconfig\_file) | Boolean flag to generate a kubeconfig file (mostly used for dev only) | `bool` | `true` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `false` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | -| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"23.0.6"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_security\_group\_name](#input\_instance\_security\_group\_name) | Provide a pre-existing security group name | `string` | `null` | no | -| [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | -| [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [master\_nodes\_count](#input\_master\_nodes\_count) | Number of master nodes to create | `number` | `1` | no | -| [master\_nodes\_iam\_instance\_profile](#input\_master\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to master nodes | `string` | `null` | no | -| [master\_nodes\_instance\_disk\_size](#input\_master\_nodes\_instance\_disk\_size) | Disk size used for all master nodes (in GB) | `string` | `"80"` | no | -| [master\_nodes\_instance\_type](#input\_master\_nodes\_instance\_type) | Instance type used for all master nodes | `string` | `"t3.medium"` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | n/a | yes | -| [ssh\_key](#input\_ssh\_key) | Contents of the private key to connect to the instances. 
| `string` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | -| [vpc\_zone](#input\_vpc\_zone) | VPC zone | `string` | `null` | no | -| [worker\_nodes\_count](#input\_worker\_nodes\_count) | Number of worker nodes to create | `number` | `1` | no | -| [worker\_nodes\_iam\_instance\_profile](#input\_worker\_nodes\_iam\_instance\_profile) | Specify IAM instance profile to attach to worker nodes | `string` | `null` | no | -| [worker\_nodes\_instance\_disk\_size](#input\_worker\_nodes\_instance\_disk\_size) | Disk size used for all worker nodes (in GB) | `string` | `"80"` | no | -| [worker\_nodes\_instance\_type](#input\_worker\_nodes\_instance\_type) | Instance type used for all worker nodes | `string` | `"t3.large"` | no | +| [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | +| [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs | Name | Description | 
|------|-------------| -| [credentials](#output\_credentials) | n/a | -| [dependency](#output\_dependency) | n/a | -| [kube\_config\_yaml](#output\_kube\_config\_yaml) | n/a | -| [kubeconfig\_file](#output\_kubeconfig\_file) | n/a | +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/recipes/rke/split-roles/aws/main.tf b/recipes/rke/split-roles/aws/main.tf index f7ab3d8c..1b02bbf5 100644 --- a/recipes/rke/split-roles/aws/main.tf +++ b/recipes/rke/split-roles/aws/main.tf @@ -1,20 +1,21 @@ -module "master_nodes" { - source = "../../../../modules/infra/aws" +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + vpc_id = var.vpc_id == null ? module.aws-ec2-upstream-master-nodes.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.aws-ec2-upstream-master-nodes.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.aws-ec2-upstream-master-nodes.security_group[0].id +} - prefix = "${var.prefix}-m" - instance_count = var.master_nodes_count - instance_type = var.master_nodes_instance_type - instance_disk_size = var.master_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-master-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.server_nodes_count + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -22,27 +23,23 @@ module "master_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.master_nodes_iam_instance_profile != null ? 
var.master_nodes_iam_instance_profile : null - tags = var.tags } -module "worker_nodes" { - source = "../../../../modules/infra/aws" - - prefix = "${var.prefix}-w" - instance_count = var.worker_nodes_count - instance_type = var.worker_nodes_instance_type - instance_disk_size = var.worker_nodes_instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_key = var.ssh_key - ssh_username = var.ssh_username - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - bastion_host = var.bastion_host +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -50,45 +47,78 @@ module "worker_nodes" { docker_version = var.docker_version } ) - iam_instance_profile = var.worker_nodes_iam_instance_profile != null ? var.worker_nodes_iam_instance_profile : null - tags = var.tags +} + +resource "null_resource" "wait-docker-startup-m" { + depends_on = [module.aws-ec2-upstream-master-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +resource "null_resource" "wait-docker-startup-w" { + depends_on = [module.aws-ec2-upstream-worker-nodes.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - master_nodes = [for instance_ips in module.master_nodes.instance_ips : + ssh_private_key_path = var.ssh_private_key_path != null ? 
var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" + server_nodes = [for instance_ips in module.aws-ec2-upstream-master-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.master_nodes.node_username, - hostname_override = instance_ips.private_dns + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, + hostname_override = null } ] - worker_nodes = [for instance_ips in module.worker_nodes.instance_ips : + worker_nodes = [for instance_ips in module.aws-ec2-upstream-worker-nodes.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["worker"], - ssh_key_path = var.ssh_key_pair_path, - ssh_key = var.ssh_key - node_username = module.worker_nodes.node_username - hostname_override = instance_ips.private_dns + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, + hostname_override = null } ] } module "rke" { - source = "../../../../modules/distribution/rke" - prefix = var.prefix - node_username = var.ssh_username - create_kubeconfig_file = var.create_kubeconfig_file - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version - bastion_host = var.bastion_host - cloud_provider = var.cloud_provider + source = "../../../../modules/distribution/rke" + prefix = var.prefix + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username + ingress_provider = var.ingress_provider - rancher_nodes = concat(local.master_nodes, local.worker_nodes) + rancher_nodes = concat(local.server_nodes, local.worker_nodes) +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-worker-nodes.instances_public_ip[0], "sslip.io"]) +} + +module "rancher_install" { + source = "../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_version = var.rancher_version + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}" + ] } diff --git a/recipes/rke/split-roles/aws/outputs.tf b/recipes/rke/split-roles/aws/outputs.tf index a974b706..f992227d 100644 --- a/recipes/rke/split-roles/aws/outputs.tf +++ b/recipes/rke/split-roles/aws/outputs.tf @@ -1,18 +1,29 @@ -output "dependency" { - value = [ - var.master_nodes_count != 0 ? module.master_nodes[*].instance_ips : null, - var.worker_nodes_count != 0 ? 
module.worker_nodes[*].instance_ips : null - ] +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) } -output "kubeconfig_file" { - value = module.rke.rke_kubeconfig_filename +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) } -output "kube_config_yaml" { - value = module.rke.kube_config_yaml +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id } -output "credentials" { - value = module.rke.credentials +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id +} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/rke/split-roles/aws/provider.tf b/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/rke/split-roles/aws/terraform.tfvars.example b/recipes/rke/split-roles/aws/terraform.tfvars.example new file mode 100644 index 00000000..e4bc2f48 --- /dev/null +++ b/recipes/rke/split-roles/aws/terraform.tfvars.example @@ -0,0 +1,111 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Server nodes +server_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- Master nodes type +# server_nodes_type = "t3.medium" + +## -- Worker nodes type +# worker_nodes_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Master nodes disk size (GB) +# server_nodes_disk_size = 80 + +## -- Worker nodes disk size (GB) +# worker_nodes_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the Master nodes +# server_nodes_iam_instance_profile = null + +## -- IAM Instance Profile to assign to the Worker nodes +# worker_nodes_iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/rke/split-roles/aws/variables.tf b/recipes/rke/split-roles/aws/variables.tf index dabc8251..28466f01 100644 --- a/recipes/rke/split-roles/aws/variables.tf +++ b/recipes/rke/split-roles/aws/variables.tf @@ -1,199 +1,146 @@ -variable "aws_access_key" { - type = string - description = "Enter your AWS access key" -} - -variable "aws_secret_key" { - type = string - description = "Enter your AWS secret key" - sensitive = true -} +variable "prefix" {} variable "aws_region" { type = string description = "AWS region used for all resources" + default = "us-east-1" + + validation { + condition = contains([ + "us-east-2", + "us-east-1", + "us-west-1", + "us-west-2", + "af-south-1", + "ap-east-1", + "ap-south-2", + "ap-southeast-3", + "ap-southeast-4", + "ap-south-1", + "ap-northeast-3", + "ap-northeast-2", + "ap-southeast-1", + "ap-southeast-2", + 
"ap-northeast-1", + "ca-central-1", + "ca-west-1", + "eu-central-1", + "eu-west-1", + "eu-west-2", + "eu-south-1", + "eu-west-3", + "eu-south-2", + "eu-north-1", + "eu-central-2", + "il-central-1", + "me-south-1", + "me-central-1", + "sa-east-1", + ], var.aws_region) + error_message = "Invalid Region specified!" + } } -variable "vpc_zone" { - type = string - description = "VPC zone" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "vpc_id" { - type = string - description = "VPC ID to create the instance(s) in" - default = null +variable "ssh_private_key_path" { + default = null } -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = true +variable "ssh_public_key_path" { + default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "create_vpc" { + default = null } -variable "instance_security_group_name" { - type = string - description = "Provide a pre-existing security group name" - default = null +variable "vpc_id" { + default = null } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" +variable "subnet_id" { + default = null } -variable "master_nodes_count" { - type = number - description = "Number of master nodes to create" - default = 1 +variable "create_security_group" { + default = null } -variable "worker_nodes_count" { - type = number - description = "Number of worker nodes to create" - default = 1 -} +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." 
+ } } -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null +variable "worker_nodes_count" {} + +variable "instance_security_group_id" { + default = null } -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } variable "install_docker" { type = bool - description = "Should install docker while creating the instance" + description = "Install Docker while creating the instances" default = true } variable "docker_version" { type = string description = "Docker version to install on nodes" - default = "23.0.6" + default = "20.10" } -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = false +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_key" { - type = string - description = "Contents of the private key to connect to the instances." - default = null - sensitive = true -} - -variable "bastion_host" { - type = object({ - address = string - user = string - ssh_key_path = string - ssh_key = string - }) - default = null - description = "Bastion host configuration to access the RKE nodes" -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "master_nodes_instance_type" { - type = string - description = "Instance type used for all master nodes" - default = "t3.medium" -} - -variable "master_nodes_instance_disk_size" { - type = string - description = "Disk size used for all master nodes (in GB)" - default = "80" -} - -variable "worker_nodes_instance_type" { - type = string - description = "Instance type used for all worker nodes" - default = "t3.large" +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } -variable "worker_nodes_instance_disk_size" { - type = string - description = "Disk size used for all worker nodes (in GB)" - default = "80" -} +variable "rancher_hostname" {} -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" - default = null -} +variable "rancher_password" { + type = string -variable "master_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to master nodes" - default = null - type = string + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." 
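+    # Rancher's default password policy requires the admin password to be at least 12 characters.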
+ } } -variable "worker_nodes_iam_instance_profile" { - description = "Specify IAM instance profile to attach to worker nodes" - default = null - type = string -} - -variable "tags" { - description = "User-provided tags for the resources" - type = map(string) - default = {} -} - -variable "cloud_provider" { - description = "Specify the cloud provider name" +variable "rancher_version" { + description = "Rancher version to install" type = string default = null } - -variable "create_kubeconfig_file" { - description = "Boolean flag to generate a kubeconfig file (mostly used for dev only)" - default = true -} diff --git a/recipes/standalone/aws/rke/README.md b/recipes/standalone/aws/rke/README.md index ae7ac039..71e33587 100644 --- a/recipes/standalone/aws/rke/README.md +++ b/recipes/standalone/aws/rke/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE +# Upstream | AWS standalone | EC2 x RKE -This module is used to establish a Rancher (local) management cluster using AWS and RKE. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). Documentation can be found [here](./docs.md). @@ -11,90 +11,27 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` - -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. 
+**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Advanced - -Target a specific resource/module to action the changes only for that resource/module - -For example, target only the `rke_cluster` resource to re-run the equivalent of `rke up` - -```bash -terraform apply -target module.rke.rke_cluster.this -target module.rke.local_file.kube_config_yaml -``` - -This also updates the kube_config generated by RKE. - -### Notes - -A log file for the RKE provisioning is written to `rke.log` - See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE](../../../../modules/distribution/rke) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful [(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. - -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/standalone/aws/rke/docs.md b/recipes/standalone/aws/rke/docs.md index 1494e0c9..4ad01afa 100644 --- a/recipes/standalone/aws/rke/docs.md +++ b/recipes/standalone/aws/rke/docs.md @@ -1,54 +1,57 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers -No providers. 
+| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | ## Modules | Name | Source | Version | |------|--------|---------| -| [cluster-nodes](#module\_cluster-nodes) | ../../../../modules/infra/aws | n/a | +| [aws\_ec2\_upstream\_cluster](#module\_aws\_ec2\_upstream\_cluster) | ../../../../modules/infra/aws/ec2 | n/a | | [rke](#module\_rke) | ../../../../modules/distribution/rke | n/a | ## Resources -No resources. +| Name | Type | +|------|------| +| [null_resource.wait_docker_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair 
(that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | ## Outputs | Name | Description | |------|-------------| -| [dependency](#output\_dependency) | n/a | | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [kubeconfig\_filename](#output\_kubeconfig\_filename) | n/a | -| [kubeconfig\_yaml](#output\_kubeconfig\_yaml) | n/a | +| [kube\_config\_path](#output\_kube\_config\_path) | n/a | diff --git a/recipes/standalone/aws/rke/main.tf b/recipes/standalone/aws/rke/main.tf index e586988b..4d7dea06 100644 --- a/recipes/standalone/aws/rke/main.tf +++ b/recipes/standalone/aws/rke/main.tf @@ -1,18 +1,31 @@ -module "cluster-nodes" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id +locals { + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" +} + +module "aws_ec2_upstream_cluster" { + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username user_data = templatefile("${path.module}/user_data.tmpl", { install_docker = var.install_docker @@ -20,27 +33,41 @@ module "cluster-nodes" { docker_version = var.docker_version } ) + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags +} + +resource "null_resource" "wait_docker_startup" { + depends_on = [module.aws_ec2_upstream_cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } module "rke" { source = "../../../../modules/distribution/rke" prefix = var.prefix - dependency = module.cluster-nodes.dependency - ssh_private_key_path = module.cluster-nodes.ssh_key_path + dependency = [null_resource.wait_docker_startup] + ssh_private_key_path = local.local_ssh_private_key_path node_username = var.ssh_username - kube_config_path = var.kube_config_path - kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version + # kubernetes_version = var.kubernetes_version - rancher_nodes = [for instance_ips in module.cluster-nodes.instance_ips : + rancher_nodes = [for instance_ips in module.aws_ec2_upstream_cluster.instance_ips : { public_ip = instance_ips.public_ip, private_ip = instance_ips.private_ip, roles = ["etcd", "controlplane", "worker"], - ssh_key_path = module.cluster-nodes.ssh_key_path - ssh_key = null - node_username = module.cluster-nodes.node_username + ssh_key_path = local.local_ssh_private_key_path, + ssh_key = null, hostname_override = null } ] } + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} diff --git a/recipes/standalone/aws/rke/outputs.tf b/recipes/standalone/aws/rke/outputs.tf index 8cc5675c..3ce0f7d3 100644 --- a/recipes/standalone/aws/rke/outputs.tf +++ b/recipes/standalone/aws/rke/outputs.tf @@ -1,19 +1,11 @@ output "instances_public_ip" { - value = module.cluster-nodes.instances_public_ip + value = module.aws_ec2_upstream_cluster.instances_public_ip } output "instances_private_ip" { - value = module.cluster-nodes.instances_private_ip + value = module.aws_ec2_upstream_cluster.instances_private_ip } -output "dependency" { - value = module.rke.dependency -} - -output "kubeconfig_filename" { - value = module.rke.rke_kubeconfig_filename -} - -output "kubeconfig_yaml" { - value = module.rke.kube_config_yaml +output "kube_config_path" { + value = local.kc_file } diff --git a/recipes/standalone/aws/rke/provider.tf b/recipes/standalone/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/standalone/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + 
required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/standalone/aws/rke/terraform.tfvars.example b/recipes/standalone/aws/rke/terraform.tfvars.example index c155c6ef..4a701c8f 100644 --- a/recipes/standalone/aws/rke/terraform.tfvars.example +++ b/recipes/standalone/aws/rke/terraform.tfvars.example @@ -1,48 +1,99 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null -## -- Override the default k8s version used by RKE -# kubernetes_version = "v1.24.10-rancher4-1" +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Script that will run when the VMs start +# user_data = "" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" \ No newline at end of file +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/standalone/aws/rke/variables.tf b/recipes/standalone/aws/rke/variables.tf index fc4c46dc..3d7afebd 100644 --- a/recipes/standalone/aws/rke/variables.tf +++ b/recipes/standalone/aws/rke/variables.tf @@ -1,19 +1,10 @@ -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" - default = null -} +variable "prefix" {} -variable "aws_access_key" { - type = 
string - description = "AWS access key used to create infrastructure" - default = null -} +# variable "aws_access_key" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -56,51 +47,69 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" - default = null +variable "ssh_public_key_path" { + default = null } -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "create_vpc" { + default = null } -variable "kube_config_filename" { - description = "Filename to write the kube config" - type = string - default = null -} +# variable "vpc_ip_cidr_range" {} -variable "kubernetes_version" { - type = string - description = "Kubernetes version to use for the RKE cluster" +# variable "vpc_id" {} + +# variable "subnet_id" {} + +# variable "create_security_group" {} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +# variable "instance_security_group_id" {} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +#variable "bastion_host" { +# type = object({ +# address = string +# user = string +# ssh_key = string +# ssh_key_path = string +# }) +# default = null +# description = "Bastion host configuration to access the instances" +#} + +# variable "iam_instance_profile" {} + +# variable "tags" {} + variable "install_docker" { type = bool - description = "Should install docker while creating the instance" + description = "Install Docker while creating the instances" default = true } @@ -110,51 +119,21 @@ variable "docker_version" { default = "20.10" } -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 120 } -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} +# variable "kubernetes_version" {} -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable 
"create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "kube_config_filename" { + description = "Filename to write the kube config" type = string - description = "Provide a pre-existing security group ID" default = null } diff --git a/recipes/upstream/aws/k3s/README.md b/recipes/upstream/aws/k3s/README.md index 427b01bc..2161f73a 100644 --- a/recipes/upstream/aws/k3s/README.md +++ b/recipes/upstream/aws/k3s/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | K3S +# Upstream | AWS | EC2 x K3S -This module is used to establish a Rancher (local) management cluster using AWS and K3S. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [K3s](https://docs.k3s.io/). Documentation can be found [here](./docs.md). @@ -11,77 +11,30 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/k3s ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - - uncomment `instance_type` and change the instance type if needed. - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -var-file terraform.tfvars -terraform apply -var-file terraform.tfvars -``` -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `server_nodes_count` to specify the number of Master nodes to create (to maintain ETCD quorum, the value must be 1, 3, or 5) + - `worker_nodes_count` to specify the number of Worker nodes to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. 
+**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -var-file terraform.tfvars -AWS_PROFILE= terraform apply -var-file terraform.tfvars +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy -var-file terraform.tfvars +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [K3S](../../../../modules/distribution/k3s) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - K3s: https://github.com/rancherlabs/tf-rancher-up/tree/main/modules/distribution/k3s + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/k3s/docs.md b/recipes/upstream/aws/k3s/docs.md index d85c7101..2132e2f9 100644 --- a/recipes/upstream/aws/k3s/docs.md +++ b/recipes/upstream/aws/k3s/docs.md @@ -2,6 +2,10 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -9,6 +13,7 @@ | Name | Version | |------|---------| | [local](#provider\_local) | n/a | +| [null](#provider\_null) | n/a | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -16,10 +21,10 @@ | Name | Source | Version | |------|--------|---------| | [k3s\_additional](#module\_k3s\_additional) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws | n/a | +| [k3s\_additional\_servers](#module\_k3s\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | +| [k3s\_additional\_workers](#module\_k3s\_additional\_workers) | ../../../../modules/infra/aws/ec2 | n/a | | [k3s\_first](#module\_k3s\_first) | ../../../../modules/distribution/k3s | n/a | -| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws | n/a | -| [k3s\_workers](#module\_k3s\_workers) | ../../../../modules/infra/aws | n/a | +| [k3s\_first\_server](#module\_k3s\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | ## Resources @@ -27,7 +32,7 @@ | Name | Type | |------|------| | [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | @@ -35,41 +40,38 @@ | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| 
[create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [k3s\_channel](#input\_k3s\_channel) | K3s channel to use, the latest patch version for the provided minor version will be used | `string` | `null` | no | -| [k3s\_config](#input\_k3s\_config) | Additional k3s configuration to add to the config.yaml file | `any` | `null` | no | -| [k3s\_token](#input\_k3s\_token) | Token to use when configuring k3s nodes | `any` | `null` | no | -| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the k3s cluster | `string` | `null` | no | +| [k3s\_config](#input\_k3s\_config) | Additional K3S configuration to add to the config.yaml file | `any` | `null` | no | +| [k3s\_token](#input\_k3s\_token) | Token to use when configuring K3S nodes | `any` | `null` | no | +| [k3s\_version](#input\_k3s\_version) | Kubernetes version to use for the K3S cluster | `string` | `"v1.28.9+k3s1"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName value | `string` | `"traefik"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [server\_instance\_count](#input\_server\_instance\_count) | Number of server EC2 instances to create | `number` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | 
`string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | -| [worker\_instance\_count](#input\_worker\_instance\_count) | Number of worker EC2 instances to create | `number` | `null` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | The number of Server nodes | `number` | `3` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `any` | n/a | yes | ## Outputs | Name | Description | |------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/k3s/main.tf b/recipes/upstream/aws/k3s/main.tf index c30b9afa..b75e839f 100644 --- a/recipes/upstream/aws/k3s/main.tf +++ b/recipes/upstream/aws/k3s/main.tf @@ -1,7 +1,16 @@ locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + vpc_id = var.vpc_id == null ? module.k3s_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.k3s_first_server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.k3s_first_server.security_group[0].id + kc_path = var.kube_config_path != null ? 
var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } module "k3s_first" { @@ -13,21 +22,24 @@ module "k3s_first" { } module "k3s_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - subnet_id = var.subnet_id - user_data = module.k3s_first.k3s_server_user_data + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s_first.k3s_server_user_data } module "k3s_additional" { @@ -40,82 +52,107 @@ module "k3s_additional" { } module "k3s_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.server_instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path = pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_server_user_data + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-additional-server" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.server_nodes_count - 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s_additional.k3s_server_user_data } - -module "k3s_workers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.worker_instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.k3s_first_server.ssh_key_pair_name - ssh_key_pair_path 
= pathexpand(module.k3s_first_server.ssh_key_path) - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.k3s_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.k3s_additional.k3s_worker_user_data +module "k3s_additional_workers" { + source = "../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-worker" + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.worker_nodes_count + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.k3s_additional.k3s_worker_user_data } - data "local_file" "ssh_private_key" { - depends_on = [module.k3s_first_server] - filename = pathexpand(module.k3s_first_server.ssh_key_path) + depends_on = [module.k3s_additional_workers] + + filename = local.local_ssh_private_key_path } resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + host = module.k3s_first_server.instances_public_ip[0] commands = [ "sudo sed 's/127.0.0.1/${module.k3s_first_server.instances_public_ip[0]}/g' /etc/rancher/k3s/k3s.yaml" ] user = var.ssh_username private_key = data.local_file.ssh_private_key.content + retry_delay = "60s" } resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { - filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename +} + +provider "helm" { + kubernetes { + config_path = local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.k3s_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.server_instance_count > 1 ? 
module.k3s_additional_servers.dependency : module.k3s_first_server.dependency + dependency = [null_resource.wait_k8s_services_startup] kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.server_instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.worker_nodes_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/k3s/outputs.tf b/recipes/upstream/aws/k3s/outputs.tf index 5dd2766a..defc2d99 100644 --- a/recipes/upstream/aws/k3s/outputs.tf +++ b/recipes/upstream/aws/k3s/outputs.tf @@ -1,25 +1,9 @@ -output "instances_public_ip" { - value = concat([module.k3s_first_server.instances_public_ip], [module.k3s_additional_servers.instances_public_ip]) -} - -output "instances_private_ip" { - value = concat([module.k3s_first_server.instances_private_ip], [module.k3s_additional_servers.instances_private_ip]) -} - -output "rancher_hostname" { - value = local.rancher_hostname -} - output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/k3s/provider.tf b/recipes/upstream/aws/k3s/provider.tf index 6997a762..8e915083 100644 --- a/recipes/upstream/aws/k3s/provider.tf +++ b/recipes/upstream/aws/k3s/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/k3s/terraform.tfvars.example b/recipes/upstream/aws/k3s/terraform.tfvars.example index c73ad2a8..9fd47570 100644 --- a/recipes/upstream/aws/k3s/terraform.tfvars.example +++ b/recipes/upstream/aws/k3s/terraform.tfvars.example @@ -1,53 +1,108 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. 
-# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Override the default k8s version or channel used by K3S -# k3s_version = "v1.24.14+k3s1" -k3s_channel = "v1.25" +## -- AWS VPC used for all resources +# vpc_id = null -## -- Number and type of EC2 instances to launch -server_instance_count = 1 -worker_instance_count = 1 +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of Server nodes +server_nodes_count = 1 + +## -- The number of Worker nodes +worker_nodes_count = 1 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- K3S version +# k3s_version = "v1.28.3+k3sr2" + +## -- K3s channel +# k3s_channel = + ## -- K3S token, override the programmatically generated token # k3s_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- K3S custom config file +# k3s_config = "" + +## -- K3S KUBECONFIG file path +# kube_config_path = "" + +## -- K3S KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/k3s/variables.tf b/recipes/upstream/aws/k3s/variables.tf index 1c13e035..d8c11e83 100644 --- a/recipes/upstream/aws/k3s/variables.tf +++ b/recipes/upstream/aws/k3s/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable "prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,40 +47,82 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "server_instance_count" { - type = number - description = "Number of server EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "worker_instance_count" { - type = number - description = "Number of worker EC2 instances to create" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_type" { - type = string - description = 
"Instance type used for all EC2 instances" - default = null +variable "ssh_public_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +variable "create_vpc" { + default = null +} + +# variable "vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "server_nodes_count" { + description = "The number of Server nodes" + default = 3 + + validation { + condition = contains([ + 1, + 3, + 5, + ], var.server_nodes_count) + error_message = "Invalid number of Server nodes specified! The value must be 1, 3 or 5 (ETCD quorum)." + } +} + +variable "worker_nodes_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "k3s_version" { type = string - description = "Kubernetes version to use for the k3s cluster" - default = null + description = "Kubernetes version to use for the K3S cluster" + default = "v1.28.9+k3s1" #Version compatible with Rancher v2.8.3 } variable "k3s_channel" { @@ -94,12 +132,12 @@ variable "k3s_channel" { } variable "k3s_token" { - description = "Token to use when configuring k3s nodes" + description = "Token to use when configuring K3S nodes" default = null } variable "k3s_config" { - description = "Additional k3s configuration to add to the config.yaml file" + description = "Additional K3S configuration to add to the config.yaml file" default = null } @@ -115,85 +153,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." 
} } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { type = string - description = "Specify the SSH key name to use (that's already present in AWS)" default = null } -variable "ssh_key_pair_path" { - type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" - default = null -} - -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "traefik" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } diff --git a/recipes/upstream/aws/rke/README.md b/recipes/upstream/aws/rke/README.md index ae7ac039..aec12520 100644 --- a/recipes/upstream/aws/rke/README.md +++ b/recipes/upstream/aws/rke/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE +# Upstream | AWS | EC2 x RKE -This module is used to establish a Rancher (local) management cluster using AWS and RKE. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE](https://rke.docs.rancher.com/). Documentation can be found [here](./docs.md). @@ -11,90 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. 
- -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` - -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Advanced - -Target a specific resource/module to action the changes only for that resource/module - -For example, target only the `rke_cluster` resource to re-run the equivalent of `rke up` - -```bash -terraform apply -target module.rke.rke_cluster.this -target module.rke.local_file.kube_config_yaml -``` - -This also updates the kube_config generated by RKE. - -### Notes - -A log file for the RKE provisioning is written to `rke.log` - See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE](../../../../modules/distribution/rke) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun the terraform apply again and it will be successful [(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke/docs.md b/recipes/upstream/aws/rke/docs.md index 15a21777..63db4c1f 100644 --- a/recipes/upstream/aws/rke/docs.md +++ b/recipes/upstream/aws/rke/docs.md @@ -1,6 +1,12 @@ ## Requirements -No requirements. +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | ## Providers @@ -11,7 +17,7 @@ No providers. | Name | Source | Version | |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | -| [rke](#module\_rke) | ../../../../recipes/standalone/aws/rke | n/a | +| [rke\_cluster](#module\_rke\_cluster) | ../../../../recipes/standalone/aws/rke | n/a | ## Resources @@ -24,35 +30,43 @@ No resources. | [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | | [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | +| [bastion\_host](#input\_bastion\_host) | Bastion host configuration to access the instances |
<pre>object({<br>  address = string<br>  user = string<br>  ssh_key = string<br>  ssh_key_path = string<br>})</pre>
| `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | | [cert\_manager\_helm\_repository](#input\_cert\_manager\_helm\_repository) | Helm repository for Cert Manager chart | `string` | `null` | no | | [cert\_manager\_helm\_repository\_password](#input\_cert\_manager\_helm\_repository\_password) | Private Cert Manager helm repository password | `string` | `null` | no | | [cert\_manager\_helm\_repository\_username](#input\_cert\_manager\_helm\_repository\_username) | Private Cert Manager helm repository username | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| [dependency](#input\_dependency) | An optional variable to add a dependency from another resource (not used) | `any` | `null` | no | +| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `true` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `true` | no | +| [create\_vpc](#input\_create\_vpc) | Specify whether VPC / Subnet should be created for the instances | `bool` | `true` | no | | [docker\_version](#input\_docker\_version) | Docker version to install on nodes | `string` | `"20.10"` | no | -| [install\_docker](#input\_install\_docker) | Should install docker while creating the instance | `bool` | `true` | no | -| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [iam\_instance\_profile](#input\_iam\_instance\_profile) | Specify IAM Instance Profile to assign to the instances/nodes | `string` | `null` | no | +| [ingress\_provider](#input\_ingress\_provider) | Ingress controller provider | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | Install Docker while creating the instances | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `3` | no | +| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `"80"` | no | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | Provide a pre-existing security group ID | `string` | `null` | no | +| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `"t3.medium"` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the RKE cluster | `string` | `null` | no | | [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| 
[rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | | [rancher\_helm\_repository](#input\_rancher\_helm\_repository) | Helm repository for Rancher chart | `string` | `null` | no | | [rancher\_helm\_repository\_password](#input\_rancher\_helm\_repository\_password) | Private Rancher helm repository password | `string` | `null` | no | | [rancher\_helm\_repository\_username](#input\_rancher\_helm\_repository\_username) | Private Rancher helm repository username | `string` | `null` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | Hostname to set when installing Rancher | `string` | `null` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | +| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `false` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | If you want to use an existing key pair, specify its name | `string` | `null` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform) | `string` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform) | `any` | `null` | no | | [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | | [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [tag\_begin](#input\_tag\_begin) | When module is being called more than once, begin tagging from this number | `number` | `1` | no | +| [tags](#input\_tags) | User-provided tags for the resources | `map(string)` | `{}` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | VPC ID to create the instance(s) in | `string` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `120` | no | ## Outputs @@ -60,7 +74,5 @@ No resources. 
|------|-------------| | [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | | [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_bootstrap\_password](#output\_rancher\_bootstrap\_password) | n/a | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/rke/main.tf b/recipes/upstream/aws/rke/main.tf index 5def4ea9..f0df9804 100644 --- a/recipes/upstream/aws/rke/main.tf +++ b/recipes/upstream/aws/rke/main.tf @@ -1,52 +1,52 @@ -module "rke" { - source = "../../../../recipes/standalone/aws/rke" - - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = var.aws_region - - dependency = var.dependency - prefix = var.prefix - instance_count = var.instance_count - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - spot_instances = var.spot_instances - install_docker = var.install_docker - docker_version = var.docker_version - - subnet_id = var.subnet_id - create_ssh_key_pair = var.create_ssh_key_pair - create_security_group = var.create_security_group - instance_security_group = var.instance_security_group - - ssh_username = var.ssh_username - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - +module "rke_cluster" { + source = "../../../../recipes/standalone/aws/rke" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + # ssh_key_pair_name = var.ssh_key_pair_name + # ssh_private_key_path = var.ssh_private_key_path + # ssh_public_key_path = var.ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = var.instance_count + #instance_type = var.instance_type + #spot_instances = var.spot_instances + #instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + install_docker = var.install_docker + docker_version = var.docker_version + # bastion_host = var.bastion_host + # iam_instance_profile = var.iam_instance_profile + # tags = var.tags + # kubernetes_version = var.kubernetes_version kube_config_path = var.kube_config_path kube_config_filename = var.kube_config_filename - kubernetes_version = var.kubernetes_version - } locals { - rancher_hostname = join(".", ["rancher", module.rke.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.rke_cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke_cluster.instances_public_ip[0], "sslip.io"]) + } module "rancher_install" { source = "../../../../modules/rancher" - dependency = module.rke.dependency - kubeconfig_file = module.rke.kubeconfig_filename + dependency = [module.rke_cluster] + kubeconfig_file = module.rke_cluster.kube_config_path rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait rancher_helm_repository = var.rancher_helm_repository rancher_helm_repository_username = var.rancher_helm_repository_username rancher_helm_repository_password = var.rancher_helm_repository_password cert_manager_helm_repository = var.cert_manager_helm_repository cert_manager_helm_repository_username = var.cert_manager_helm_repository_username cert_manager_helm_repository_password = var.cert_manager_helm_repository_password + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/recipes/upstream/aws/rke/outputs.tf b/recipes/upstream/aws/rke/outputs.tf index c4b35f64..25550770 100644 --- a/recipes/upstream/aws/rke/outputs.tf +++ b/recipes/upstream/aws/rke/outputs.tf @@ -1,25 +1,17 @@ output "instances_public_ip" { - value = module.rke.instances_public_ip + value = module.rke_cluster.instances_public_ip } output "instances_private_ip" { - value = module.rke.instances_private_ip -} - -output "rancher_hostname" { - value = local.rancher_hostname + value = module.rke_cluster.instances_private_ip } output "rancher_url" { - value = "https://${local.rancher_hostname}" -} - -output "rancher_bootstrap_password" { - value = var.rancher_bootstrap_password + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/rke/provider.tf b/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke/terraform.tfvars.example b/recipes/upstream/aws/rke/terraform.tfvars.example index c155c6ef..4a701c8f 100644 --- a/recipes/upstream/aws/rke/terraform.tfvars.example +++ b/recipes/upstream/aws/rke/terraform.tfvars.example @@ -1,48 +1,99 @@ ###### !! Required variables !! 
###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null -## -- Override the default k8s version used by RKE -# kubernetes_version = "v1.24.10-rancher4-1" +#Ref. https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. 
https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Script that will run when the VMs start +# user_data = "" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" \ No newline at end of file +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/recipes/upstream/aws/rke/variables.tf b/recipes/upstream/aws/rke/variables.tf index a31f6f05..d471b68c 100644 --- a/recipes/upstream/aws/rke/variables.tf +++ b/recipes/upstream/aws/rke/variables.tf @@ -1,5 +1,6 @@ -variable "dependency" { - description = "An optional variable to add a dependency from another resource (not used)" +variable "prefix" { + type = string + description = "Prefix added to names of all resources" default = null } @@ -56,141 +57,194 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + type = bool + description = "Specify if a new SSH key pair needs to be created for the instances" + default = true } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" +variable "ssh_key_pair_name" { + type = string + description = "If you want to use an existing key pair, specify its name" default = null } -variable "instance_type" { +variable "ssh_private_key_path" { type = string - description = "Instance type used for all EC2 instances" + description = "The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform)" default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size 
(GB)" +variable "ssh_public_key_path" { + description = "The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform)" default = null } -variable "kube_config_path" { - description = "The path to write the kubeconfig for the RKE cluster" - type = string - default = null +variable "create_vpc" { + type = bool + description = "Specify whether VPC / Subnet should be created for the instances" + default = true } -variable "kube_config_filename" { - description = "Filename to write the kube config" +variable "vpc_id" { type = string + description = "VPC ID to create the instance(s) in" default = null } -variable "kubernetes_version" { +variable "subnet_id" { type = string - description = "Kubernetes version to use for the RKE cluster" + description = "VPC Subnet ID to create the instance(s) in" default = null } -variable "install_docker" { +variable "create_security_group" { type = bool - description = "Should install docker while creating the instance" + description = "Should create the security group associated with the instance(s)" default = true + nullable = false } -variable "docker_version" { +variable "instance_count" { + type = number + description = "Number of EC2 instances to create" + default = 3 + nullable = false +} + +variable "instance_type" { type = string - description = "Docker version to install on nodes" - default = "20.10" + description = "Instance type used for all EC2 instances" + default = "t3.medium" + nullable = false +} + +variable "spot_instances" { + type = bool + description = "Use spot instances" + default = false + nullable = false } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" +variable "instance_disk_size" { type = string + description = "Specify root disk size (GB)" + default = "80" + nullable = false } -variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" +variable "instance_security_group_id" { + type = string + description = "Provide a pre-existing security group ID" default = null +} + +variable "ssh_username" { type = string + description = "Username used for SSH with sudo access" + default = "ubuntu" + nullable = false +} - validation { - condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" - } +variable "user_data" { + description = "User data content for EC2 instance(s)" + default = null } -variable "rancher_version" { - description = "Rancher version to install" +variable "bastion_host" { + type = object({ + address = string + user = string + ssh_key = string + ssh_key_path = string + }) default = null + description = "Bastion host configuration to access the instances" +} + +variable "iam_instance_profile" { type = string + description = "Specify IAM Instance Profile to assign to the instances/nodes" + default = null } -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 +variable "tag_begin" { type = number + description = "When module is being called more than once, begin tagging from this number" + default = 1 } -variable "create_ssh_key_pair" { +variable "tags" { + description = "User-provided tags for the resources" + type = map(string) + default = {} +} + +variable "install_docker" { type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null 
+ description = "Install Docker while creating the instances" + default = true } -variable "ssh_key_pair_name" { +variable "docker_version" { type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null + description = "Docker version to install on nodes" + default = "20.10" +} + +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 120 } -variable "ssh_key_pair_path" { +variable "kubernetes_version" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" + description = "Kubernetes version to use for the RKE cluster" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" +variable "ingress_provider" { + description = "Ingress controller provider" + default = "nginx" } -variable "spot_instances" { +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" type = bool - description = "Use spot instances" - default = null + default = true } -variable "subnet_id" { +variable "kube_config_path" { + description = "The path to write the kubeconfig for the RKE cluster" type = string - description = "VPC Subnet ID to create the instance(s) in" default = null } -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" +variable "kube_config_filename" { + description = "Filename to write the kube config" + type = string default = null } -# TODO: Add a check based on above value -variable "instance_security_group" { +variable "rancher_hostname" { + description = "Hostname to set when installing Rancher" type = string - description = "Provide a pre-existing security group ID" default = null } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_password" { + type = string + + validation { + condition = length(var.rancher_password) >= 12 + error_message = "The password must be at least 12 characters." + } +} + +variable "rancher_version" { + description = "Rancher version to install" + type = string + default = null } variable "rancher_helm_repository" { diff --git a/recipes/upstream/aws/rke2/README.md b/recipes/upstream/aws/rke2/README.md index 4dd089e5..ccfe949e 100644 --- a/recipes/upstream/aws/rke2/README.md +++ b/recipes/upstream/aws/rke2/README.md @@ -1,6 +1,6 @@ -# Upstream | AWS | RKE2 +# Upstream | AWS | EC2 x RKE2 -This module is used to establish a Rancher (local) management cluster using AWS and RKE2. +This module is used to establish a Rancher (local) management cluster using [AWS EC2](https://aws.amazon.com/ec2/) and [RKE2](https://docs.rke2.io/). Documentation can be found [here](./docs.md). @@ -11,76 +11,29 @@ git clone https://github.com/rancherlabs/tf-rancher-up.git cd recipes/upstream/aws/rke2 ``` -- Copy `terraform.tfvars.example` to `terraform.tfvars` -- Edit `terraform.tfvars` +- Copy `./terraform.tfvars.exmaple` to `./terraform.tfvars` +- Edit `./terraform.tfvars` - Update the required variables: - - `aws_region` to suit your region - `prefix` to give the resources an identifiable name (eg, your initials or first name) - - Recommended: `spot_instances` can be set to `true` to use spot instances -- Check your AWS credentials are configured in `~/.aws/credentials`, terraform will use these by default. 
Refer the [`aws configure`](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html#cli-configure-files-methods) command on how to do this. -- If you don't want to configure AWS credentials using `aws configure` in above step, uncomment `aws_access_key` and `aws_secret_key` in `terraform.tfvars` and input the required keys there. -- If an HA cluster need to be deployed, change the `instance_count` variable to 3 or more. -- There are more optional variables which can be tweaked under `terraform.tfvars`. - -**NOTE** you may need to use ` terraform init -upgrade` to upgrade provider versions - -Execute the below commands to start deployment. - -```bash -terraform init -terraform plan -terraform apply -``` -The login details will be displayed in the screen once the deployment is successful. It will have the details as below. - -```bash -rancher_hostname = "https://rancher..sslip.io" -rancher_password = "initial-admin-password" -``` + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") + - `rancher_hostname` in order to reach the Rancher console via DNS name + - `rancher_password` to configure the initial Admin password (the password must be at least 12 characters) +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../modules/infra/aws/README.md). -- If storing multiple AWS credentials in `~/.aws/credentials`, set the profile when running terraform. +**NB: If you want to use all the configurable variables in the `terraform.tfvars` file, you will need to uncomment them there and in the `variables.tf` and `main.tf` files.** ```bash -AWS_PROFILE= terraform plan -AWS_PROFILE= terraform apply +terraform init -upgrade && terraform apply -auto-approve ``` -- Destroy the resources when cluster is no more needed. +- Destroy the resources when finished ```bash -terraform destroy +terraform destroy -auto-approve ``` -**IMPORTANT**: Please retire the services which are deployed using these terraform modules within 48 hours. Soon there will be automation to retire the service automatically after 48 hours but till that is in place it will be the users responsibility to not keep it running more than 48 hours. - -### Notes - -The user data automatically sets up each node for use with kubectl (also alias to k) and crictl when logged in. See full argument list for each module in use: - - [AWS](../../../../modules/infra/aws) - - [RKE2](../../../../modules/distribution/rke2) - - [Rancher](../../../../modules/rancher) - -### Known Issues -- Terraform plan shows below warnings which can be ignored: - -```bash -Warning: Value for undeclared variable - -The root module does not declare a variable named "ssh_private_key_path" but a value was found in file "terraform.tfvars". If you meant to use this value, add a "variable" block to the configuration. - -Invalid attribute in provider configuration - -with module.rancher_install.provider["registry.terraform.io/hashicorp/kubernetes"], -on ../../../../modules/rancher/provider.tf line 7, in provider "kubernetes": -7: provider "kubernetes" { -``` -- Terraform apply shows below warnings and errors. Please rerun terraform apply again, and it will be successful[(Issue #22)](#22). - -```bash -Warning: - -Helm release "rancher" was created but has a failed status. Use the `helm` command to investigate the error, correct it, then run Terraform again. 
- -Error: 1 error occurred: -* Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": failed to call webhook: Post "https://rke2-ingress-nginx-controller-admission.kube-system.svc:443/networking/v1/ingresses?timeout=10s": no endpoints available for service "rke2-ingress-nginx-controller-admission" -``` + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE2: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke2 + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/recipes/upstream/aws/rke2/docs.md b/recipes/upstream/aws/rke2/docs.md index 36b24c76..42af12a9 100644 --- a/recipes/upstream/aws/rke2/docs.md +++ b/recipes/upstream/aws/rke2/docs.md @@ -2,13 +2,18 @@ | Name | Version | |------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | | [ssh](#requirement\_ssh) | 2.6.0 | ## Providers | Name | Version | |------|---------| -| [local](#provider\_local) | n/a | +| [local](#provider\_local) | 2.5.1 | +| [null](#provider\_null) | 3.2.2 | | [ssh](#provider\_ssh) | 2.6.0 | ## Modules @@ -17,16 +22,16 @@ |------|--------|---------| | [rancher\_install](#module\_rancher\_install) | ../../../../modules/rancher | n/a | | [rke2\_additional](#module\_rke2\_additional) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws | n/a | +| [rke2\_additional\_servers](#module\_rke2\_additional\_servers) | ../../../../modules/infra/aws/ec2 | n/a | | [rke2\_first](#module\_rke2\_first) | ../../../../modules/distribution/rke2 | n/a | -| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws | n/a | +| [rke2\_first\_server](#module\_rke2\_first\_server) | ../../../../modules/infra/aws/ec2 | n/a | ## Resources | Name | Type | |------|------| | [local_file.kube_config_yaml](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | -| [local_file.kube_config_yaml_backup](https://registry.terraform.io/providers/hashicorp/local/latest/docs/resources/file) | resource | +| [null_resource.wait_k8s_services_startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | | [ssh_resource.retrieve_kubeconfig](https://registry.terraform.io/providers/loafoe/ssh/2.6.0/docs/resources/resource) | resource | | [local_file.ssh_private_key](https://registry.terraform.io/providers/hashicorp/local/latest/docs/data-sources/file) | data source | @@ -34,39 +39,36 @@ | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [aws\_access\_key](#input\_aws\_access\_key) | AWS access key used to create infrastructure | `string` | `null` | no | | [aws\_region](#input\_aws\_region) | AWS region used for all resources | `string` | `"us-east-1"` | no | -| [aws\_secret\_key](#input\_aws\_secret\_key) | AWS secret key used to create AWS infrastructure | `string` | `null` | no | -| [create\_security\_group](#input\_create\_security\_group) | Should create the security group associated with the instance(s) | `bool` | `null` | no | -| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | Specify if a new SSH key pair needs to be created for the instances | `bool` | `null` | no | -| 
[instance\_count](#input\_instance\_count) | Number of EC2 instances to create | `number` | `null` | no | -| [instance\_disk\_size](#input\_instance\_disk\_size) | Specify root disk size (GB) | `string` | `null` | no | -| [instance\_security\_group](#input\_instance\_security\_group) | Provide a pre-existing security group ID | `string` | `null` | no | -| [instance\_type](#input\_instance\_type) | Instance type used for all EC2 instances | `string` | `null` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | Bootstrap the Rancher installation | `bool` | `true` | no | +| [create\_security\_group](#input\_create\_security\_group) | n/a | `any` | `null` | no | +| [create\_ssh\_key\_pair](#input\_create\_ssh\_key\_pair) | n/a | `any` | `null` | no | +| [create\_vpc](#input\_create\_vpc) | n/a | `any` | `null` | no | +| [instance\_count](#input\_instance\_count) | n/a | `any` | n/a | yes | +| [instance\_security\_group\_id](#input\_instance\_security\_group\_id) | n/a | `any` | `null` | no | | [kube\_config\_filename](#input\_kube\_config\_filename) | Filename to write the kube config | `string` | `null` | no | | [kube\_config\_path](#input\_kube\_config\_path) | The path to write the kubeconfig for the RKE cluster | `string` | `null` | no | -| [prefix](#input\_prefix) | Prefix added to names of all resources | `string` | `null` | no | -| [rancher\_bootstrap\_password](#input\_rancher\_bootstrap\_password) | Password to use for bootstrapping Rancher (min 12 characters) | `string` | `"initial-admin-password"` | no | -| [rancher\_password](#input\_rancher\_password) | Password to use for Rancher (min 12 characters) | `string` | `null` | no | -| [rancher\_replicas](#input\_rancher\_replicas) | Value for replicas when installing the Rancher helm chart | `number` | `3` | no | +| [prefix](#input\_prefix) | n/a | `any` | n/a | yes | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `any` | n/a | yes | +| [rancher\_ingress\_class\_name](#input\_rancher\_ingress\_class\_name) | Rancher ingressClassName value | `string` | `"nginx"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | n/a | yes | +| [rancher\_service\_type](#input\_rancher\_service\_type) | Rancher serviceType value | `string` | `"ClusterIP"` | no | | [rancher\_version](#input\_rancher\_version) | Rancher version to install | `string` | `null` | no | | [rke2\_config](#input\_rke2\_config) | Additional RKE2 configuration to add to the config.yaml file | `any` | `null` | no | | [rke2\_token](#input\_rke2\_token) | Token to use when configuring RKE2 nodes | `any` | `null` | no | | [rke2\_version](#input\_rke2\_version) | Kubernetes version to use for the RKE2 cluster | `string` | `null` | no | -| [spot\_instances](#input\_spot\_instances) | Use spot instances | `bool` | `null` | no | -| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | Specify the SSH key name to use (that's already present in AWS) | `string` | `null` | no | -| [ssh\_key\_pair\_path](#input\_ssh\_key\_pair\_path) | Path to the SSH private key used as the key pair (that's already present in AWS) | `string` | `null` | no | -| [ssh\_username](#input\_ssh\_username) | Username used for SSH with sudo access | `string` | `"ubuntu"` | no | -| [subnet\_id](#input\_subnet\_id) | VPC Subnet ID to create the instance(s) in | `string` | `null` | no | -| [wait](#input\_wait) | An optional wait before installing the Rancher helm chart | `string` | `"20s"` | no | +| [ssh\_key\_pair\_name](#input\_ssh\_key\_pair\_name) | n/a | `any` | `null` | no 
| +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_public\_key\_path](#input\_ssh\_public\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `any` | n/a | yes | +| [subnet\_id](#input\_subnet\_id) | n/a | `any` | `null` | no | +| [user\_data](#input\_user\_data) | User data content for EC2 instance(s) | `any` | `null` | no | +| [vpc\_id](#input\_vpc\_id) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | Waiting time (in seconds) | `number` | `180` | no | ## Outputs | Name | Description | |------|-------------| -| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | -| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | -| [rancher\_admin\_token](#output\_rancher\_admin\_token) | Rancher API token for the admin user | -| [rancher\_hostname](#output\_rancher\_hostname) | n/a | -| [rancher\_password](#output\_rancher\_password) | n/a | -| [rancher\_url](#output\_rancher\_url) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/recipes/upstream/aws/rke2/main.tf b/recipes/upstream/aws/rke2/main.tf index b4c6cc0a..438745ff 100644 --- a/recipes/upstream/aws/rke2/main.tf +++ b/recipes/upstream/aws/rke2/main.tf @@ -1,7 +1,16 @@ locals { - kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd - kc_file = var.kube_config_filename != null ? "${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" - kc_file_backup = "${local.kc_file}.backup" + create_ssh_key_pair = var.create_ssh_key_pair == null ? false : true + ssh_key_pair_name = var.ssh_key_pair_name == null ? "tf-rancher-up-${var.prefix}" : var.ssh_key_pair_name + local_ssh_private_key_path = var.ssh_private_key_path == null ? "${path.cwd}/${var.prefix}-ssh_private_key.pem" : var.ssh_private_key_path + local_ssh_public_key_path = var.ssh_public_key_path == null ? "${path.cwd}/${var.prefix}-ssh_public_key.pem" : var.ssh_public_key_path + create_vpc = var.create_vpc == null ? false : true + vpc_id = var.vpc_id == null ? module.rke2_first_server.vpc[0].id : var.vpc_id + subnet_id = var.subnet_id == null ? module.rke2_first_server.subnet[0].id : var.subnet_id + create_security_group = var.create_security_group == null ? false : true + instance_security_group_id = local.create_security_group == "true" ? null : module.rke2_first_server.security_group[0].id + kc_path = var.kube_config_path != null ? var.kube_config_path : path.cwd + kc_file = var.kube_config_filename != null ? 
"${local.kc_path}/${var.kube_config_filename}" : "${local.kc_path}/${var.prefix}_kube_config.yml" + kc_file_backup = "${local.kc_file}.backup" } module "rke2_first" { @@ -12,21 +21,24 @@ module "rke2_first" { } module "rke2_first_server" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = var.create_ssh_key_pair - ssh_key_pair_name = var.ssh_key_pair_name - ssh_key_pair_path = var.ssh_key_pair_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - aws_region = var.aws_region - create_security_group = var.create_security_group - instance_security_group = var.ssh_key_pair_name - subnet_id = var.subnet_id - user_data = module.rke2_first.rke2_user_data + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + # create_ssh_key_pair = var.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + # create_vpc = var.create_vpc + # vpc_id = var.vpc_id + # subnet_id = var.subnet_id + # create_security_group = var.create_security_group + instance_count = 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + # instance_security_group_id = var.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2_first.rke2_user_data } module "rke2_additional" { @@ -38,30 +50,35 @@ module "rke2_additional" { } module "rke2_additional_servers" { - source = "../../../../modules/infra/aws" - prefix = var.prefix - instance_count = var.instance_count - 1 - instance_type = var.instance_type - instance_disk_size = var.instance_disk_size - create_ssh_key_pair = false - ssh_key_pair_name = module.rke2_first_server.ssh_key_pair_name - ssh_key_pair_path = module.rke2_first_server.ssh_key_path - ssh_username = var.ssh_username - spot_instances = var.spot_instances - tag_begin = 2 - aws_region = var.aws_region - create_security_group = false - instance_security_group = module.rke2_first_server.sg-id - subnet_id = var.subnet_id - user_data = module.rke2_additional.rke2_user_data + source = "../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + create_ssh_key_pair = local.create_ssh_key_pair + ssh_key_pair_name = local.ssh_key_pair_name + ssh_private_key_path = local.local_ssh_private_key_path + ssh_public_key_path = local.local_ssh_public_key_path + create_vpc = local.create_vpc + vpc_id = local.vpc_id + subnet_id = local.subnet_id + create_security_group = local.create_security_group + instance_count = var.instance_count - 1 + # instance_type = var.instance_type + # spot_instances = var.spot_instances + # instance_disk_size = var.instance_disk_size + instance_security_group_id = local.instance_security_group_id + ssh_username = var.ssh_username + user_data = module.rke2_additional.rke2_user_data } data "local_file" "ssh_private_key" { - depends_on = [module.rke2_first_server] - filename = module.rke2_first_server.ssh_key_path + depends_on = [module.rke2_additional_servers] + + filename = local.local_ssh_private_key_path } resource "ssh_resource" "retrieve_kubeconfig" { + depends_on = [data.local_file.ssh_private_key] + host = module.rke2_first_server.instances_public_ip[0] commands = [ "sudo sed 's/127.0.0.1/${module.rke2_first_server.instances_public_ip[0]}/g' 
/etc/rancher/rke2/rke2.yaml" @@ -71,29 +88,47 @@ resource "ssh_resource" "retrieve_kubeconfig" { } resource "local_file" "kube_config_yaml" { + depends_on = [ssh_resource.retrieve_kubeconfig] + filename = local.kc_file - content = ssh_resource.retrieve_kubeconfig.result file_permission = "0600" + content = ssh_resource.retrieve_kubeconfig.result } -resource "local_file" "kube_config_yaml_backup" { - filename = local.kc_file_backup - content = ssh_resource.retrieve_kubeconfig.result - file_permission = "0600" +provider "kubernetes" { + config_path = local_file.kube_config_yaml.filename +} + +provider "helm" { + kubernetes { + config_path = local_file.kube_config_yaml.filename + } +} + +resource "null_resource" "wait_k8s_services_startup" { + depends_on = [local_file.kube_config_yaml] + + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } } locals { - rancher_hostname = join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) + rancher_hostname = var.rancher_hostname != null ? join(".", ["${var.rancher_hostname}", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.rke2_first_server.instances_public_ip[0], "sslip.io"]) } module "rancher_install" { source = "../../../../modules/rancher" - dependency = var.instance_count > 1 ? module.rke2_additional_servers.dependency : module.rke2_first_server.dependency + dependency = [null_resource.wait_k8s_services_startup] kubeconfig_file = local_file.kube_config_yaml.filename rancher_hostname = local.rancher_hostname - rancher_replicas = min(var.rancher_replicas, var.instance_count) - rancher_bootstrap_password = var.rancher_bootstrap_password + rancher_bootstrap_password = var.rancher_password rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher rancher_version = var.rancher_version - wait = var.wait + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}", + "ingress.ingressClassName: ${var.rancher_ingress_class_name}", + "service.type: ${var.rancher_service_type}" + ] } diff --git a/recipes/upstream/aws/rke2/outputs.tf b/recipes/upstream/aws/rke2/outputs.tf index 25659cfc..34d05284 100644 --- a/recipes/upstream/aws/rke2/outputs.tf +++ b/recipes/upstream/aws/rke2/outputs.tf @@ -1,25 +1,19 @@ -output "instances_public_ip" { - value = concat([module.rke2_first_server.instances_public_ip], [module.rke2_additional_servers.instances_public_ip]) -} - -output "instances_private_ip" { - value = concat([module.rke2_first_server.instances_private_ip], [module.rke2_additional_servers.instances_private_ip]) -} +# Uncomment for debugging purposes +#output "rke2_first_server_config_file" { +# value = nonsensitive(module.rke2_first.rke2_user_data) +#} -output "rancher_hostname" { - value = local.rancher_hostname -} +# Uncomment for debugging purposes +#output "rke2_additional_servers_config_file" { +# value = nonsensitive(module.rke2_additional.rke2_user_data) +#} output "rancher_url" { - value = "https://${local.rancher_hostname}" + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" } output "rancher_password" { - value = var.rancher_bootstrap_password -} - -output "rancher_admin_token" { - description = "Rancher API token for the admin user" - value = module.rancher_install.rancher_admin_token - sensitive = true + description = "Rancher Initial Custom Password" + value = var.rancher_password } diff --git a/recipes/upstream/aws/rke2/provider.tf b/recipes/upstream/aws/rke2/provider.tf index 
6997a762..8e915083 100644 --- a/recipes/upstream/aws/rke2/provider.tf +++ b/recipes/upstream/aws/rke2/provider.tf @@ -1,8 +1,36 @@ terraform { required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + ssh = { source = "loafoe/ssh" version = "2.6.0" } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } } -} \ No newline at end of file + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/recipes/upstream/aws/rke2/terraform.tfvars.example b/recipes/upstream/aws/rke2/terraform.tfvars.example index f084ca75..6aa5b1f3 100644 --- a/recipes/upstream/aws/rke2/terraform.tfvars.example +++ b/recipes/upstream/aws/rke2/terraform.tfvars.example @@ -1,51 +1,102 @@ ###### !! Required variables !! ###### -## -- Terraform will use the default ~/.aws/credentials file or environment variables to determine the access/secret keys. Uncomment the below only if necessary. -# aws_access_key = "ACCESS_KEY_HERE" -# aws_secret_key = "SECRET_KEY_HERE" +## -- The prefix used in front of all AWS resources +prefix = "" -## -- AWS region to create the resources, uncomment one or adjust as needed -# aws_region = "us-east-1" # US, Virginia -# aws_region = "us-west-2" # US, Oregon -# aws_region = "eu-west-1" # EU, Ireland -# aws_region = "eu-west-1" # EU, Frankfurt -# aws_region = "ap-southeast-2" # AU, Sydney -# aws_region = "ap-south-1" # IN, Mumbai +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = -## -- Set the prefix for the name tag on instancrease created. A default prefix (rancher-terraform) if not provided. -prefix = "my-name-here" +## -- AWS Region to create the resources +aws_region = "" -###### !! Optional variables !! ###### +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html -## -- Password to set when installing Rancher, otherwise use default (initial-admin-password) -# rancher_password = "at-least-12-characters" +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true -## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository -# rancher_version = "2.7.3" +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## - Specify whether VPC / Subnet should be created for the instances +# create_vpc = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" -## -- Override the default k8s version used by RKE2 -# rke2_version = "v1.25.10+rke2r1" +## -- AWS VPC used for all resources +# vpc_id = null -## -- Number and type of EC2 instances to launch -instance_count = 1 +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type # instance_type = "t3.medium" ## -- Use spot instances # spot_instances = false +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- RKE2 version +# rke2_version = "v1.28.3+rke2r2" + ## -- RKE2 token, override the programmatically generated token # rke2_token = "string here" -##### SSH -## -- (A) Create a new keypair in AWS -create_ssh_key_pair = true -## -- Override the default (./${prefix}_ssh_private_key.pem) path where this SSH key is written -# ssh_private_key_path = "/path/to/private/key.pem" +## -- RKE2 custom config file +# rke2_config = "" + +## -- RKE2 KUBECONFIG file path +# kube_config_path = "" + +## -- RKE2 KUBECONFIG file +# kube_config_filename = "" + +## -- Bootstrap the Rancher installation +# bootstrap_rancher = false + +## -- Hostname to set when installing Rancher +rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" -## -- (B) Provide an existing keypair name in AWS to use for nodes, the matching private key file for this keypair also must be provided so RKE can SSH to the launched nodes -# ssh_key_pair_name = "aws_keypair_name" -# ssh_key_pair_path = "/path/to/private/key.pem" -##### +## -- Rancher ingressClassName value +# rancher_ingress_class_name = "nginx" -## -- Override the default (${prefix}_kube_config.yml) kubeconfig file/path -# kube_config_path = "~/.kube/rancher-terraform.yml" +## -- Rancher serviceType value +# rancher_service_type = "ClusterIP" diff --git a/recipes/upstream/aws/rke2/variables.tf b/recipes/upstream/aws/rke2/variables.tf index 3e27b687..c0607891 100644 --- a/recipes/upstream/aws/rke2/variables.tf +++ b/recipes/upstream/aws/rke2/variables.tf @@ -1,14 +1,10 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null -} +variable 
"prefix" {} -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null -} +# variable "aws_access_key" {} + +# variable "aws_secret_key" {} + +# variable "aws_session_token" {} variable "aws_region" { type = string @@ -51,30 +47,64 @@ variable "aws_region" { } } -variable "prefix" { - type = string - description = "Prefix added to names of all resources" - default = null +variable "create_ssh_key_pair" { + default = null } -variable "instance_count" { - type = number - description = "Number of EC2 instances to create" - default = null +variable "ssh_key_pair_name" { + default = null } -variable "instance_type" { - type = string - description = "Instance type used for all EC2 instances" - default = null +variable "ssh_private_key_path" { + default = null } -variable "instance_disk_size" { - type = string - description = "Specify root disk size (GB)" +variable "ssh_public_key_path" { + default = null +} + +variable "create_vpc" { + default = null +} + +# variable "vpc_ip_cidr_range" {} + +variable "vpc_id" { + default = null +} + +variable "subnet_id" { + default = null +} + +variable "create_security_group" { + default = null +} + +variable "instance_count" {} + +# variable "instance_type" {} + +# variable "spot_instances" {} + +# variable "instance_disk_size" {} + +variable "instance_security_group_id" { + default = null +} + +variable "ssh_username" {} + +variable "user_data" { + description = "User data content for EC2 instance(s)" default = null } +variable "waiting_time" { + description = "Waiting time (in seconds)" + default = 180 +} + variable "rke2_version" { type = string description = "Kubernetes version to use for the RKE2 cluster" @@ -103,85 +133,35 @@ variable "kube_config_filename" { default = null } -variable "rancher_bootstrap_password" { - description = "Password to use for bootstrapping Rancher (min 12 characters)" - default = "initial-admin-password" - type = string +variable "bootstrap_rancher" { + description = "Bootstrap the Rancher installation" + type = bool + default = true } +variable "rancher_hostname" {} + variable "rancher_password" { - description = "Password to use for Rancher (min 12 characters)" - default = null - type = string + type = string validation { condition = length(var.rancher_password) >= 12 - error_message = "The password provided for Rancher (rancher_password) must be at least 12 characters" + error_message = "The password must be at least 12 characters." 
} } variable "rancher_version" { description = "Rancher version to install" - default = null - type = string -} - -variable "rancher_replicas" { - description = "Value for replicas when installing the Rancher helm chart" - default = 3 - type = number -} - -variable "create_ssh_key_pair" { - type = bool - description = "Specify if a new SSH key pair needs to be created for the instances" - default = null -} - -variable "ssh_key_pair_name" { - type = string - description = "Specify the SSH key name to use (that's already present in AWS)" - default = null -} - -variable "ssh_key_pair_path" { type = string - description = "Path to the SSH private key used as the key pair (that's already present in AWS)" default = null } -variable "ssh_username" { - type = string - description = "Username used for SSH with sudo access" - default = "ubuntu" -} - -variable "spot_instances" { - type = bool - description = "Use spot instances" - default = null -} - -variable "subnet_id" { - type = string - description = "VPC Subnet ID to create the instance(s) in" - default = null -} - -variable "create_security_group" { - type = bool - description = "Should create the security group associated with the instance(s)" - default = null -} - -# TODO: Add a check based on above value -variable "instance_security_group" { - type = string - description = "Provide a pre-existing security group ID" - default = null +variable "rancher_ingress_class_name" { + description = "Rancher ingressClassName value" + default = "nginx" } -variable "wait" { - description = "An optional wait before installing the Rancher helm chart" - default = "20s" +variable "rancher_service_type" { + description = "Rancher serviceType value" + default = "ClusterIP" } diff --git a/tests/modules/infra/aws/README.md b/tests/modules/infra/aws/README.md deleted file mode 100644 index bb1fa9d2..00000000 --- a/tests/modules/infra/aws/README.md +++ /dev/null @@ -1 +0,0 @@ -This directory has code to test the aws infra [module](../../../../modules/infra/aws). diff --git a/tests/modules/infra/aws/ec2/README.md b/tests/modules/infra/aws/ec2/README.md new file mode 100644 index 00000000..b27c012d --- /dev/null +++ b/tests/modules/infra/aws/ec2/README.md @@ -0,0 +1,32 @@ +# TEST - AWS EC2 instances deploy + +This directory has code to test the AWS EC2 [module](../../../../../modules/infra/aws/ec2). + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd test/modules/infra/aws/ec2 +``` + +- Edit `./variables.tf` + - Update the required variables: + - `prefix` to give the resources an identifiable name (eg, your initials or first name) + - `aws_region` to suit your region + - `instance_count` to specify the number of instances to create + - `ssh_username` to specify the user used to create the VMs (default "ubuntu") +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
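
As a reference only, the edited defaults in `./variables.tf` for this EC2 test might end up looking like the sketch below; every value is a placeholder to adapt, not a recommendation:

```terraform
# Illustrative defaults for the EC2 test - adjust every value to your environment
variable "prefix" {
  default = "jdoe-ec2-test"
}

variable "aws_region" {
  default = "eu-west-1"
}

variable "instance_count" {
  default = 2
}

variable "ssh_username" {
  default = "ubuntu"
}
```
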
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 diff --git a/tests/modules/infra/aws/ec2/docs.md b/tests/modules/infra/aws/ec2/docs.md new file mode 100644 index 00000000..42859dea --- /dev/null +++ b/tests/modules/infra/aws/ec2/docs.md @@ -0,0 +1,40 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | diff --git a/tests/modules/infra/aws/ec2/main.tf b/tests/modules/infra/aws/ec2/main.tf new file mode 100644 index 00000000..f3f90174 --- /dev/null +++ b/tests/modules/infra/aws/ec2/main.tf @@ -0,0 +1,7 @@ +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username +} diff --git a/tests/modules/infra/aws/ec2/outputs.tf b/tests/modules/infra/aws/ec2/outputs.tf new file mode 100644 index 00000000..28474230 --- /dev/null +++ b/tests/modules/infra/aws/ec2/outputs.tf @@ -0,0 +1,7 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} diff --git a/tests/modules/infra/aws/ec2/provider.tf b/tests/modules/infra/aws/ec2/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/modules/infra/aws/ec2/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/modules/infra/aws/ec2/terraform.tfvars.example 
b/tests/modules/infra/aws/ec2/terraform.tfvars.example new file mode 100644 index 00000000..f5d4fd1e --- /dev/null +++ b/tests/modules/infra/aws/ec2/terraform.tfvars.example @@ -0,0 +1,20 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- The number of nodes +instance_count = 1 + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" diff --git a/tests/modules/infra/aws/ec2/user_data.tmpl b/tests/modules/infra/aws/ec2/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/modules/infra/aws/ec2/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/modules/infra/aws/ec2/variables.tf b/tests/modules/infra/aws/ec2/variables.tf new file mode 100644 index 00000000..efdb192c --- /dev/null +++ b/tests/modules/infra/aws/ec2/variables.tf @@ -0,0 +1,19 @@ +variable "prefix" { + default = "ec2-test" +} + +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} diff --git a/tests/modules/infra/aws/main.tf b/tests/modules/infra/aws/main.tf deleted file mode 100644 index ab6acd57..00000000 --- a/tests/modules/infra/aws/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -module "test1_all_defaults" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -module "test2_specify_sg" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_security_group = false - create_ssh_key_pair = true - instance_security_group = "default" -} - -resource "aws_vpc" "for_test3" { - -} - -module "test3_specify_dynamic_vpc" { - source = "../../../../modules/infra/aws" - - instance_count = 1 - create_ssh_key_pair = true - vpc_id = aws_vpc.for_test3.id -} diff --git a/tests/recipes/rke/split-roles/aws/README.md b/tests/recipes/rke/split-roles/aws/README.md new file mode 100644 index 00000000..d3790e82 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/README.md @@ -0,0 +1,31 @@ +# RKE | With split roles | AWS + +This module helps to create an RKE cluster with split roles (master, worker) on AWS infrastructure. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/rke/split-roles/aws +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `server_nodes_count`, `worker_nodes_count`, and `ssh_username`). +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
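
For illustration, a small throwaway split-roles layout (one server node, two worker nodes) could be expressed by changing the defaults in `./variables.tf` as sketched below; the names and values are examples only:

```terraform
# Example sizing for a short-lived split-roles test
variable "prefix" {
  default = "jdoe-split-roles"
}

variable "aws_region" {
  default = "eu-west-1"
}

variable "server_nodes_count" {
  default = 1
}

variable "worker_nodes_count" {
  default = 2
}
```
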
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/rke/split-roles/aws/docs.md b/tests/recipes/rke/split-roles/aws/docs.md new file mode 100644 index 00000000..48e9c812 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/docs.md @@ -0,0 +1,44 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +No providers. + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-master-nodes](#module\_aws-ec2-upstream-master-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | +| [aws-ec2-upstream-worker-nodes](#module\_aws-ec2-upstream-worker-nodes) | ../../../../../modules/infra/aws/ec2 | n/a | + +## Resources + +No resources. + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [server\_nodes\_count](#input\_server\_nodes\_count) | n/a | `number` | `3` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [worker\_nodes\_count](#input\_worker\_nodes\_count) | n/a | `number` | `3` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [security\_group](#output\_security\_group) | n/a | +| [subnet](#output\_subnet) | n/a | +| [vpc](#output\_vpc) | n/a | diff --git a/tests/recipes/rke/split-roles/aws/main.tf b/tests/recipes/rke/split-roles/aws/main.tf index ed638dd3..9f693428 100644 --- a/tests/recipes/rke/split-roles/aws/main.tf +++ b/tests/recipes/rke/split-roles/aws/main.tf @@ -1,21 +1,15 @@ -module "test1_default" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true +module "aws-ec2-upstream-master-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.server_nodes_count + ssh_username = var.ssh_username } -module "test2_pass_existing_key" { - source = "../../../../../recipes/rke/split-roles/aws" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - ssh_key_pair_name = "junk" - ssh_key_pair_path = "~/somepath" +module "aws-ec2-upstream-worker-nodes" { + source = "../../../../../modules/infra/aws/ec2" + prefix = "${var.prefix}-w" + aws_region = var.aws_region + instance_count = var.worker_nodes_count + ssh_username = var.ssh_username } diff --git a/tests/recipes/rke/split-roles/aws/outputs.tf 
b/tests/recipes/rke/split-roles/aws/outputs.tf new file mode 100644 index 00000000..02d833fb --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/outputs.tf @@ -0,0 +1,19 @@ +output "instances_private_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_private_ip], [module.aws-ec2-upstream-worker-nodes.instances_private_ip]) +} + +output "instances_public_ip" { + value = concat([module.aws-ec2-upstream-master-nodes.instances_public_ip], [module.aws-ec2-upstream-worker-nodes.instances_public_ip]) +} + +output "vpc" { + value = module.aws-ec2-upstream-master-nodes.vpc[0].id +} + +output "subnet" { + value = module.aws-ec2-upstream-master-nodes.subnet[0].id +} + +output "security_group" { + value = module.aws-ec2-upstream-master-nodes.security_group[0].id +} diff --git a/tests/recipes/rke/split-roles/aws/provider.tf b/tests/recipes/rke/split-roles/aws/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/rke/split-roles/aws/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/rke/split-roles/aws/variables.tf b/tests/recipes/rke/split-roles/aws/variables.tf index 21e0b5af..382f6564 100644 --- a/tests/recipes/rke/split-roles/aws/variables.tf +++ b/tests/recipes/rke/split-roles/aws/variables.tf @@ -1,11 +1,19 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "server_nodes_count" { + default = 3 +} + +variable "worker_nodes_count" { + default = 3 +} + +variable "ssh_username" { + default = "ubuntu" } diff --git a/tests/recipes/upstream/aws/rke/README.md b/tests/recipes/upstream/aws/rke/README.md new file mode 100644 index 00000000..d06f70fe --- /dev/null +++ b/tests/recipes/upstream/aws/rke/README.md @@ -0,0 +1,31 @@ +# Upstream | AWS | EC2 x RKE + +This directory contains the code for testing the AWS EC2 x RKE x Rancher modules. + +Documentation can be found [here](./docs.md). + +## Usage + +```bash +git clone https://github.com/rancherlabs/tf-rancher-up.git +cd tests/recipes/upstream/aws/rke +``` + +- Edit `./variables.tf` + - Update the required variables (`prefix`, `aws_region`, `ssh_private_key_path`, `instance_count`, `ssh_username`, `user_data`, `install_docker`, `docker_version`, `waiting_time`, `ingress_provider`, `bootstrap_rancher`, `rancher_hostname`, and `rancher_password`). +- Make sure you are logged into your AWS Account from your local Terminal. See the preparatory steps [here](../../../../../modules/infra/aws/README.md). 
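
One possible set of edited defaults for this test is sketched below (placeholder values, pick your own); with the hostname logic in this directory's `main.tf`, Rancher then becomes reachable at `https://<rancher_hostname>.<first-node-public-IP>.sslip.io`:

```terraform
# Placeholder values for the EC2 x RKE x Rancher test
variable "prefix" {
  default = "jdoe-rke-test"
}

variable "aws_region" {
  default = "eu-west-1"
}

variable "instance_count" {
  default = 3
}

variable "rancher_hostname" {
  default = "rancher"
}

variable "rancher_password" {
  default = "use-a-12-plus-character-secret"
}
```
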
+ +```bash +terraform init --upgrade ; terraform apply --auto-approve +``` + +- Destroy the resources when finished +```bash +terraform destroy --auto-approve + +``` + +See full argument list for each module in use: + - AWS EC2: https://github.com/rancher/tf-rancher-up/tree/main/modules/infra/aws/ec2 + - RKE: https://github.com/rancher/tf-rancher-up/tree/main/modules/distribution/rke + - Rancher: https://github.com/rancher/tf-rancher-up/tree/main/modules/rancher diff --git a/tests/recipes/upstream/aws/rke/docs.md b/tests/recipes/upstream/aws/rke/docs.md new file mode 100644 index 00000000..5d51fd1b --- /dev/null +++ b/tests/recipes/upstream/aws/rke/docs.md @@ -0,0 +1,57 @@ +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.14 | +| [aws](#requirement\_aws) | 5.53.0 | +| [helm](#requirement\_helm) | >= 2.10.1 | +| [kubernetes](#requirement\_kubernetes) | >= 2.0.0 | +| [ssh](#requirement\_ssh) | 2.6.0 | + +## Providers + +| Name | Version | +|------|---------| +| [null](#provider\_null) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [aws-ec2-upstream-cluster](#module\_aws-ec2-upstream-cluster) | ../../../../../modules/infra/aws/ec2 | n/a | +| [rancher\_install](#module\_rancher\_install) | ../../../../../modules/rancher | n/a | +| [rke](#module\_rke) | ../../../../../modules/distribution/rke | n/a | + +## Resources + +| Name | Type | +|------|------| +| [null_resource.wait-docker-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | +| [null_resource.wait-k8s-services-startup](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource | + +## Inputs + +| Name | Description | Type | Default | Required | +|------|-------------|------|---------|:--------:| +| [aws\_region](#input\_aws\_region) | n/a | `string` | `"us-east-1"` | no | +| [bootstrap\_rancher](#input\_bootstrap\_rancher) | n/a | `bool` | `true` | no | +| [docker\_version](#input\_docker\_version) | n/a | `string` | `"20.10"` | no | +| [ingress\_provider](#input\_ingress\_provider) | n/a | `string` | `"nginx"` | no | +| [install\_docker](#input\_install\_docker) | n/a | `bool` | `true` | no | +| [instance\_count](#input\_instance\_count) | n/a | `number` | `1` | no | +| [prefix](#input\_prefix) | n/a | `string` | `"ec2-test"` | no | +| [rancher\_hostname](#input\_rancher\_hostname) | n/a | `string` | `"rancher"` | no | +| [rancher\_password](#input\_rancher\_password) | n/a | `string` | `"at-least-12-characters"` | no | +| [ssh\_private\_key\_path](#input\_ssh\_private\_key\_path) | n/a | `any` | `null` | no | +| [ssh\_username](#input\_ssh\_username) | n/a | `string` | `"ubuntu"` | no | +| [user\_data](#input\_user\_data) | n/a | `any` | `null` | no | +| [waiting\_time](#input\_waiting\_time) | n/a | `number` | `180` | no | + +## Outputs + +| Name | Description | +|------|-------------| +| [instances\_private\_ip](#output\_instances\_private\_ip) | n/a | +| [instances\_public\_ip](#output\_instances\_public\_ip) | n/a | +| [rancher\_password](#output\_rancher\_password) | Rancher Initial Custom Password | +| [rancher\_url](#output\_rancher\_url) | Rancher URL | diff --git a/tests/recipes/upstream/aws/rke/main.tf b/tests/recipes/upstream/aws/rke/main.tf index 5491f35e..b870cd4f 100644 --- a/tests/recipes/upstream/aws/rke/main.tf +++ b/tests/recipes/upstream/aws/rke/main.tf @@ -1,11 +1,70 @@ -module "test1_default" { - source = 
"../../../../../recipes/upstream/aws/rke" - - prefix = "test1_default" - aws_access_key = var.aws_access_key - aws_secret_key = var.aws_secret_key - aws_region = "us-west-2" - create_ssh_key_pair = true - rancher_password = "this-is-an-insecure-password" - instance_count = 1 +module "aws-ec2-upstream-cluster" { + source = "../../../../../modules/infra/aws/ec2" + prefix = var.prefix + aws_region = var.aws_region + instance_count = var.instance_count + ssh_username = var.ssh_username + user_data = templatefile("${path.module}/user_data.tmpl", + { + install_docker = var.install_docker + username = var.ssh_username + docker_version = var.docker_version + } + ) +} + +resource "null_resource" "wait-docker-startup" { + depends_on = [module.aws-ec2-upstream-cluster.instances_public_ip] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + ssh_private_key_path = var.ssh_private_key_path != null ? var.ssh_private_key_path : "${path.cwd}/${var.prefix}-ssh_private_key.pem" +} + +module "rke" { + source = "../../../../../modules/distribution/rke" + prefix = var.prefix + dependency = [resource.null_resource.wait-docker-startup] + ssh_private_key_path = local.ssh_private_key_path + node_username = var.ssh_username + + rancher_nodes = [for instance_ips in module.aws-ec2-upstream-cluster.instance_ips : + { + public_ip = instance_ips.public_ip, + private_ip = instance_ips.private_ip, + roles = ["etcd", "controlplane", "worker"], + ssh_key_path = local.ssh_private_key_path, + ssh_key = null, + hostname_override = null + } + ] +} + +resource "null_resource" "wait-k8s-services-startup" { + depends_on = [module.rke] + provisioner "local-exec" { + command = "sleep ${var.waiting_time}" + } +} + +locals { + kubeconfig_file = "${path.cwd}/${var.prefix}_kube_config.yml" + rancher_hostname = var.rancher_hostname != null ? 
join(".", ["${var.rancher_hostname}", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) : join(".", ["rancher", module.aws-ec2-upstream-cluster.instances_public_ip[0], "sslip.io"]) + +} + +module "rancher_install" { + source = "../../../../../modules/rancher" + dependency = [null_resource.wait-k8s-services-startup] + kubeconfig_file = local.kubeconfig_file + rancher_hostname = local.rancher_hostname + rancher_bootstrap_password = var.rancher_password + rancher_password = var.rancher_password + bootstrap_rancher = var.bootstrap_rancher + rancher_additional_helm_values = [ + "replicas: ${var.instance_count}" + ] } diff --git a/tests/recipes/upstream/aws/rke/outputs.tf b/tests/recipes/upstream/aws/rke/outputs.tf new file mode 100644 index 00000000..3f0a3cc5 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/outputs.tf @@ -0,0 +1,17 @@ +output "instances_public_ip" { + value = module.aws-ec2-upstream-cluster.instances_public_ip +} + +output "instances_private_ip" { + value = module.aws-ec2-upstream-cluster.instances_private_ip +} + +output "rancher_url" { + description = "Rancher URL" + value = "https://${module.rancher_install.rancher_hostname}" +} + +output "rancher_password" { + description = "Rancher Initial Custom Password" + value = var.rancher_password +} diff --git a/tests/recipes/upstream/aws/rke/provider.tf b/tests/recipes/upstream/aws/rke/provider.tf new file mode 100644 index 00000000..8e915083 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/provider.tf @@ -0,0 +1,36 @@ +terraform { + required_providers { + aws = { + source = "hashicorp/aws" + version = "5.53.0" + } + + ssh = { + source = "loafoe/ssh" + version = "2.6.0" + } + + kubernetes = { + source = "hashicorp/kubernetes" + version = ">= 2.0.0" + } + + helm = { + source = "hashicorp/helm" + version = ">= 2.10.1" + } + } + + required_version = ">= 0.14" +} + +provider "aws" { + ## Option 1 - AWS CLI + # access_key = var.aws_access_key + # secret_key = var.aws_secret_key + # token = var.aws_session_token + ## Option 2 - Manually creating credential files + # shared_config_files = ["~/.aws/config"] + # shared_credentials_files = ["~/.aws/credentials"] + region = var.aws_region +} diff --git a/tests/recipes/upstream/aws/rke/terraform.tfvars.example b/tests/recipes/upstream/aws/rke/terraform.tfvars.example new file mode 100644 index 00000000..7787da60 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/terraform.tfvars.example @@ -0,0 +1,96 @@ +###### !! Required variables !! ###### + +## -- The prefix used in front of all AWS resources +prefix = "" + +# -- The necessary variables for login via CLI to the AWS console. If you will use these variables, also uncomment them in the provider.tf file. If you don't configure anything, the ~/.aws/credentials file will be looked for +# aws_access_key = +# aws_secret_key = +# aws_session_token = + +## -- AWS Region to create the resources +aws_region = "" + +#Ref. https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html + +## -- Specify if a new SSH key pair needs to be created for the instances +# create_ssh_key_pair = true + +## -- If you want to use an existing key pair, specify its name +# ssh_key_pair_name = null + +#Ref. 
https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html + +## -- The full path where is present the pre-generated SSH PRIVATE key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_private_key_path = null + +## -- The full path where is present the pre-generated SSH PUBLIC key (not generated by Terraform); if "create_ssh_key_pair = false" this variable must be set +# ssh_public_key_path = null + +## -- Range of private IPs available for the AWS VPC +# vpc_ip_cidr_range = "10.0.0.0/16" + +## -- AWS VPC used for all resources +# vpc_id = null + +## -- AWS Subnet used for all resources +# subnet_id = null + +## -- AWS Security Group used for all resources +# create_security_group = null + +#Ref. https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html + +## -- The number of nodes +instance_count = 3 + +## -- The name of a AWS EC2 machine type +# instance_type = "t3.medium" + +## -- Use spot instances +# spot_instances = false + +## -- Size of the disk attached to each node, specified in GB +# instance_disk_size = 80 + +## -- If the Security Group was created manually, it can be entered here +# instance_security_group_id = null + +## -- Username used to allow access to VMs via SSH key exchange +ssh_username = "ubuntu" + +## -- Script that will run when the VMs start +# user_data = "" + +## -- Bastion host configuration to access the instances +# bastion_host = null + +## -- IAM Instance Profile to assign to the instances/nodes +# iam_instance_profile = null + +## -- User-provided tags for the resources +# tags = {} + +## -- Install Docker while creating the instance +# install_docker = true + +## -- Docker version to install on nodes +# docker_version = "20.10" + +## -- Waiting time (in seconds) +# waiting_time = 180 + +## -- Override the default k8s version used by RKE +# kubernetes_version = "v1.24.10-rancher4-1" + +## -- K8s Ingress Controller +# ingress_provider = nginx + +## -- Hostname to set when installing Rancher +# rancher_hostname = "rancher" + +## -- Password to set when installing Rancher +rancher_password = "at-least-12-characters" + +## -- Rancher version to use when installing the Rancher helm chart, otherwise use the latest in the stable repository +# rancher_version = "2.7.3" diff --git a/tests/recipes/upstream/aws/rke/user_data.tmpl b/tests/recipes/upstream/aws/rke/user_data.tmpl new file mode 100644 index 00000000..e794cfa6 --- /dev/null +++ b/tests/recipes/upstream/aws/rke/user_data.tmpl @@ -0,0 +1,9 @@ +#!/bin/bash + +if [ ${install_docker} = true ] + then + echo 'Installing Docker' + export DEBIAN_FRONTEND=noninteractive + curl -sSL https://releases.rancher.com/install-docker/${docker_version}.sh | sh - + sudo usermod -aG docker ${username} +fi diff --git a/tests/recipes/upstream/aws/rke/variables.tf b/tests/recipes/upstream/aws/rke/variables.tf index 21e0b5af..bca65038 100644 --- a/tests/recipes/upstream/aws/rke/variables.tf +++ b/tests/recipes/upstream/aws/rke/variables.tf @@ -1,11 +1,54 @@ -variable "aws_access_key" { - type = string - description = "AWS access key used to create infrastructure" - default = null +variable "prefix" { + default = "ec2-test" } -variable "aws_secret_key" { - type = string - description = "AWS secret key used to create AWS infrastructure" - default = null +variable "aws_region" { + default = "us-east-1" +} + +variable "ssh_private_key_path" { + default = null +} + +variable "instance_count" { + default = 1 +} + +variable "ssh_username" { + default = "ubuntu" +} + 
+variable "user_data" { + default = null +} + +variable "install_docker" { + type = bool + default = true +} + +variable "docker_version" { + type = string + default = "20.10" +} + +variable "waiting_time" { + default = 180 +} + +variable "ingress_provider" { + default = "nginx" +} + +variable "bootstrap_rancher" { + type = bool + default = true +} + +variable "rancher_hostname" { + default = "rancher" +} + +variable "rancher_password" { + default = "at-least-12-characters" }