Skip to content
This repository has been archived by the owner on Jul 21, 2023. It is now read-only.

Commit

Permalink
Rename peer_share_processor_names -> localities. (#154)
Browse files Browse the repository at this point in the history
Also, change job names for workflow-manager and sample-maker. We now put
the category of job (workflow-manager) first, so similar things group
together. Also, this eliminates the redundant copy of the locality name
in the job names - locality name is already in the namespace. New name
format:

workflow-manager-[INGESTOR]-[ENV]

Also, replace the example locality names in the demo tfvars with some
fictional places, so the role of the locality is clearer (since PHA is somewhat
overloaded). And use three localities instead of two to make it clear that
the N anticipated localities are different from the two anticipated ingestors
and two anticipated peers.
  • Loading branch information
jsha authored Nov 5, 2020
1 parent 378616f commit d6f14ea
Show file tree
Hide file tree
Showing 6 changed files with 41 additions and 30 deletions.
2 changes: 1 addition & 1 deletion terraform/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ If you're having problems, check `gcloud config list` and `kubectl config curren

## New clusters

To add a data share processor to support a new PHA in an existing region, add their PHA name to the `peer_share_processor_names` variable in the relevant `variables/<environment>.tfvars` file.
To add a data share processor to support a new locality, add that locality's name to the `localities` variable in the relevant `variables/<environment>.tfvars` file.

To bring up a whole new cluster, drop a `your-new-environment.tfvars` file in `variables`, fill in the required variables and then bootstrap it with:

Expand Down
28 changes: 15 additions & 13 deletions terraform/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ variable "machine_type" {
default = "e2.small"
}

variable "peer_share_processor_names" {
variable "localities" {
type = list(string)
}

Expand Down Expand Up @@ -145,14 +145,14 @@ data "http" "peer_share_processor_global_manifest" {
url = "https://${var.peer_share_processor_manifest_base_url}/global-manifest.json"
}

# While we create a distinct data share processor for each (ingestor, peer data
# share processor) pair, we only create one packet decryption key for each peer
# data share processor, and use it for all ingestors. Since the secret must be
# in a namespace and accessible from both data share processors, that means both
# data share processors must be in a single Kubernetes namespace, which we
# create here and pass into the data share processor module.
# While we create a distinct data share processor for each (ingestor, locality)
# pair, we only create one packet decryption key for each locality, and use it
# for all ingestors. Since the secret must be in a namespace and accessible
# from all of our data share processors, that means all data share processors
# associated with a given locality must be in a single Kubernetes namespace,
# which we create here and pass into the data share processor module.
resource "kubernetes_namespace" "namespaces" {
for_each = toset(var.peer_share_processor_names)
for_each = toset(var.localities)
metadata {
name = each.key
annotations = {
Expand All @@ -162,7 +162,7 @@ resource "kubernetes_namespace" "namespaces" {
}

resource "kubernetes_secret" "ingestion_packet_decryption_keys" {
for_each = toset(var.peer_share_processor_names)
for_each = toset(var.localities)
metadata {
name = "${var.environment}-${each.key}-ingestion-packet-decryption-key"
namespace = kubernetes_namespace.namespaces[each.key].metadata[0].name
Expand All @@ -181,12 +181,13 @@ resource "kubernetes_secret" "ingestion_packet_decryption_keys" {
}
}

# Now, we take the set product of peer share processor names x ingestor names to
# Now, we take the set product of localities x ingestor names to
# get the config values for all the data share processors we need to create.
locals {
peer_ingestor_pairs = {
for pair in setproduct(toset(var.peer_share_processor_names), keys(var.ingestors)) :
locality_ingestor_pairs = {
for pair in setproduct(toset(var.localities), keys(var.ingestors)) :
"${pair[0]}-${pair[1]}" => {
ingestor = pair[1]
kubernetes_namespace = kubernetes_namespace.namespaces[pair[0]].metadata[0].name
packet_decryption_key_kubernetes_secret = kubernetes_secret.ingestion_packet_decryption_keys[pair[0]].metadata[0].name
ingestor_aws_role_arn = lookup(jsondecode(data.http.ingestor_global_manifests[pair[1]].body).server-identity, "aws-iam-entity", "")
Expand All @@ -197,10 +198,11 @@ locals {
}

module "data_share_processors" {
for_each = local.peer_ingestor_pairs
for_each = local.locality_ingestor_pairs
source = "./modules/data_share_processor"
environment = var.environment
data_share_processor_name = each.key
ingestor = each.value.ingestor
gcp_region = var.gcp_region
gcp_project = var.gcp_project
kubernetes_namespace = each.value.kubernetes_namespace
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
variable "ingestor" {
type = string
}

variable "data_share_processor_name" {
type = string
}
Expand Down Expand Up @@ -347,6 +351,7 @@ resource "google_storage_bucket_iam_binding" "own_validation_bucket_admin" {
module "kubernetes" {
source = "../../modules/kubernetes/"
data_share_processor_name = var.data_share_processor_name
ingestor = var.ingestor
gcp_project = var.gcp_project
environment = var.environment
kubernetes_namespace = var.kubernetes_namespace
Expand Down
8 changes: 6 additions & 2 deletions terraform/modules/kubernetes/kubernetes.tf
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
variable "ingestor" {
type = string
}

variable "data_share_processor_name" {
type = string
}
Expand Down Expand Up @@ -250,7 +254,7 @@ resource "kubernetes_config_map" "aggregate_job_config_map" {

resource "kubernetes_cron_job" "workflow_manager" {
metadata {
name = "${var.environment}-${var.data_share_processor_name}-workflow-manager"
name = "workflow-manager-${var.ingestor}-${var.environment}"
namespace = var.kubernetes_namespace

annotations = {
Expand Down Expand Up @@ -311,7 +315,7 @@ resource "kubernetes_cron_job" "sample_maker" {
# buckets.
count = var.is_env_with_ingestor ? 1 : 0
metadata {
name = "${var.environment}-${var.data_share_processor_name}-sample-maker"
name = "sample-maker-${var.ingestor}-${var.environment}"
namespace = var.kubernetes_namespace

annotations = {
Expand Down
14 changes: 7 additions & 7 deletions terraform/variables/demo-gcp-peer.tfvars
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
environment = "demo-gcp-peer"
gcp_region = "us-west1"
gcp_project = "prio-bringup-290620"
machine_type = "e2-small"
peer_share_processor_names = ["test-pha-1", "test-pha-2"]
aws_region = "us-west-1"
manifest_domain = "isrg-prio.org"
environment = "demo-gcp-peer"
gcp_region = "us-west1"
gcp_project = "prio-bringup-290620"
machine_type = "e2-small"
localities = ["narnia", "gondor", "asgard"]
aws_region = "us-west-1"
manifest_domain = "isrg-prio.org"
managed_dns_zone = {
name = "manifests"
gcp_project = "prio-bringup-290620"
Expand Down
14 changes: 7 additions & 7 deletions terraform/variables/demo-gcp.tfvars
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
environment = "demo-gcp"
gcp_region = "us-west1"
gcp_project = "prio-bringup-290620"
machine_type = "e2-small"
peer_share_processor_names = ["test-pha-1", "test-pha-2"]
aws_region = "us-west-1"
manifest_domain = "isrg-prio.org"
environment = "demo-gcp"
gcp_region = "us-west1"
gcp_project = "prio-bringup-290620"
machine_type = "e2-small"
localities = ["narnia", "gondor", "asgard"]
aws_region = "us-west-1"
manifest_domain = "isrg-prio.org"
managed_dns_zone = {
name = "manifests"
gcp_project = "prio-bringup-290620"
Expand Down

0 comments on commit d6f14ea

Please sign in to comment.