Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PLFM-8554: Configure IAM User to access S3 #3

Merged
merged 2 commits into from
Oct 31, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions cdk.json
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@
"OwnerEmail": "[email protected]"
},
"STACK_NAME_PREFIX": "registry-dev",
"BUCKET_NAME": "dev-docker-registry.sagebase.org",
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

bucket names are unique in a region. To ensure a bucket name is unique I would not set this param and let AWS create a unique bucket name. If you insist on naming the bucket then I would add the AWS account ID somewhere to the name.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

bucket names are unique in a region.

In fact bucket names are unique across all standard regions, not just within a region. From the docs:

Amazon S3 supports global buckets, which means that each bucket name must be unique across all AWS accounts in all the AWS Regions within a partition. A partition is a grouping of Regions. AWS currently has three partitions: aws (Standard Regions), aws-cn (China Regions), and aws-us-gov (AWS GovCloud (US)).

I like the idea of letting CloudFormation create a unique name for a bucket however the Docker registry already has a bucket containing production data, and this deployment will have to link to that existing bucket, so CF can't choose the bucket name.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ahh ok, i thought this was creating the buckets.

"VPC_CIDR": "172.29.0.0/24",
"ACM_CERT_ARN": "arn:aws:acm:us-east-1:449435941126:certificate/bbd59a26-ad30-4b74-ad2d-194241801b22"
},
Expand All @@ -53,6 +54,7 @@
"OwnerEmail": "[email protected]"
},
"STACK_NAME_PREFIX": "registry-prod",
"BUCKET_NAME": "prod.docker.registry.sagebase.org",
"VPC_CIDR": "172.29.1.0/24",
"ACM_CERT_ARN": "arn:aws:acm:us-east-1:325565585839:certificate/7c42c355-3d69-4537-a5e6-428212db646f"
}
Expand Down
52 changes: 43 additions & 9 deletions docker_fargate/docker_fargate_stack.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,11 @@
from aws_cdk import (Stack,
aws_ec2 as ec2,
aws_s3 as s3,
aws_ecs as ecs,
aws_ecs_patterns as ecs_patterns,
aws_elasticloadbalancingv2 as elbv2,
aws_route53 as r53,
aws_iam as iam,
CfnOutput,
Duration,
Tags)
Expand All @@ -14,6 +16,8 @@
from constructs import Construct
from docker_fargate.generate_ssl_cert import cert_gen

from aws_cdk.aws_ecr_assets import Platform

ACM_CERT_ARN_CONTEXT = "ACM_CERT_ARN"
IMAGE_PATH_AND_TAG_CONTEXT = "IMAGE_PATH_AND_TAG"
PORT_NUMBER_CONTEXT = "PORT"
Expand All @@ -25,15 +29,22 @@
PRIVATE_KEY_FILE_NAME = "privatekey.pem"
CERTIFICATE_FILE_NAME = "certificate.pem"

def get_secret(scope: Construct, id: str, name: str) -> str:
# Context key in cdk.json whose value is the S3 bucket name handed to the container.
BUCKET_NAME = "BUCKET_NAME"

# JSON key inside the Secrets Manager secret (and the env-var name the
# container reads) holding the notification auth value.
SECRET_JSON_KEY = "notification_auth"

def get_secret(scope: Construct, id: str, name: str, secret_json_key: str) -> ecs.Secret:
    """Look up an existing Secrets Manager secret by name and wrap it as an
    ECS secret exposing only the JSON field *secret_json_key*.

    :param scope: construct scope used for the secret lookup.
    :param id: logical id for the imported secret construct.
    :param name: name of the secret in AWS Secrets Manager.
    :param secret_json_key: JSON key within the secret whose value is injected
        into the container.
    :return: an ``ecs.Secret`` suitable for a container definition
        (the original ``-> str`` annotation was incorrect).
    """
    isecret = sm.Secret.from_secret_name_v2(scope, id, name)
    return ecs.Secret.from_secrets_manager(isecret, secret_json_key)
# see also: https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_ecs/Secret.html
# see also: ecs.Secret.from_ssm_parameter(ssm.IParameter(parameter_name=name))

def get_container_env(env: dict) -> dict:
    """Return the container environment mapping from *env*, or an empty
    dict when the context key is absent."""
    if CONTAINER_ENV_NAME in env:
        return env[CONTAINER_ENV_NAME]
    return {}

def get_bucket_name(env: dict) -> str:
    """Return the S3 bucket name configured under the ``BUCKET_NAME``
    context key, or ``None`` when it is not configured.

    (The original ``-> dict`` annotation was incorrect: the cdk.json
    context value is a string such as ``dev-docker-registry.sagebase.org``.)
    """
    return env.get(BUCKET_NAME)

def get_certificate_arn(env: dict) -> str:
    """Return the ACM certificate ARN from the stack context, or ``None``
    when the key is absent."""
    if ACM_CERT_ARN_CONTEXT in env:
        return env[ACM_CERT_ARN_CONTEXT]
    return None

Expand All @@ -43,14 +54,37 @@ def get_docker_image_name(env: dict):
def get_port(env: dict) -> int:
    """Return the container port from the stack context as an ``int``."""
    port_value = env.get(PORT_NUMBER_CONTEXT)
    return int(port_value)


class DockerFargateStack(Stack):

def __init__(self, scope: Construct, context: str, env: dict, vpc: ec2.Vpc, **kwargs) -> None:
stack_prefix = f'{env.get(config.STACK_NAME_PREFIX_CONTEXT)}'
stack_id = f'{stack_prefix}-DockerFargateStack'
super().__init__(scope, stack_id, **kwargs)

# set up the bucket
bucket_name=get_bucket_name(env)
bucket_arn=f"arn:aws:s3:::{bucket_name}"
bucket=s3.Bucket.from_bucket_attributes(self, id=bucket_name, bucket_arn=bucket_arn)

#
# Docker Registry cannot access the task role provided by
# ECS. The work-around is to define an IAM user, give the
# user bucket access, and pass its key pair to the container
# as environment variables.
#

# create a user
user = iam.User(self, "DockerRegistryUser")
# create a key pair, storing the secret in Secret Manager
access_key = iam.AccessKey(self, "AccessKey", user=user)
secret_stored_name = f'{env.get(config.STACK_NAME_PREFIX_CONTEXT)}-DockerFargateStack/{context}/access_key'
secret_stored_access_key = sm.Secret(self, secret_stored_name,
secret_string_value=access_key.secret_access_key
)

# give the user S3 access
bucket.grant_read_write(user)

cluster = ecs.Cluster(
self,
f'{stack_id}-Cluster',
Expand All @@ -59,10 +93,13 @@ def __init__(self, scope: Construct, context: str, env: dict, vpc: ec2.Vpc, **kw

secret_name = f'{env.get(config.STACK_NAME_PREFIX_CONTEXT)}-DockerFargateStack/{context}/ecs'
secrets = {
SECRETS_MANAGER_ENV_NAME: get_secret(self, secret_name, secret_name)
SECRET_JSON_KEY: get_secret(self, secret_name, secret_name, SECRET_JSON_KEY),
"AWS_SECRET_ACCESS_KEY": ecs.Secret.from_secrets_manager(secret_stored_access_key)
}

env_vars = get_container_env(env)
env_vars[BUCKET_NAME]=bucket_name
env_vars["AWS_ACCESS_KEY_ID"]=access_key.access_key_id

# Build the container image for the registry
# Need self-signed certificates to add to the image
Expand All @@ -75,6 +112,7 @@ def __init__(self, scope: Construct, context: str, env: dict, vpc: ec2.Vpc, **kw
# Now build the image, using the self-signed cert and key
image = ecs.ContainerImage.from_asset(
directory=".",
platform=Platform.LINUX_AMD64, # important to include when building locally, for testing
build_args={"stack":context} # 'dev' or 'prod'
)

Expand All @@ -90,10 +128,6 @@ def __init__(self, scope: Construct, context: str, env: dict, vpc: ec2.Vpc, **kw
get_certificate_arn(env),
)

#
# for options to pass to ApplicationLoadBalancedTaskImageOptions see:
# https://docs.aws.amazon.com/cdk/api/v1/python/aws_cdk.aws_ecs_patterns/ApplicationLoadBalancedTaskImageOptions.html#aws_cdk.aws_ecs_patterns.ApplicationLoadBalancedTaskImageOptions
#
load_balanced_fargate_service = ecs_patterns.ApplicationLoadBalancedFargateService(
self,
f'{stack_prefix}-Service',
Expand All @@ -109,7 +143,7 @@ def __init__(self, scope: Construct, context: str, env: dict, vpc: ec2.Vpc, **kw
target_protocol=elbv2.ApplicationProtocol.HTTPS,
certificate=cert,
protocol=elbv2.ApplicationProtocol.HTTPS,
ssl_policy=elbv2.SslPolicy.FORWARD_SECRECY_TLS12_RES, # Strong forward secrecy ciphers and TLS1.2 only.
ssl_policy=elbv2.SslPolicy.FORWARD_SECRECY_TLS12_RES # Strong forward secrecy ciphers and TLS1.2 only.
)

scalable_target = load_balanced_fargate_service.service.auto_scale_task_count(
Expand Down
4 changes: 2 additions & 2 deletions resources/dev/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ storage:
layerinfo: inmemory
s3:
region: us-east-1
bucket: dev-docker-registry.sagebase.org
bucket: bucket_name
encrypt: true
secure: true
v4auth: true
Expand All @@ -31,7 +31,7 @@ storage:
auth:
token:
realm: https://repo-dev.dev.sagebase.org/docker/v1/bearerToken
service: dev-docker-registry.dev.sagebase.org
service: docker-staging.dev.sagebase.org
issuer: www.synapse.org
rootcertbundle: /etc/docker/registry/token_signing_key_public_cert.pem

Expand Down
2 changes: 1 addition & 1 deletion resources/prod/config.yml
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ storage:
layerinfo: inmemory
s3:
region: us-east-1
bucket: prod.docker.registry.sagebase.org
bucket: bucket_name
encrypt: true
secure: true
v4auth: true
Expand Down
5 changes: 5 additions & 0 deletions startup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,11 @@
# during ECS deployment comes from the AWS Secrets Manager.
sed -i "s/notification_auth/$notification_auth/g" /etc/docker/registry/config.yml

# Inject the bucket name into config.yml by replacing the literal
# placeholder "bucket_name". The value comes from the BUCKET_NAME
# environment variable which, during ECS deployment, is set from cdk.json.
sed -i "s/bucket_name/$BUCKET_NAME/g" /etc/docker/registry/config.yml

# NOTE: this assumes a particular start-up command for the container registry;
# if the command changes in future registry versions, this will have to be updated too
/entrypoint.sh /etc/docker/registry/config.yml
Loading