From 419f660e6ad8acee8bed2bbad0cd60afd6c591c3 Mon Sep 17 00:00:00 2001 From: Aman Date: Tue, 28 Jan 2025 14:04:21 -0500 Subject: [PATCH 01/15] Created boilerplate for CloudhHSM list_tags --- moto/backend_index.py | 224 ----------------------- moto/cloudhsmv2/__init__.py | 1 + moto/cloudhsmv2/exceptions.py | 2 + moto/cloudhsmv2/models.py | 19 ++ moto/cloudhsmv2/responses.py | 40 ++++ moto/cloudhsmv2/urls.py | 11 ++ tests/test_cloudhsmv2/__init__.py | 0 tests/test_cloudhsmv2/test_cloudhsmv2.py | 16 ++ tests/test_cloudhsmv2/test_server.py | 13 ++ 9 files changed, 102 insertions(+), 224 deletions(-) create mode 100644 moto/cloudhsmv2/__init__.py create mode 100644 moto/cloudhsmv2/exceptions.py create mode 100644 moto/cloudhsmv2/models.py create mode 100644 moto/cloudhsmv2/responses.py create mode 100644 moto/cloudhsmv2/urls.py create mode 100644 tests/test_cloudhsmv2/__init__.py create mode 100644 tests/test_cloudhsmv2/test_cloudhsmv2.py create mode 100644 tests/test_cloudhsmv2/test_server.py diff --git a/moto/backend_index.py b/moto/backend_index.py index 9d8abc8295d6..1815c5d94021 100644 --- a/moto/backend_index.py +++ b/moto/backend_index.py @@ -1,225 +1 @@ # autogenerated by moto/scripts/update_backend_index.py -import re - -backend_url_patterns = [ - ("acm", re.compile("https?://acm\\.(.+)\\.amazonaws\\.com")), - ("acmpca", re.compile("https?://acm-pca\\.(.+)\\.amazonaws\\.com")), - ("amp", re.compile("https?://aps\\.(.+)\\.amazonaws\\.com")), - ("apigateway", re.compile("https?://apigateway\\.(.+)\\.amazonaws.com")), - ( - "apigatewaymanagementapi", - re.compile("https?://([^.]+\\.)*execute-api\\.[^.]+\\.amazonaws\\.com"), - ), - ("appconfig", re.compile("https?://appconfig\\.(.+)\\.amazonaws\\.com")), - ( - "applicationautoscaling", - re.compile("https?://application-autoscaling\\.(.+)\\.amazonaws.com"), - ), - ("appmesh", re.compile("https?://appmesh\\.(.+)\\.amazonaws\\.com")), - ("appsync", re.compile("https?://appsync\\.(.+)\\.amazonaws\\.com")), - ("athena", re.compile("https?://athena\\.(.+)\\.amazonaws\\.com")), - ("autoscaling", re.compile("https?://autoscaling\\.(.+)\\.amazonaws\\.com")), - ("awslambda", re.compile("https?://lambda\\.(.+)\\.amazonaws\\.com")), - ("backup", re.compile("https?://backup\\.(.+)\\.amazonaws\\.com")), - ("batch", re.compile("https?://batch\\.(.+)\\.amazonaws.com")), - ("bedrock", re.compile("https?://bedrock\\.(.+)\\.amazonaws\\.com")), - ("bedrockagent", re.compile("https?://bedrock-agent\\.(.+)\\.amazonaws\\.com")), - ("budgets", re.compile("https?://budgets\\.amazonaws\\.com")), - ("ce", re.compile("https?://ce\\.(.+)\\.amazonaws\\.com")), - ("cloudformation", re.compile("https?://cloudformation\\.(.+)\\.amazonaws\\.com")), - ("cloudfront", re.compile("https?://cloudfront\\.amazonaws\\.com")), - ("cloudfront", re.compile("https?://cloudfront\\.(.+)\\.amazonaws\\.com")), - ("cloudtrail", re.compile("https?://cloudtrail\\.(.+)\\.amazonaws\\.com")), - ("cloudwatch", re.compile("https?://monitoring\\.(.+)\\.amazonaws.com")), - ("codebuild", re.compile("https?://codebuild\\.(.+)\\.amazonaws\\.com")), - ("codecommit", re.compile("https?://codecommit\\.(.+)\\.amazonaws\\.com")), - ("codepipeline", re.compile("https?://codepipeline\\.(.+)\\.amazonaws\\.com")), - ("cognitoidentity", re.compile("https?://cognito-identity\\.(.+)\\.amazonaws.com")), - ("cognitoidp", re.compile("https?://cognito-idp\\.(.+)\\.amazonaws.com")), - ("comprehend", re.compile("https?://comprehend\\.(.+)\\.amazonaws\\.com")), - ("config", 
re.compile("https?://config\\.(.+)\\.amazonaws\\.com")), - ("databrew", re.compile("https?://databrew\\.(.+)\\.amazonaws.com")), - ("datapipeline", re.compile("https?://datapipeline\\.(.+)\\.amazonaws\\.com")), - ("datasync", re.compile("https?://(.*\\.)?(datasync)\\.(.+)\\.amazonaws.com")), - ("dax", re.compile("https?://dax\\.(.+)\\.amazonaws\\.com")), - ("directconnect", re.compile("https?://directconnect\\.(.+)\\.amazonaws\\.com")), - ("dms", re.compile("https?://dms\\.(.+)\\.amazonaws\\.com")), - ("ds", re.compile("https?://ds\\.(.+)\\.amazonaws\\.com")), - ("dsql", re.compile("https?://dsql\\.(.+)\\.api\\.aws")), - ("dynamodb", re.compile("https?://dynamodb\\.(.+)\\.amazonaws\\.com")), - ( - "dynamodbstreams", - re.compile("https?://streams\\.dynamodb\\.(.+)\\.amazonaws.com"), - ), - ("ebs", re.compile("https?://ebs\\.(.+)\\.amazonaws\\.com")), - ("ec2", re.compile("https?://ec2\\.(.+)\\.amazonaws\\.com(|\\.cn)")), - ( - "ec2instanceconnect", - re.compile("https?://ec2-instance-connect\\.(.+)\\.amazonaws\\.com"), - ), - ("ecr", re.compile("https?://ecr\\.(.+)\\.amazonaws\\.com")), - ("ecr", re.compile("https?://api\\.ecr\\.(.+)\\.amazonaws\\.com")), - ("ecs", re.compile("https?://ecs\\.(.+)\\.amazonaws\\.com")), - ("efs", re.compile("https?://elasticfilesystem\\.(.+)\\.amazonaws.com")), - ("efs", re.compile("https?://elasticfilesystem\\.amazonaws.com")), - ("eks", re.compile("https?://eks\\.(.+)\\.amazonaws.com")), - ("elasticache", re.compile("https?://elasticache\\.(.+)\\.amazonaws\\.com")), - ( - "elasticbeanstalk", - re.compile( - "https?://elasticbeanstalk\\.(?P[a-zA-Z0-9\\-_]+)\\.amazonaws.com" - ), - ), - ( - "elastictranscoder", - re.compile("https?://elastictranscoder\\.(.+)\\.amazonaws.com"), - ), - ("elb", re.compile("https?://elasticloadbalancing\\.(.+)\\.amazonaws.com")), - ("elbv2", re.compile("https?://elasticloadbalancing\\.(.+)\\.amazonaws.com")), - ("emr", re.compile("https?://(.+)\\.elasticmapreduce\\.amazonaws.com")), - ("emr", re.compile("https?://elasticmapreduce\\.(.+)\\.amazonaws.com")), - ("emrcontainers", re.compile("https?://emr-containers\\.(.+)\\.amazonaws\\.com")), - ("emrserverless", re.compile("https?://emr-serverless\\.(.+)\\.amazonaws\\.com")), - ("es", re.compile("https?://es\\.(.+)\\.amazonaws\\.com")), - ("events", re.compile("https?://events\\.(.+)\\.amazonaws\\.com")), - ("firehose", re.compile("https?://firehose\\.(.+)\\.amazonaws\\.com")), - ("forecast", re.compile("https?://forecast\\.(.+)\\.amazonaws\\.com")), - ("fsx", re.compile("https?://fsx\\.(.+)\\.amazonaws\\.com")), - ("glacier", re.compile("https?://glacier\\.(.+)\\.amazonaws.com")), - ("glue", re.compile("https?://glue\\.(.+)\\.amazonaws\\.com")), - ("greengrass", re.compile("https?://greengrass\\.(.+)\\.amazonaws.com")), - ("guardduty", re.compile("https?://guardduty\\.(.+)\\.amazonaws\\.com")), - ("iam", re.compile("https?://iam\\.(.*\\.)?amazonaws\\.com")), - ("identitystore", re.compile("https?://identitystore\\.(.+)\\.amazonaws\\.com")), - ("inspector2", re.compile("https?://inspector2\\.(.+)\\.amazonaws\\.com")), - ("instance_metadata", re.compile("http://169.254.169.254")), - ("iot", re.compile("https?://iot\\.(.+)\\.amazonaws\\.com")), - ("iotdata", re.compile("https?://data\\.iot\\.(.+)\\.amazonaws.com")), - ("iotdata", re.compile("https?://data-ats\\.iot\\.(.+)\\.amazonaws.com")), - ("ivs", re.compile("https?://ivs\\.(.+)\\.amazonaws\\.com")), - ("kafka", re.compile("https?://kafka\\.(.+)\\.amazonaws\\.com")), - ("kinesis", 
re.compile("https?://kinesis\\.(.+)\\.amazonaws\\.com")), - ("kinesis", re.compile("https?://(.+)\\.control-kinesis\\.(.+)\\.amazonaws\\.com")), - ("kinesis", re.compile("https?://(.+)\\.data-kinesis\\.(.+)\\.amazonaws\\.com")), - ("kinesisvideo", re.compile("https?://kinesisvideo\\.(.+)\\.amazonaws.com")), - ( - "kinesisvideoarchivedmedia", - re.compile("https?://.*\\.kinesisvideo\\.(.+)\\.amazonaws.com"), - ), - ("kms", re.compile("https?://kms\\.(.+)\\.amazonaws\\.com")), - ("lakeformation", re.compile("https?://lakeformation\\.(.+)\\.amazonaws\\.com")), - ("logs", re.compile("https?://logs\\.(.+)\\.amazonaws\\.com")), - ( - "managedblockchain", - re.compile("https?://managedblockchain\\.(.+)\\.amazonaws.com"), - ), - ("mediaconnect", re.compile("https?://mediaconnect\\.(.+)\\.amazonaws.com")), - ("medialive", re.compile("https?://medialive\\.(.+)\\.amazonaws.com")), - ("mediapackage", re.compile("https?://mediapackage\\.(.+)\\.amazonaws.com")), - ("mediastore", re.compile("https?://mediastore\\.(.+)\\.amazonaws\\.com")), - ("mediastoredata", re.compile("https?://data\\.mediastore\\.(.+)\\.amazonaws.com")), - ("memorydb", re.compile("https?://memory-db\\.(.+)\\.amazonaws\\.com")), - ( - "meteringmarketplace", - re.compile("https?://metering\\.marketplace\\.(.+)\\.amazonaws\\.com"), - ), - ( - "meteringmarketplace", - re.compile("https?://aws-marketplace\\.(.+)\\.amazonaws\\.com"), - ), - ("moto_api._internal", re.compile("https?://motoapi\\.amazonaws\\.com")), - ("mq", re.compile("https?://mq\\.(.+)\\.amazonaws\\.com")), - ("networkmanager", re.compile("https?://networkmanager\\.(.+)\\.amazonaws\\.com")), - ("opensearchserverless", re.compile("https?://aoss\\.(.+)\\.amazonaws\\.com")), - ("opsworks", re.compile("https?://opsworks\\.us-east-1\\.amazonaws.com")), - ("organizations", re.compile("https?://organizations\\.(.+)\\.amazonaws\\.com")), - ("osis", re.compile("https?://osis\\.(.+)\\.amazonaws\\.com")), - ("panorama", re.compile("https?://panorama\\.(.+)\\.amazonaws.com")), - ("personalize", re.compile("https?://personalize\\.(.+)\\.amazonaws\\.com")), - ("pinpoint", re.compile("https?://pinpoint\\.(.+)\\.amazonaws\\.com")), - ("polly", re.compile("https?://polly\\.(.+)\\.amazonaws.com")), - ("qldb", re.compile("https?://qldb\\.(.+)\\.amazonaws\\.com")), - ("quicksight", re.compile("https?://quicksight\\.(.+)\\.amazonaws\\.com")), - ("ram", re.compile("https?://ram\\.(.+)\\.amazonaws.com")), - ("rds", re.compile("https?://rds\\.(.+)\\.amazonaws\\.com")), - ("rds", re.compile("https?://rds\\.amazonaws\\.com")), - ("rdsdata", re.compile("https?://rds-data\\.(.+)\\.amazonaws\\.com")), - ("redshift", re.compile("https?://redshift\\.(.+)\\.amazonaws\\.com")), - ("redshiftdata", re.compile("https?://redshift-data\\.(.+)\\.amazonaws\\.com")), - ("rekognition", re.compile("https?://rekognition\\.(.+)\\.amazonaws\\.com")), - ("resiliencehub", re.compile("https?://resiliencehub\\.(.+)\\.amazonaws\\.com")), - ( - "resourcegroups", - re.compile("https?://resource-groups(-fips)?\\.(.+)\\.amazonaws.com"), - ), - ("resourcegroupstaggingapi", re.compile("https?://tagging\\.(.+)\\.amazonaws.com")), - ("robomaker", re.compile("https?://robomaker\\.(.+)\\.amazonaws\\.com")), - ("route53", re.compile("https?://route53(\\..+)?\\.amazonaws.com")), - ("route53domains", re.compile("https?://route53domains\\.(.+)\\.amazonaws\\.com")), - ( - "route53resolver", - re.compile("https?://route53resolver\\.(.+)\\.amazonaws\\.com"), - ), - ("s3", re.compile("https?://s3(?!(-control|tables))(.*)\\.amazonaws.com")), 
- ( - "s3", - re.compile( - "https?://(?P[a-zA-Z0-9\\-_.]*)\\.?s3(?!(-control|tables))(.*)\\.amazonaws.com" - ), - ), - ( - "s3control", - re.compile("https?://([0-9]+)\\.s3-control\\.(.+)\\.amazonaws\\.com"), - ), - ("s3tables", re.compile("https?://s3tables\\.(.+)\\.amazonaws\\.com")), - ("sagemaker", re.compile("https?://api\\.sagemaker\\.(.+)\\.amazonaws.com")), - ( - "sagemakermetrics", - re.compile("https?://metrics\\.sagemaker\\.(.+)\\.amazonaws\\.com"), - ), - ( - "sagemakerruntime", - re.compile("https?://runtime\\.sagemaker\\.(.+)\\.amazonaws\\.com"), - ), - ("scheduler", re.compile("https?://scheduler\\.(.+)\\.amazonaws\\.com")), - ("sdb", re.compile("https?://sdb\\.(.+)\\.amazonaws\\.com")), - ("secretsmanager", re.compile("https?://secretsmanager\\.(.+)\\.amazonaws\\.com")), - ( - "servicediscovery", - re.compile("https?://(data-)?servicediscovery\\.(.+)\\.amazonaws\\.com"), - ), - ("servicequotas", re.compile("https?://servicequotas\\.(.+)\\.amazonaws\\.com")), - ("ses", re.compile("https?://email\\.(.+)\\.amazonaws\\.com")), - ("ses", re.compile("https?://ses\\.(.+)\\.amazonaws\\.com")), - ("sesv2", re.compile("https?://email\\.(.+)\\.amazonaws\\.com")), - ("shield", re.compile("https?://shield\\.(.+)\\.amazonaws\\.com")), - ("signer", re.compile("https?://signer\\.(.+)\\.amazonaws\\.com")), - ("sns", re.compile("https?://sns\\.(.+)\\.amazonaws\\.com")), - ("sqs", re.compile("https?://(.*\\.)?(queue|sqs)\\.(.*\\.)?amazonaws\\.com")), - ("ssm", re.compile("https?://ssm\\.(.+)\\.amazonaws\\.com")), - ("ssm", re.compile("https?://ssm\\.(.+)\\.amazonaws\\.com\\.cn")), - ("ssoadmin", re.compile("https?://sso\\.(.+)\\.amazonaws\\.com")), - ("stepfunctions", re.compile("https?://states\\.(.+)\\.amazonaws.com")), - ("sts", re.compile("https?://sts\\.(.*\\.)?amazonaws\\.com")), - ("support", re.compile("https?://support\\.(.+)\\.amazonaws\\.com")), - ("swf", re.compile("https?://swf\\.(.+)\\.amazonaws\\.com")), - ("textract", re.compile("https?://textract\\.(.+)\\.amazonaws\\.com")), - ( - "timestreamquery", - re.compile("https?://query\\.timestream\\.(.+)\\.amazonaws\\.com"), - ), - ( - "timestreamwrite", - re.compile("https?://ingest\\.timestream\\.(.+)\\.amazonaws\\.com"), - ), - ( - "timestreamwrite", - re.compile("https?://ingest\\.timestream\\.(.+)\\.amazonaws\\.com/"), - ), - ("transcribe", re.compile("https?://transcribe\\.(.+)\\.amazonaws\\.com")), - ("transfer", re.compile("https?://transfer\\.(.+)\\.amazonaws\\.com")), - ("wafv2", re.compile("https?://wafv2\\.(.+)\\.amazonaws.com")), - ("workspaces", re.compile("https?://workspaces\\.(.+)\\.amazonaws\\.com")), - ("workspacesweb", re.compile("https?://workspaces-web\\.(.+)\\.amazonaws\\.com")), - ("xray", re.compile("https?://xray\\.(.+)\\.amazonaws.com")), -] diff --git a/moto/cloudhsmv2/__init__.py b/moto/cloudhsmv2/__init__.py new file mode 100644 index 000000000000..f64d2192ad66 --- /dev/null +++ b/moto/cloudhsmv2/__init__.py @@ -0,0 +1 @@ +from .models import cloudhsmv2_backends # noqa: F401 diff --git a/moto/cloudhsmv2/exceptions.py b/moto/cloudhsmv2/exceptions.py new file mode 100644 index 000000000000..f1877a951c51 --- /dev/null +++ b/moto/cloudhsmv2/exceptions.py @@ -0,0 +1,2 @@ +"""Exceptions raised by the cloudhsmv2 service.""" + diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py new file mode 100644 index 000000000000..fa5cb894abb8 --- /dev/null +++ b/moto/cloudhsmv2/models.py @@ -0,0 +1,19 @@ +"""CloudHSMV2Backend class with methods for supported APIs.""" + +from moto.core.base_backend import 
BackendDict, BaseBackend + + +class CloudHSMV2Backend(BaseBackend): + """Implementation of CloudHSMV2 APIs.""" + + def __init__(self, region_name, account_id): + super().__init__(region_name, account_id) + + # add methods from here + + def list_tags(self, resource_id, next_token, max_results): + # implement here + return tag_list, next_token + + +cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py new file mode 100644 index 000000000000..9e82eb38fc49 --- /dev/null +++ b/moto/cloudhsmv2/responses.py @@ -0,0 +1,40 @@ +"""Handles incoming cloudhsmv2 requests, invokes methods, returns responses.""" + +import json + +from moto.core.responses import BaseResponse + +from .models import cloudhsmv2_backends + + +class CloudHSMV2Response(BaseResponse): + """Handler for CloudHSMV2 requests and responses.""" + + def __init__(self): + super().__init__(service_name="cloudhsmv2") + + @property + def cloudhsmv2_backend(self): + """Return backend instance specific for this region.""" + # TODO + # cloudhsmv2_backends is not yet typed + # Please modify moto/backends.py to add the appropriate type annotations for this service + return cloudhsmv2_backends[self.current_account][self.region] + + # add methods from here + + def list_tags(self): + params = self._get_params() + resource_id = params.get("ResourceId") + next_token = params.get("NextToken") + max_results = params.get("MaxResults") + tag_list, next_token = self.cloudhsmv2_backend.list_tags( + resource_id=resource_id, + next_token=next_token, + max_results=max_results, + ) + # TODO: adjust response + return json.dumps(dict(tagList=tag_list, nextToken=next_token)) + + +# add templates from here diff --git a/moto/cloudhsmv2/urls.py b/moto/cloudhsmv2/urls.py new file mode 100644 index 000000000000..a091ea8f8b00 --- /dev/null +++ b/moto/cloudhsmv2/urls.py @@ -0,0 +1,11 @@ +"""cloudhsmv2 base URL and path.""" + +from .responses import CloudHSMV2Response + +url_bases = [ + r"https?://cloudhsmv2\.(.+)\.amazonaws\.com", +] + +url_paths = { + "{0}/$": CloudHSMV2Response.dispatch, +} diff --git a/tests/test_cloudhsmv2/__init__.py b/tests/test_cloudhsmv2/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py new file mode 100644 index 000000000000..c3fd7b552d16 --- /dev/null +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -0,0 +1,16 @@ +"""Unit tests for cloudhsmv2-supported APIs.""" + +import boto3 + +from moto import mock_aws + +# See our Development Tips on writing tests for hints on how to write good tests: +# http://docs.getmoto.org/en/latest/docs/contributing/development_tips/tests.html + + +@mock_aws +def test_list_tags(): + client = boto3.client("cloudhsmv2", region_name="eu-west-1") + resp = client.list_tags() + + raise Exception("NotYetImplemented") diff --git a/tests/test_cloudhsmv2/test_server.py b/tests/test_cloudhsmv2/test_server.py new file mode 100644 index 000000000000..9c5fd94929c0 --- /dev/null +++ b/tests/test_cloudhsmv2/test_server.py @@ -0,0 +1,13 @@ +"""Test different server responses.""" + +import moto.server as server + + +def test_cloudhsmv2_list(): + backend = server.create_backend_app("cloudhsmv2") + test_client = backend.test_client() + + resp = test_client.get("/") + + assert resp.status_code == 200 + assert "?" 
in str(resp.data) From c40418e8c484e50c0bcb4c5a6e86aed97c86ca97 Mon Sep 17 00:00:00 2001 From: Aman Date: Mon, 3 Feb 2025 10:36:16 -0500 Subject: [PATCH 02/15] Most of tagging is working --- moto/backend_index.py | 225 +++++++++++++++++++++++ moto/cloudhsmv2/exceptions.py | 1 - moto/cloudhsmv2/models.py | 75 +++++++- moto/cloudhsmv2/responses.py | 36 +++- tests/test_cloudhsmv2/test_cloudhsmv2.py | 81 +++++++- 5 files changed, 407 insertions(+), 11 deletions(-) diff --git a/moto/backend_index.py b/moto/backend_index.py index 1815c5d94021..6440312dac97 100644 --- a/moto/backend_index.py +++ b/moto/backend_index.py @@ -1 +1,226 @@ # autogenerated by moto/scripts/update_backend_index.py +import re + +backend_url_patterns = [ + ("acm", re.compile("https?://acm\\.(.+)\\.amazonaws\\.com")), + ("acmpca", re.compile("https?://acm-pca\\.(.+)\\.amazonaws\\.com")), + ("amp", re.compile("https?://aps\\.(.+)\\.amazonaws\\.com")), + ("apigateway", re.compile("https?://apigateway\\.(.+)\\.amazonaws.com")), + ( + "apigatewaymanagementapi", + re.compile("https?://([^.]+\\.)*execute-api\\.[^.]+\\.amazonaws\\.com"), + ), + ("appconfig", re.compile("https?://appconfig\\.(.+)\\.amazonaws\\.com")), + ( + "applicationautoscaling", + re.compile("https?://application-autoscaling\\.(.+)\\.amazonaws.com"), + ), + ("appmesh", re.compile("https?://appmesh\\.(.+)\\.amazonaws\\.com")), + ("appsync", re.compile("https?://appsync\\.(.+)\\.amazonaws\\.com")), + ("athena", re.compile("https?://athena\\.(.+)\\.amazonaws\\.com")), + ("autoscaling", re.compile("https?://autoscaling\\.(.+)\\.amazonaws\\.com")), + ("awslambda", re.compile("https?://lambda\\.(.+)\\.amazonaws\\.com")), + ("backup", re.compile("https?://backup\\.(.+)\\.amazonaws\\.com")), + ("batch", re.compile("https?://batch\\.(.+)\\.amazonaws.com")), + ("bedrock", re.compile("https?://bedrock\\.(.+)\\.amazonaws\\.com")), + ("bedrockagent", re.compile("https?://bedrock-agent\\.(.+)\\.amazonaws\\.com")), + ("budgets", re.compile("https?://budgets\\.amazonaws\\.com")), + ("ce", re.compile("https?://ce\\.(.+)\\.amazonaws\\.com")), + ("cloudformation", re.compile("https?://cloudformation\\.(.+)\\.amazonaws\\.com")), + ("cloudfront", re.compile("https?://cloudfront\\.amazonaws\\.com")), + ("cloudfront", re.compile("https?://cloudfront\\.(.+)\\.amazonaws\\.com")), + ("cloudhsmv2", re.compile("https?://cloudhsmv2\\.(.+)\\.amazonaws\\.com")), + ("cloudtrail", re.compile("https?://cloudtrail\\.(.+)\\.amazonaws\\.com")), + ("cloudwatch", re.compile("https?://monitoring\\.(.+)\\.amazonaws.com")), + ("codebuild", re.compile("https?://codebuild\\.(.+)\\.amazonaws\\.com")), + ("codecommit", re.compile("https?://codecommit\\.(.+)\\.amazonaws\\.com")), + ("codepipeline", re.compile("https?://codepipeline\\.(.+)\\.amazonaws\\.com")), + ("cognitoidentity", re.compile("https?://cognito-identity\\.(.+)\\.amazonaws.com")), + ("cognitoidp", re.compile("https?://cognito-idp\\.(.+)\\.amazonaws.com")), + ("comprehend", re.compile("https?://comprehend\\.(.+)\\.amazonaws\\.com")), + ("config", re.compile("https?://config\\.(.+)\\.amazonaws\\.com")), + ("databrew", re.compile("https?://databrew\\.(.+)\\.amazonaws.com")), + ("datapipeline", re.compile("https?://datapipeline\\.(.+)\\.amazonaws\\.com")), + ("datasync", re.compile("https?://(.*\\.)?(datasync)\\.(.+)\\.amazonaws.com")), + ("dax", re.compile("https?://dax\\.(.+)\\.amazonaws\\.com")), + ("directconnect", re.compile("https?://directconnect\\.(.+)\\.amazonaws\\.com")), + ("dms", re.compile("https?://dms\\.(.+)\\.amazonaws\\.com")), + 
("ds", re.compile("https?://ds\\.(.+)\\.amazonaws\\.com")), + ("dsql", re.compile("https?://dsql\\.(.+)\\.api\\.aws")), + ("dynamodb", re.compile("https?://dynamodb\\.(.+)\\.amazonaws\\.com")), + ( + "dynamodbstreams", + re.compile("https?://streams\\.dynamodb\\.(.+)\\.amazonaws.com"), + ), + ("ebs", re.compile("https?://ebs\\.(.+)\\.amazonaws\\.com")), + ("ec2", re.compile("https?://ec2\\.(.+)\\.amazonaws\\.com(|\\.cn)")), + ( + "ec2instanceconnect", + re.compile("https?://ec2-instance-connect\\.(.+)\\.amazonaws\\.com"), + ), + ("ecr", re.compile("https?://ecr\\.(.+)\\.amazonaws\\.com")), + ("ecr", re.compile("https?://api\\.ecr\\.(.+)\\.amazonaws\\.com")), + ("ecs", re.compile("https?://ecs\\.(.+)\\.amazonaws\\.com")), + ("efs", re.compile("https?://elasticfilesystem\\.(.+)\\.amazonaws.com")), + ("efs", re.compile("https?://elasticfilesystem\\.amazonaws.com")), + ("eks", re.compile("https?://eks\\.(.+)\\.amazonaws.com")), + ("elasticache", re.compile("https?://elasticache\\.(.+)\\.amazonaws\\.com")), + ( + "elasticbeanstalk", + re.compile( + "https?://elasticbeanstalk\\.(?P[a-zA-Z0-9\\-_]+)\\.amazonaws.com" + ), + ), + ( + "elastictranscoder", + re.compile("https?://elastictranscoder\\.(.+)\\.amazonaws.com"), + ), + ("elb", re.compile("https?://elasticloadbalancing\\.(.+)\\.amazonaws.com")), + ("elbv2", re.compile("https?://elasticloadbalancing\\.(.+)\\.amazonaws.com")), + ("emr", re.compile("https?://(.+)\\.elasticmapreduce\\.amazonaws.com")), + ("emr", re.compile("https?://elasticmapreduce\\.(.+)\\.amazonaws.com")), + ("emrcontainers", re.compile("https?://emr-containers\\.(.+)\\.amazonaws\\.com")), + ("emrserverless", re.compile("https?://emr-serverless\\.(.+)\\.amazonaws\\.com")), + ("es", re.compile("https?://es\\.(.+)\\.amazonaws\\.com")), + ("events", re.compile("https?://events\\.(.+)\\.amazonaws\\.com")), + ("firehose", re.compile("https?://firehose\\.(.+)\\.amazonaws\\.com")), + ("forecast", re.compile("https?://forecast\\.(.+)\\.amazonaws\\.com")), + ("fsx", re.compile("https?://fsx\\.(.+)\\.amazonaws\\.com")), + ("glacier", re.compile("https?://glacier\\.(.+)\\.amazonaws.com")), + ("glue", re.compile("https?://glue\\.(.+)\\.amazonaws\\.com")), + ("greengrass", re.compile("https?://greengrass\\.(.+)\\.amazonaws.com")), + ("guardduty", re.compile("https?://guardduty\\.(.+)\\.amazonaws\\.com")), + ("iam", re.compile("https?://iam\\.(.*\\.)?amazonaws\\.com")), + ("identitystore", re.compile("https?://identitystore\\.(.+)\\.amazonaws\\.com")), + ("inspector2", re.compile("https?://inspector2\\.(.+)\\.amazonaws\\.com")), + ("instance_metadata", re.compile("http://169.254.169.254")), + ("iot", re.compile("https?://iot\\.(.+)\\.amazonaws\\.com")), + ("iotdata", re.compile("https?://data\\.iot\\.(.+)\\.amazonaws.com")), + ("iotdata", re.compile("https?://data-ats\\.iot\\.(.+)\\.amazonaws.com")), + ("ivs", re.compile("https?://ivs\\.(.+)\\.amazonaws\\.com")), + ("kafka", re.compile("https?://kafka\\.(.+)\\.amazonaws\\.com")), + ("kinesis", re.compile("https?://kinesis\\.(.+)\\.amazonaws\\.com")), + ("kinesis", re.compile("https?://(.+)\\.control-kinesis\\.(.+)\\.amazonaws\\.com")), + ("kinesis", re.compile("https?://(.+)\\.data-kinesis\\.(.+)\\.amazonaws\\.com")), + ("kinesisvideo", re.compile("https?://kinesisvideo\\.(.+)\\.amazonaws.com")), + ( + "kinesisvideoarchivedmedia", + re.compile("https?://.*\\.kinesisvideo\\.(.+)\\.amazonaws.com"), + ), + ("kms", re.compile("https?://kms\\.(.+)\\.amazonaws\\.com")), + ("lakeformation", 
re.compile("https?://lakeformation\\.(.+)\\.amazonaws\\.com")), + ("logs", re.compile("https?://logs\\.(.+)\\.amazonaws\\.com")), + ( + "managedblockchain", + re.compile("https?://managedblockchain\\.(.+)\\.amazonaws.com"), + ), + ("mediaconnect", re.compile("https?://mediaconnect\\.(.+)\\.amazonaws.com")), + ("medialive", re.compile("https?://medialive\\.(.+)\\.amazonaws.com")), + ("mediapackage", re.compile("https?://mediapackage\\.(.+)\\.amazonaws.com")), + ("mediastore", re.compile("https?://mediastore\\.(.+)\\.amazonaws\\.com")), + ("mediastoredata", re.compile("https?://data\\.mediastore\\.(.+)\\.amazonaws.com")), + ("memorydb", re.compile("https?://memory-db\\.(.+)\\.amazonaws\\.com")), + ( + "meteringmarketplace", + re.compile("https?://metering\\.marketplace\\.(.+)\\.amazonaws\\.com"), + ), + ( + "meteringmarketplace", + re.compile("https?://aws-marketplace\\.(.+)\\.amazonaws\\.com"), + ), + ("moto_api._internal", re.compile("https?://motoapi\\.amazonaws\\.com")), + ("mq", re.compile("https?://mq\\.(.+)\\.amazonaws\\.com")), + ("networkmanager", re.compile("https?://networkmanager\\.(.+)\\.amazonaws\\.com")), + ("opensearchserverless", re.compile("https?://aoss\\.(.+)\\.amazonaws\\.com")), + ("opsworks", re.compile("https?://opsworks\\.us-east-1\\.amazonaws.com")), + ("organizations", re.compile("https?://organizations\\.(.+)\\.amazonaws\\.com")), + ("osis", re.compile("https?://osis\\.(.+)\\.amazonaws\\.com")), + ("panorama", re.compile("https?://panorama\\.(.+)\\.amazonaws.com")), + ("personalize", re.compile("https?://personalize\\.(.+)\\.amazonaws\\.com")), + ("pinpoint", re.compile("https?://pinpoint\\.(.+)\\.amazonaws\\.com")), + ("polly", re.compile("https?://polly\\.(.+)\\.amazonaws.com")), + ("qldb", re.compile("https?://qldb\\.(.+)\\.amazonaws\\.com")), + ("quicksight", re.compile("https?://quicksight\\.(.+)\\.amazonaws\\.com")), + ("ram", re.compile("https?://ram\\.(.+)\\.amazonaws.com")), + ("rds", re.compile("https?://rds\\.(.+)\\.amazonaws\\.com")), + ("rds", re.compile("https?://rds\\.amazonaws\\.com")), + ("rdsdata", re.compile("https?://rds-data\\.(.+)\\.amazonaws\\.com")), + ("redshift", re.compile("https?://redshift\\.(.+)\\.amazonaws\\.com")), + ("redshiftdata", re.compile("https?://redshift-data\\.(.+)\\.amazonaws\\.com")), + ("rekognition", re.compile("https?://rekognition\\.(.+)\\.amazonaws\\.com")), + ("resiliencehub", re.compile("https?://resiliencehub\\.(.+)\\.amazonaws\\.com")), + ( + "resourcegroups", + re.compile("https?://resource-groups(-fips)?\\.(.+)\\.amazonaws.com"), + ), + ("resourcegroupstaggingapi", re.compile("https?://tagging\\.(.+)\\.amazonaws.com")), + ("robomaker", re.compile("https?://robomaker\\.(.+)\\.amazonaws\\.com")), + ("route53", re.compile("https?://route53(\\..+)?\\.amazonaws.com")), + ("route53domains", re.compile("https?://route53domains\\.(.+)\\.amazonaws\\.com")), + ( + "route53resolver", + re.compile("https?://route53resolver\\.(.+)\\.amazonaws\\.com"), + ), + ("s3", re.compile("https?://s3(?!(-control|tables))(.*)\\.amazonaws.com")), + ( + "s3", + re.compile( + "https?://(?P[a-zA-Z0-9\\-_.]*)\\.?s3(?!(-control|tables))(.*)\\.amazonaws.com" + ), + ), + ( + "s3control", + re.compile("https?://([0-9]+)\\.s3-control\\.(.+)\\.amazonaws\\.com"), + ), + ("s3tables", re.compile("https?://s3tables\\.(.+)\\.amazonaws\\.com")), + ("sagemaker", re.compile("https?://api\\.sagemaker\\.(.+)\\.amazonaws.com")), + ( + "sagemakermetrics", + re.compile("https?://metrics\\.sagemaker\\.(.+)\\.amazonaws\\.com"), + ), + ( + "sagemakerruntime", + 
re.compile("https?://runtime\\.sagemaker\\.(.+)\\.amazonaws\\.com"), + ), + ("scheduler", re.compile("https?://scheduler\\.(.+)\\.amazonaws\\.com")), + ("sdb", re.compile("https?://sdb\\.(.+)\\.amazonaws\\.com")), + ("secretsmanager", re.compile("https?://secretsmanager\\.(.+)\\.amazonaws\\.com")), + ( + "servicediscovery", + re.compile("https?://(data-)?servicediscovery\\.(.+)\\.amazonaws\\.com"), + ), + ("servicequotas", re.compile("https?://servicequotas\\.(.+)\\.amazonaws\\.com")), + ("ses", re.compile("https?://email\\.(.+)\\.amazonaws\\.com")), + ("ses", re.compile("https?://ses\\.(.+)\\.amazonaws\\.com")), + ("sesv2", re.compile("https?://email\\.(.+)\\.amazonaws\\.com")), + ("shield", re.compile("https?://shield\\.(.+)\\.amazonaws\\.com")), + ("signer", re.compile("https?://signer\\.(.+)\\.amazonaws\\.com")), + ("sns", re.compile("https?://sns\\.(.+)\\.amazonaws\\.com")), + ("sqs", re.compile("https?://(.*\\.)?(queue|sqs)\\.(.*\\.)?amazonaws\\.com")), + ("ssm", re.compile("https?://ssm\\.(.+)\\.amazonaws\\.com")), + ("ssm", re.compile("https?://ssm\\.(.+)\\.amazonaws\\.com\\.cn")), + ("ssoadmin", re.compile("https?://sso\\.(.+)\\.amazonaws\\.com")), + ("stepfunctions", re.compile("https?://states\\.(.+)\\.amazonaws.com")), + ("sts", re.compile("https?://sts\\.(.*\\.)?amazonaws\\.com")), + ("support", re.compile("https?://support\\.(.+)\\.amazonaws\\.com")), + ("swf", re.compile("https?://swf\\.(.+)\\.amazonaws\\.com")), + ("textract", re.compile("https?://textract\\.(.+)\\.amazonaws\\.com")), + ( + "timestreamquery", + re.compile("https?://query\\.timestream\\.(.+)\\.amazonaws\\.com"), + ), + ( + "timestreamwrite", + re.compile("https?://ingest\\.timestream\\.(.+)\\.amazonaws\\.com"), + ), + ( + "timestreamwrite", + re.compile("https?://ingest\\.timestream\\.(.+)\\.amazonaws\\.com/"), + ), + ("transcribe", re.compile("https?://transcribe\\.(.+)\\.amazonaws\\.com")), + ("transfer", re.compile("https?://transfer\\.(.+)\\.amazonaws\\.com")), + ("wafv2", re.compile("https?://wafv2\\.(.+)\\.amazonaws.com")), + ("workspaces", re.compile("https?://workspaces\\.(.+)\\.amazonaws\\.com")), + ("workspacesweb", re.compile("https?://workspaces-web\\.(.+)\\.amazonaws\\.com")), + ("xray", re.compile("https?://xray\\.(.+)\\.amazonaws.com")), +] diff --git a/moto/cloudhsmv2/exceptions.py b/moto/cloudhsmv2/exceptions.py index f1877a951c51..e8b88b84fa6c 100644 --- a/moto/cloudhsmv2/exceptions.py +++ b/moto/cloudhsmv2/exceptions.py @@ -1,2 +1 @@ """Exceptions raised by the cloudhsmv2 service.""" - diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index fa5cb894abb8..ec7ce321dc61 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -8,12 +8,81 @@ class CloudHSMV2Backend(BaseBackend): def __init__(self, region_name, account_id): super().__init__(region_name, account_id) - - # add methods from here + self.tags = {} # Dict to store resource tags: {resource_id: [{Key: str, Value: str}]} def list_tags(self, resource_id, next_token, max_results): + """List tags for a CloudHSM resource. 
+ + Args: + resource_id (str): The identifier of the resource to list tags for + next_token (str): Token for pagination + max_results (int): Maximum number of results to return + + Returns: + tuple: (list of tags, next token) + """ + + if resource_id not in self.tags: + return [], None + + tags = self.tags.get(resource_id, []) + + # Handle pagination + start_idx = 0 + if next_token: + try: + start_idx = int(next_token) + except ValueError: + start_idx = 0 + + if max_results is None: + max_results = 50 # Default AWS limit + + end_idx = start_idx + max_results + result_tags = tags[start_idx:end_idx] + + # Generate next token if there are more results + next_token = str(end_idx) if end_idx < len(tags) else None + + return result_tags, next_token + + def tag_resource(self, resource_id, tag_list): + """Add or update tags for a CloudHSM resource. + + Args: + resource_id (str): The identifier of the resource to tag + tag_list (list): List of tag dictionaries with 'Key' and 'Value' pairs + + Returns: + dict: Empty dictionary per AWS spec + + Raises: + ValueError: If resource_id or tag_list is None + """ + if resource_id is None: + raise ValueError("ResourceId must not be None") + if tag_list is None: + raise ValueError("TagList must not be None") + + if resource_id not in self.tags: + self.tags[resource_id] = [] + + # Update existing tags and add new ones + for new_tag in tag_list: + tag_exists = False + for existing_tag in self.tags[resource_id]: + if existing_tag["Key"] == new_tag["Key"]: + existing_tag["Value"] = new_tag["Value"] + tag_exists = True + break + if not tag_exists: + self.tags[resource_id].append(new_tag) + + return {} + + def untag_resource(self, resource_id, tag_key_list): # implement here - return tag_list, next_token + return cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 9e82eb38fc49..11fa79721d74 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -21,20 +21,44 @@ def cloudhsmv2_backend(self): # Please modify moto/backends.py to add the appropriate type annotations for this service return cloudhsmv2_backends[self.current_account][self.region] - # add methods from here - def list_tags(self): - params = self._get_params() + # The params are coming as a JSON string, so we need to parse them first + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + resource_id = params.get("ResourceId") next_token = params.get("NextToken") max_results = params.get("MaxResults") + tag_list, next_token = self.cloudhsmv2_backend.list_tags( resource_id=resource_id, next_token=next_token, max_results=max_results, ) - # TODO: adjust response - return json.dumps(dict(tagList=tag_list, nextToken=next_token)) + return 200, {}, json.dumps({"TagList": tag_list, "NextToken": next_token}) + + def tag_resource(self): + # The params are coming as a JSON string, so we need to parse them first + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + + resource_id = params.get("ResourceId") + tag_list = params.get("TagList") + + self.cloudhsmv2_backend.tag_resource( + resource_id=resource_id, + tag_list=tag_list, + ) + return json.dumps(dict()) -# add templates from here + def untag_resource(self): + params = self._get_params() + resource_id = params.get("ResourceId") + tag_key_list = params.get("TagKeyList") + self.cloudhsmv2_backend.untag_resource( + resource_id=resource_id, + tag_key_list=tag_key_list, + ) + # TODO: adjust 
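# CloudHSM v2 is a JSON-protocol service, so the request body arrives as one
# JSON document; the response handlers above recover it via the first key of
# _get_params() and json.loads() it, as their inline comments note. A minimal
# standalone sketch of that parsing step (raw_body is a made-up example):
import json

raw_body = '{"ResourceId": "cluster-1234", "TagList": [{"Key": "Environment", "Value": "Production"}]}'
params = json.loads(raw_body)
resource_id = params.get("ResourceId")  # "cluster-1234"
tag_list = params.get("TagList", [])    # list of {"Key": ..., "Value": ...} dicts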
response + return json.dumps(dict()) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index c3fd7b552d16..dd2b75be95c3 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -11,6 +11,85 @@ @mock_aws def test_list_tags(): client = boto3.client("cloudhsmv2", region_name="eu-west-1") - resp = client.list_tags() + + # Create tags for a resource + resource_id = "cluster-1234" + client.tag_resource( + ResourceId=resource_id, + TagList=[ + {"Key": "Environment", "Value": "Production"}, + {"Key": "Project", "Value": "Security"}, + ], + ) + + # Test listing all tags + response = client.list_tags(ResourceId=resource_id) + assert len(response["TagList"]) == 2 + assert {"Key": "Environment", "Value": "Production"} in response["TagList"] + assert {"Key": "Project", "Value": "Security"} in response["TagList"] + assert "NextToken" not in response + + # Test pagination + response = client.list_tags(ResourceId=resource_id, MaxResults=1) + assert len(response["TagList"]) == 1 + assert "NextToken" in response + + # Get next page + response = client.list_tags( + ResourceId=resource_id, MaxResults=1, NextToken=response["NextToken"] + ) + assert len(response["TagList"]) == 1 + assert "NextToken" not in response + + +@mock_aws +def test_tag_resource(): + client = boto3.client("cloudhsmv2", region_name="eu-west-1") + resource_id = "cluster-1234" + + # Test adding new tags + response = client.tag_resource( + ResourceId=resource_id, + TagList=[ + {"Key": "Environment", "Value": "Production"}, + {"Key": "Project", "Value": "Security"}, + ], + ) + + # Verify tags were added + tags = client.list_tags(ResourceId=resource_id)["TagList"] + assert len(tags) == 2 + assert {"Key": "Environment", "Value": "Production"} in tags + assert {"Key": "Project", "Value": "Security"} in tags + + # Test updating existing tag + response = client.tag_resource( + ResourceId=resource_id, + TagList=[ + {"Key": "Environment", "Value": "Development"} # Update existing tag + ], + ) + + # Verify tag was updated + tags = client.list_tags(ResourceId=resource_id)["TagList"] + assert len(tags) == 2 + assert {"Key": "Environment", "Value": "Development"} in tags + assert {"Key": "Project", "Value": "Security"} in tags + + +@mock_aws +def test_list_tags_empty_resource(): + client = boto3.client("cloudhsmv2", region_name="eu-west-1") + + # Test listing tags for resource with no tags + response = client.list_tags(ResourceId="non-existent-resource") + assert response["TagList"] == [] + assert "NextToken" not in response + + +@mock_aws +def test_untag_resource(): + client = boto3.client("cloudhsmv2", region_name="ap-southeast-1") + resp = client.untag_resource() raise Exception("NotYetImplemented") From ef15e33dd640cb70858e49a4cf60be8ceb73e7f1 Mon Sep 17 00:00:00 2001 From: Aman Date: Mon, 3 Feb 2025 14:35:49 -0500 Subject: [PATCH 03/15] All tests work --- moto/cloudhsmv2/models.py | 63 +++++++++++++++++------- moto/cloudhsmv2/responses.py | 10 ++-- tests/test_cloudhsmv2/test_cloudhsmv2.py | 46 ++++++++++------- 3 files changed, 76 insertions(+), 43 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index ec7ce321dc61..9067e9125d72 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -1,6 +1,8 @@ """CloudHSMV2Backend class with methods for supported APIs.""" + from moto.core.base_backend import BackendDict, BaseBackend +from moto.utilities.paginator import Paginator class CloudHSMV2Backend(BaseBackend): @@ 
-8,7 +10,7 @@ class CloudHSMV2Backend(BaseBackend): def __init__(self, region_name, account_id): super().__init__(region_name, account_id) - self.tags = {} # Dict to store resource tags: {resource_id: [{Key: str, Value: str}]} + self.tags = {} def list_tags(self, resource_id, next_token, max_results): """List tags for a CloudHSM resource. @@ -21,30 +23,34 @@ def list_tags(self, resource_id, next_token, max_results): Returns: tuple: (list of tags, next token) """ - if resource_id not in self.tags: return [], None - tags = self.tags.get(resource_id, []) + tags = sorted(self.tags.get(resource_id, []), key=lambda x: x["Key"]) - # Handle pagination - start_idx = 0 + if not max_results: + return tags, None + + # Add padding to the token if it exists if next_token: - try: - start_idx = int(next_token) - except ValueError: - start_idx = 0 + padding = 4 - (len(next_token) % 4) + if padding != 4: + next_token = next_token + ("=" * padding) - if max_results is None: - max_results = 50 # Default AWS limit + paginator = Paginator( + max_results=max_results, + unique_attribute="Key", + starting_token=next_token, + fail_on_invalid_token=False, + ) - end_idx = start_idx + max_results - result_tags = tags[start_idx:end_idx] + results, token = paginator.paginate(tags) - # Generate next token if there are more results - next_token = str(end_idx) if end_idx < len(tags) else None + # Remove padding from the token before returning + if token: + token = token.rstrip("=") - return result_tags, next_token + return results, token def tag_resource(self, resource_id, tag_list): """Add or update tags for a CloudHSM resource. @@ -81,8 +87,29 @@ def tag_resource(self, resource_id, tag_list): return {} def untag_resource(self, resource_id, tag_key_list): - # implement here - return + """Remove tags from a CloudHSM resource. 
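# A minimal sketch of the token handling wrapped around moto's Paginator in
# list_tags above: the continuation token is base64-like, so its "=" padding
# is stripped before being returned to the caller and restored before the
# token is parsed again. The helper names below are illustrative only.
def _unpad(token):
    # Strip base64 padding from the outgoing NextToken.
    return token.rstrip("=") if token else token

def _repad(token):
    # Restore padding so the incoming token is valid base64 again.
    missing = len(token) % 4
    if missing:
        token += "=" * (4 - missing)
    return token

# Round trip: stripping and restoring padding yields the original token.
assert _repad(_unpad("AAAAAA==")) == "AAAAAA=="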
+ + Args: + resource_id (str): The identifier of the resource to untag + tag_key_list (list): List of tag keys to remove + + Returns: + dict: Empty dictionary per AWS spec + + Raises: + ValueError: If resource_id or tag_key_list is None + """ + if resource_id is None: + raise ValueError("ResourceId must not be None") + if tag_key_list is None: + raise ValueError("TagKeyList must not be None") + + if resource_id in self.tags: + self.tags[resource_id] = [ + tag for tag in self.tags[resource_id] if tag["Key"] not in tag_key_list + ] + + return {} cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 11fa79721d74..37b6b6c8b76f 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -16,13 +16,9 @@ def __init__(self): @property def cloudhsmv2_backend(self): """Return backend instance specific for this region.""" - # TODO - # cloudhsmv2_backends is not yet typed - # Please modify moto/backends.py to add the appropriate type annotations for this service return cloudhsmv2_backends[self.current_account][self.region] def list_tags(self): - # The params are coming as a JSON string, so we need to parse them first raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -39,7 +35,6 @@ def list_tags(self): return 200, {}, json.dumps({"TagList": tag_list, "NextToken": next_token}) def tag_resource(self): - # The params are coming as a JSON string, so we need to parse them first raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -53,12 +48,13 @@ def tag_resource(self): return json.dumps(dict()) def untag_resource(self): - params = self._get_params() + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + resource_id = params.get("ResourceId") tag_key_list = params.get("TagKeyList") self.cloudhsmv2_backend.untag_resource( resource_id=resource_id, tag_key_list=tag_key_list, ) - # TODO: adjust response return json.dumps(dict()) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index dd2b75be95c3..a7c2f06ce179 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -10,9 +10,8 @@ @mock_aws def test_list_tags(): - client = boto3.client("cloudhsmv2", region_name="eu-west-1") + client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create tags for a resource resource_id = "cluster-1234" client.tag_resource( ResourceId=resource_id, @@ -22,19 +21,16 @@ def test_list_tags(): ], ) - # Test listing all tags response = client.list_tags(ResourceId=resource_id) assert len(response["TagList"]) == 2 assert {"Key": "Environment", "Value": "Production"} in response["TagList"] assert {"Key": "Project", "Value": "Security"} in response["TagList"] assert "NextToken" not in response - # Test pagination response = client.list_tags(ResourceId=resource_id, MaxResults=1) assert len(response["TagList"]) == 1 assert "NextToken" in response - # Get next page response = client.list_tags( ResourceId=resource_id, MaxResults=1, NextToken=response["NextToken"] ) @@ -44,10 +40,9 @@ def test_list_tags(): @mock_aws def test_tag_resource(): - client = boto3.client("cloudhsmv2", region_name="eu-west-1") + client = boto3.client("cloudhsmv2", region_name="us-east-1") resource_id = "cluster-1234" - # Test adding new tags response = client.tag_resource( ResourceId=resource_id, TagList=[ @@ -56,21 +51,18 @@ def test_tag_resource(): ], ) - # 
Verify tags were added tags = client.list_tags(ResourceId=resource_id)["TagList"] assert len(tags) == 2 assert {"Key": "Environment", "Value": "Production"} in tags assert {"Key": "Project", "Value": "Security"} in tags - # Test updating existing tag response = client.tag_resource( ResourceId=resource_id, - TagList=[ - {"Key": "Environment", "Value": "Development"} # Update existing tag - ], + TagList=[{"Key": "Environment", "Value": "Development"}], ) - # Verify tag was updated + assert "ResponseMetadata" in response + tags = client.list_tags(ResourceId=resource_id)["TagList"] assert len(tags) == 2 assert {"Key": "Environment", "Value": "Development"} in tags @@ -79,9 +71,8 @@ def test_tag_resource(): @mock_aws def test_list_tags_empty_resource(): - client = boto3.client("cloudhsmv2", region_name="eu-west-1") + client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Test listing tags for resource with no tags response = client.list_tags(ResourceId="non-existent-resource") assert response["TagList"] == [] assert "NextToken" not in response @@ -89,7 +80,26 @@ def test_list_tags_empty_resource(): @mock_aws def test_untag_resource(): - client = boto3.client("cloudhsmv2", region_name="ap-southeast-1") - resp = client.untag_resource() + client = boto3.client("cloudhsmv2", region_name="us-east-1") + resource_id = "cluster-1234" + + client.tag_resource( + ResourceId=resource_id, + TagList=[ + {"Key": "Environment", "Value": "Production"}, + {"Key": "Project", "Value": "Security"}, + {"Key": "Team", "Value": "DevOps"}, + ], + ) + + initial_tags = client.list_tags(ResourceId=resource_id)["TagList"] + assert len(initial_tags) == 3 + + response = client.untag_resource( + ResourceId=resource_id, TagKeyList=["Environment", "Team"] + ) + assert "ResponseMetadata" in response - raise Exception("NotYetImplemented") + remaining_tags = client.list_tags(ResourceId=resource_id)["TagList"] + assert len(remaining_tags) == 1 + assert {"Key": "Project", "Value": "Security"} in remaining_tags From 062705ab4fa417da6d4e65e45cde101ec3cb2ee4 Mon Sep 17 00:00:00 2001 From: Aman Date: Mon, 3 Feb 2025 15:27:37 -0500 Subject: [PATCH 04/15] Create Cluster Works --- moto/cloudhsmv2/models.py | 144 +++++++++++++++++++++++ moto/cloudhsmv2/responses.py | 63 ++++++++++ tests/test_cloudhsmv2/test_cloudhsmv2.py | 139 ++++++++++++++++++++++ 3 files changed, 346 insertions(+) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 9067e9125d72..d35458fde35f 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -1,16 +1,74 @@ """CloudHSMV2Backend class with methods for supported APIs.""" +import uuid +from typing import Dict, List, Optional from moto.core.base_backend import BackendDict, BaseBackend +from moto.core.utils import utcnow from moto.utilities.paginator import Paginator +class Cluster: + def __init__( + self, + backup_retention_policy: Optional[Dict[str, str]], + hsm_type: str, + source_backup_id: Optional[str], + subnet_ids: List[str], + network_type: str, + tag_list: Optional[List[Dict[str, str]]], + mode: str, + region_name: str, + ): + self.cluster_id = str(uuid.uuid4()) + self.backup_policy = "DEFAULT" + self.backup_retention_policy = backup_retention_policy + self.create_timestamp = utcnow() + self.hsms = [] + self.hsm_type = hsm_type + self.source_backup_id = source_backup_id + self.state = "CREATE_IN_PROGRESS" + self.state_message = "Cluster creation in progress" + self.subnet_mapping = {subnet_id: region_name for subnet_id in subnet_ids} + self.vpc_id = "vpc-" + 
str(uuid.uuid4())[:8] + self.network_type = network_type + self.certificates = { + "ClusterCsr": "", + "HsmCertificate": "", + "AwsHardwareCertificate": "", + "ManufacturerHardwareCertificate": "", + "ClusterCertificate": "", + } + self.tag_list = tag_list or [] + self.mode = mode + + def to_dict(self) -> Dict: + return { + "BackupPolicy": self.backup_policy, + "BackupRetentionPolicy": self.backup_retention_policy, + "ClusterId": self.cluster_id, + "CreateTimestamp": self.create_timestamp, + "Hsms": self.hsms, + "HsmType": self.hsm_type, + "SourceBackupId": self.source_backup_id, + "State": self.state, + "StateMessage": self.state_message, + "SubnetMapping": self.subnet_mapping, + "VpcId": self.vpc_id, + "NetworkType": self.network_type, + "Certificates": self.certificates, + "TagList": self.tag_list, + "Mode": self.mode, + } + + class CloudHSMV2Backend(BaseBackend): """Implementation of CloudHSMV2 APIs.""" def __init__(self, region_name, account_id): super().__init__(region_name, account_id) self.tags = {} + self.clusters = {} def list_tags(self, resource_id, next_token, max_results): """List tags for a CloudHSM resource. @@ -111,5 +169,91 @@ def untag_resource(self, resource_id, tag_key_list): return {} + def create_cluster( + self, + backup_retention_policy: Optional[Dict[str, str]], + hsm_type: str, + source_backup_id: Optional[str], + subnet_ids: List[str], + network_type: str, + tag_list: Optional[List[Dict[str, str]]], + mode: str, + ) -> Dict: + cluster = Cluster( + backup_retention_policy=backup_retention_policy, + hsm_type=hsm_type, + source_backup_id=source_backup_id, + subnet_ids=subnet_ids, + network_type=network_type, + tag_list=tag_list, + mode=mode, + region_name=self.region_name, + ) + self.clusters[cluster.cluster_id] = cluster + return cluster.to_dict() + + def delete_cluster(self, cluster_id: str) -> Dict: + """Delete a CloudHSM cluster. + + Args: + cluster_id (str): The identifier of the cluster to delete + + Returns: + dict: The deleted cluster details + + Raises: + ValueError: If cluster_id is not found + """ + if cluster_id not in self.clusters: + raise ValueError(f"Cluster {cluster_id} not found") + + cluster = self.clusters[cluster_id] + cluster.state = "DELETE_IN_PROGRESS" + cluster.state_message = "Cluster deletion in progress" + + # Remove the cluster from the backend + del self.clusters[cluster_id] + + return cluster.to_dict() + + def describe_clusters(self, filters, next_token, max_results): + """Describe CloudHSM clusters. 
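# A short usage sketch of the cluster lifecycle modelled here, assuming the
# same placeholder subnet id as the patch's tests: create_cluster returns a
# cluster in CREATE_IN_PROGRESS, and delete_cluster flips it to
# DELETE_IN_PROGRESS and drops it from the backend store. The function name
# cluster_lifecycle_sketch is illustrative only.
import boto3
from moto import mock_aws

@mock_aws
def cluster_lifecycle_sketch():
    client = boto3.client("cloudhsmv2", region_name="us-east-1")
    cluster = client.create_cluster(
        HsmType="hsm1.medium", SubnetIds=["subnet-12345678"]
    )["Cluster"]
    assert cluster["State"] == "CREATE_IN_PROGRESS"
    deleted = client.delete_cluster(ClusterId=cluster["ClusterId"])["Cluster"]
    assert deleted["State"] == "DELETE_IN_PROGRESS"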
+ + Args: + filters (dict): Filters to apply + next_token (str): Token for pagination + max_results (int): Maximum number of results to return + + Returns: + tuple: (list of clusters, next token) + """ + clusters = list(self.clusters.values()) + + # Apply filters if provided + if filters: + for key, values in filters.items(): + if key == "clusterIds": + clusters = [c for c in clusters if c.cluster_id in values] + elif key == "states": + clusters = [c for c in clusters if c.state in values] + elif key == "vpcIds": + clusters = [c for c in clusters if c.vpc_id in values] + + # Sort clusters by creation timestamp for consistent pagination + clusters = sorted(clusters, key=lambda x: x.create_timestamp) + + if not max_results: + return [c.to_dict() for c in clusters], None + + paginator = Paginator( + max_results=max_results, + unique_attribute="ClusterId", + starting_token=next_token, + fail_on_invalid_token=False, + ) + + results, token = paginator.paginate([c.to_dict() for c in clusters]) + return results, token + cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 37b6b6c8b76f..1d1bae42433a 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -1,12 +1,20 @@ """Handles incoming cloudhsmv2 requests, invokes methods, returns responses.""" import json +from datetime import datetime from moto.core.responses import BaseResponse from .models import cloudhsmv2_backends +class DateTimeEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, datetime): + return obj.isoformat() + return super().default(obj) + + class CloudHSMV2Response(BaseResponse): """Handler for CloudHSMV2 requests and responses.""" @@ -58,3 +66,58 @@ def untag_resource(self): tag_key_list=tag_key_list, ) return json.dumps(dict()) + + def create_cluster(self): + # Get raw params and print for debugging + raw_params = self._get_params() + + # Use BaseResponse's _get_param method to get individual parameters directly + backup_retention_policy = self._get_param("BackupRetentionPolicy") + hsm_type = self._get_param("HsmType") + source_backup_id = self._get_param("SourceBackupId") + subnet_ids = self._get_param("SubnetIds", []) + network_type = self._get_param("NetworkType", "IPV4") + tag_list = self._get_param("TagList") + mode = self._get_param("Mode", "FIPS") + + cluster = self.cloudhsmv2_backend.create_cluster( + backup_retention_policy=backup_retention_policy, + hsm_type=hsm_type, + source_backup_id=source_backup_id, + subnet_ids=subnet_ids, + network_type=network_type, + tag_list=tag_list, + mode=mode, + ) + return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) + + def delete_cluster(self): + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + + cluster_id = params.get("ClusterId") + try: + cluster = self.cloudhsmv2_backend.delete_cluster(cluster_id=cluster_id) + return json.dumps({"Cluster": cluster}) + except ValueError as e: + return self.error("ClusterNotFoundFault", str(e)) + + def describe_clusters(self): + raw_params = list(self._get_params().keys())[0] if self._get_params() else "{}" + params = json.loads(raw_params) + + filters = params.get("Filters", {}) + next_token = params.get("NextToken") + max_results = params.get("MaxResults") + + clusters, next_token = self.cloudhsmv2_backend.describe_clusters( + filters=filters, + next_token=next_token, + max_results=max_results, + ) + + response = {"Clusters": clusters} + if next_token: + 
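# Cluster dicts carry a datetime CreateTimestamp, which json.dumps rejects by
# default; the DateTimeEncoder defined above serialises it as an ISO-8601
# string instead. A self-contained sketch of that behaviour (the class name
# _IsoEncoder is illustrative, mirroring DateTimeEncoder):
import json
from datetime import datetime, timezone

class _IsoEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, datetime):
            return obj.isoformat()
        return super().default(obj)

payload = {"CreateTimestamp": datetime(2025, 2, 3, tzinfo=timezone.utc)}
print(json.dumps(payload, cls=_IsoEncoder))
# -> {"CreateTimestamp": "2025-02-03T00:00:00+00:00"}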
response["NextToken"] = next_token + + return json.dumps(response, cls=DateTimeEncoder) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index a7c2f06ce179..501960b07876 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -1,5 +1,7 @@ """Unit tests for cloudhsmv2-supported APIs.""" +from datetime import datetime + import boto3 from moto import mock_aws @@ -103,3 +105,140 @@ def test_untag_resource(): remaining_tags = client.list_tags(ResourceId=resource_id)["TagList"] assert len(remaining_tags) == 1 assert {"Key": "Project", "Value": "Security"} in remaining_tags + + +@mock_aws +def test_create_cluster(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") + + response = client.create_cluster( + BackupRetentionPolicy={"Type": "DAYS", "Value": "7"}, + HsmType="hsm1.medium", + SubnetIds=["subnet-12345678"], + TagList=[{"Key": "Environment", "Value": "Production"}], + ) + + cluster = response["Cluster"] + assert cluster["BackupPolicy"] == "DEFAULT" + assert cluster["BackupRetentionPolicy"] == {"Type": "DAYS", "Value": "7"} + assert "ClusterId" in cluster + assert isinstance(cluster["CreateTimestamp"], datetime) + assert cluster["HsmType"] == "hsm1.medium" + assert cluster["State"] == "CREATE_IN_PROGRESS" + assert cluster["SubnetMapping"] == {"subnet-12345678": "us-east-1"} + assert cluster["TagList"] == [{"Key": "Environment", "Value": "Production"}] + assert "VpcId" in cluster + + # Verify the cluster can be found in describe_clusters + clusters = client.describe_clusters()["Clusters"] + assert len(clusters) == 1 + assert clusters[0]["ClusterId"] == cluster["ClusterId"] + + +# @mock_aws +# def test_delete_cluster(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# # Create a cluster first +# response = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"], +# NetworkType="IPV4", +# Mode="FIPS" +# ) +# cluster_id = response["Cluster"]["ClusterId"] + +# # Delete the cluster +# delete_response = client.delete_cluster(ClusterId=cluster_id) + +# # Verify the response +# deleted_cluster = delete_response["Cluster"] +# assert deleted_cluster["ClusterId"] == cluster_id +# assert deleted_cluster["State"] == "DELETE_IN_PROGRESS" +# assert deleted_cluster["StateMessage"] == "Cluster deletion in progress" + +# # Verify the cluster is no longer listed +# clusters = client.describe_clusters()["Clusters"] +# assert len(clusters) == 0 + + +# @mock_aws +# def test_delete_nonexistent_cluster(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# with pytest.raises(client.exceptions.CloudHsmClientException) as ex: +# client.delete_cluster(ClusterId="non-existent-cluster") + +# assert "Cluster non-existent-cluster not found" in str(ex.value) + + +# @mock_aws +# def test_describe_clusters_no_clusters(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") +# response = client.describe_clusters() + +# assert response["Clusters"] == [] +# assert "NextToken" not in response + + +# @mock_aws +# def test_describe_clusters_with_filters(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# # Create two clusters +# cluster1 = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"], +# NetworkType="IPV4", +# Mode="FIPS" +# ) +# cluster2 = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-87654321"], +# NetworkType="IPV4", +# Mode="FIPS" +# ) + +# # Test filtering by cluster 
ID +# response = client.describe_clusters( +# Filters={ +# "clusterIds": [cluster1["Cluster"]["ClusterId"]] +# } +# ) +# assert len(response["Clusters"]) == 1 +# assert response["Clusters"][0]["ClusterId"] == cluster1["Cluster"]["ClusterId"] + +# # Test filtering by state +# response = client.describe_clusters( +# Filters={ +# "states": ["CREATE_IN_PROGRESS"] +# } +# ) +# assert len(response["Clusters"]) == 2 # Both clusters are in CREATE_IN_PROGRESS state + + +# @mock_aws +# def test_describe_clusters_pagination(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# # Create three clusters +# for _ in range(3): +# client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"], +# NetworkType="IPV4", +# Mode="FIPS" +# ) + +# # Test pagination +# response = client.describe_clusters(MaxResults=2) +# assert len(response["Clusters"]) == 2 +# assert "NextToken" in response + +# # Get remaining clusters +# response2 = client.describe_clusters( +# MaxResults=2, +# NextToken=response["NextToken"] +# ) +# assert len(response2["Clusters"]) == 1 +# assert "NextToken" not in response2 From 6d6fde13be7861f4e409604a07f4c0d1da38e45e Mon Sep 17 00:00:00 2001 From: Aman Date: Tue, 4 Feb 2025 10:32:22 -0500 Subject: [PATCH 05/15] Tests pass but still error in adding additional params to CreateCluster --- moto/cloudhsmv2/models.py | 14 ++++- moto/cloudhsmv2/responses.py | 37 +++++++++++--- tests/test_cloudhsmv2/test_cloudhsmv2.py | 65 +++++++++++++++--------- 3 files changed, 83 insertions(+), 33 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index d35458fde35f..9287e4ce2fb2 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -175,9 +175,9 @@ def create_cluster( hsm_type: str, source_backup_id: Optional[str], subnet_ids: List[str], - network_type: str, + network_type: Optional[str], tag_list: Optional[List[Dict[str, str]]], - mode: str, + mode: Optional[str], ) -> Dict: cluster = Cluster( backup_retention_policy=backup_retention_policy, @@ -255,5 +255,15 @@ def describe_clusters(self, filters, next_token, max_results): results, token = paginator.paginate([c.to_dict() for c in clusters]) return results, token + def get_resource_policy(self, resource_arn): + # implement here + return policy + + def describe_backups( + self, next_token, max_results, filters, shared, sort_ascending + ): + # implement here + return backups, next_token + cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 1d1bae42433a..bb4554ce891e 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -68,17 +68,14 @@ def untag_resource(self): return json.dumps(dict()) def create_cluster(self): - # Get raw params and print for debugging - raw_params = self._get_params() - # Use BaseResponse's _get_param method to get individual parameters directly - backup_retention_policy = self._get_param("BackupRetentionPolicy") + backup_retention_policy = self._get_param("BackupRetentionPolicy", {}) hsm_type = self._get_param("HsmType") source_backup_id = self._get_param("SourceBackupId") subnet_ids = self._get_param("SubnetIds", []) - network_type = self._get_param("NetworkType", "IPV4") + network_type = self._get_param("NetworkType") tag_list = self._get_param("TagList") - mode = self._get_param("Mode", "FIPS") + mode = self._get_param("Mode") cluster = self.cloudhsmv2_backend.create_cluster( backup_retention_policy=backup_retention_policy, @@ -98,7 
+95,7 @@ def delete_cluster(self): cluster_id = params.get("ClusterId") try: cluster = self.cloudhsmv2_backend.delete_cluster(cluster_id=cluster_id) - return json.dumps({"Cluster": cluster}) + return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) except ValueError as e: return self.error("ClusterNotFoundFault", str(e)) @@ -121,3 +118,29 @@ def describe_clusters(self): response["NextToken"] = next_token return json.dumps(response, cls=DateTimeEncoder) + + # def get_resource_policy(self): + # params = self._get_params() + # resource_arn = params.get("ResourceArn") + # policy = self.cloudhsmv2_backend.get_resource_policy( + # resource_arn=resource_arn, + # ) + # # TODO: adjust response + # return json.dumps(dict(policy=policy)) + + # def describe_backups(self): + # params = self._get_params() + # next_token = params.get("NextToken") + # max_results = params.get("MaxResults") + # filters = params.get("Filters") + # shared = params.get("Shared") + # sort_ascending = params.get("SortAscending") + # backups, next_token = self.cloudhsmv2_backend.describe_backups( + # next_token=next_token, + # max_results=max_results, + # filters=filters, + # shared=shared, + # sort_ascending=sort_ascending, + # ) + # # TODO: adjust response + # return json.dumps(dict(backups=backups, nextToken=next_token)) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index 501960b07876..93a05229fd0b 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -135,31 +135,32 @@ def test_create_cluster(): assert clusters[0]["ClusterId"] == cluster["ClusterId"] -# @mock_aws -# def test_delete_cluster(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# # Create a cluster first -# response = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"], -# NetworkType="IPV4", -# Mode="FIPS" -# ) -# cluster_id = response["Cluster"]["ClusterId"] - -# # Delete the cluster -# delete_response = client.delete_cluster(ClusterId=cluster_id) - -# # Verify the response -# deleted_cluster = delete_response["Cluster"] -# assert deleted_cluster["ClusterId"] == cluster_id -# assert deleted_cluster["State"] == "DELETE_IN_PROGRESS" -# assert deleted_cluster["StateMessage"] == "Cluster deletion in progress" +@mock_aws +def test_delete_cluster(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") -# # Verify the cluster is no longer listed -# clusters = client.describe_clusters()["Clusters"] -# assert len(clusters) == 0 + # Create a cluster first + # TODO: For some reason I can't send network type or mode here + response = client.create_cluster( + HsmType="hsm1.medium", + SubnetIds=["subnet-12345678"], + # NetworkType="IPV4", + # Mode="FIPS", + ) + cluster_id = response["Cluster"]["ClusterId"] + print("cluster_id", cluster_id) + # Delete the cluster + delete_response = client.delete_cluster(ClusterId=cluster_id) + + # Verify the response + deleted_cluster = delete_response["Cluster"] + assert deleted_cluster["ClusterId"] == cluster_id + assert deleted_cluster["State"] == "DELETE_IN_PROGRESS" + assert deleted_cluster["StateMessage"] == "Cluster deletion in progress" + + # Verify the cluster is no longer listed + clusters = client.describe_clusters()["Clusters"] + assert len(clusters) == 0 # @mock_aws @@ -242,3 +243,19 @@ def test_create_cluster(): # ) # assert len(response2["Clusters"]) == 1 # assert "NextToken" not in response2 + + +# @mock_aws +# def test_get_resource_policy(): +# client = 
boto3.client("cloudhsmv2", region_name="us-east-2") +# resp = client.get_resource_policy() + +# raise Exception("NotYetImplemented") + + +# @mock_aws +# def test_describe_backups(): +# client = boto3.client("cloudhsmv2", region_name="ap-southeast-1") +# resp = client.describe_backups() + +# raise Exception("NotYetImplemented") From 7730ebd11072e8b3179863bf9517ca2280f9d2df Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 11:19:49 -0500 Subject: [PATCH 06/15] This is not working code --- moto/cloudhsmv2/models.py | 170 +++++++++++++++++++++-- moto/cloudhsmv2/responses.py | 53 +++++-- tests/test_cloudhsmv2/test_cloudhsmv2.py | 117 +++++++++++++++- 3 files changed, 310 insertions(+), 30 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 9287e4ce2fb2..91b3ed4297d8 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -1,7 +1,7 @@ """CloudHSMV2Backend class with methods for supported APIs.""" import uuid -from typing import Dict, List, Optional +from typing import Dict, List, Optional, Tuple from moto.core.base_backend import BackendDict, BaseBackend from moto.core.utils import utcnow @@ -62,6 +62,64 @@ def to_dict(self) -> Dict: } +class Backup: + def __init__( + self, + cluster_id: str, + hsm_type: str, + mode: str, + tag_list: Optional[List[Dict[str, str]]], + source_backup: Optional[str] = None, + source_cluster: Optional[str] = None, + source_region: Optional[str] = None, + never_expires: bool = False, + region_name: str = "us-east-1", + ): + self.backup_id = str(uuid.uuid4()) + self.backup_arn = ( + f"arn:aws:cloudhsm:{region_name}:123456789012:backup/{self.backup_id}" + ) + # New backups start in CREATE_IN_PROGRESS state + self.backup_state = "CREATE_IN_PROGRESS" + self.cluster_id = cluster_id + self.create_timestamp = utcnow() + self.copy_timestamp = utcnow() if source_backup else None + self.never_expires = never_expires + self.source_region = source_region + self.source_backup = source_backup + self.source_cluster = source_cluster + self.delete_timestamp = None + self.tag_list = tag_list or [] + self.hsm_type = hsm_type + self.mode = mode + + def to_dict(self) -> Dict: + result = { + "BackupId": self.backup_id, + "BackupArn": self.backup_arn, + "BackupState": self.backup_state, + "ClusterId": self.cluster_id, + "CreateTimestamp": self.create_timestamp, + "NeverExpires": self.never_expires, + "TagList": self.tag_list, + "HsmType": self.hsm_type, + "Mode": self.mode, + } + + if self.copy_timestamp: + result["CopyTimestamp"] = self.copy_timestamp + if self.source_region: + result["SourceRegion"] = self.source_region + if self.source_backup: + result["SourceBackup"] = self.source_backup + if self.source_cluster: + result["SourceCluster"] = self.source_cluster + if self.delete_timestamp: + result["DeleteTimestamp"] = self.delete_timestamp + + return result + + class CloudHSMV2Backend(BaseBackend): """Implementation of CloudHSMV2 APIs.""" @@ -69,6 +127,8 @@ def __init__(self, region_name, account_id): super().__init__(region_name, account_id) self.tags = {} self.clusters = {} + self.resource_policies = {} + self.backups = {} def list_tags(self, resource_id, next_token, max_results): """List tags for a CloudHSM resource. 
@@ -190,6 +250,19 @@ def create_cluster( region_name=self.region_name, ) self.clusters[cluster.cluster_id] = cluster + + # Automatically create a backup for the new cluster + backup = Backup( + cluster_id=cluster.cluster_id, + hsm_type=hsm_type, + mode=mode or "DEFAULT", + tag_list=tag_list, + region_name=self.region_name, + ) + self.backups[backup.backup_id] = backup + + # print("Backup is", self.backups) + return cluster.to_dict() def delete_cluster(self, cluster_id: str) -> Dict: @@ -211,7 +284,6 @@ def delete_cluster(self, cluster_id: str) -> Dict: cluster.state = "DELETE_IN_PROGRESS" cluster.state_message = "Cluster deletion in progress" - # Remove the cluster from the backend del self.clusters[cluster_id] return cluster.to_dict() @@ -229,7 +301,7 @@ def describe_clusters(self, filters, next_token, max_results): """ clusters = list(self.clusters.values()) - # Apply filters if provided + # If we have filters, filter the resource if filters: for key, values in filters.items(): if key == "clusterIds": @@ -239,7 +311,7 @@ def describe_clusters(self, filters, next_token, max_results): elif key == "vpcIds": clusters = [c for c in clusters if c.vpc_id in values] - # Sort clusters by creation timestamp for consistent pagination + # Sort clusters by creation timestamp clusters = sorted(clusters, key=lambda x: x.create_timestamp) if not max_results: @@ -255,15 +327,91 @@ def describe_clusters(self, filters, next_token, max_results): results, token = paginator.paginate([c.to_dict() for c in clusters]) return results, token - def get_resource_policy(self, resource_arn): - # implement here - return policy + # def get_resource_policy(self, resource_arn): + # # implement here + # return policy def describe_backups( - self, next_token, max_results, filters, shared, sort_ascending - ): - # implement here - return backups, next_token + self, + next_token: Optional[str], + max_results: Optional[int], + filters: Optional[Dict[str, List[str]]], + shared: Optional[bool], + sort_ascending: Optional[bool], + ) -> Tuple[List[Dict], Optional[str]]: + """Describe CloudHSM backups. 
+ + Args: + next_token: Token for pagination + max_results: Maximum number of results to return + filters: Filters to apply + shared: Whether to include shared backups + sort_ascending: Sort by timestamp ascending if True + + Returns: + Tuple containing list of backups and next token + """ + backups = list(self.backups.values()) + # print("backups are", backups[0].to_dict()) + + if filters: + for key, values in filters.items(): + if key == "backupIds": + backups = [b for b in backups if b.backup_id in values] + elif key == "sourceBackupIds": + backups = [b for b in backups if b.source_backup in values] + elif key == "clusterIds": + backups = [b for b in backups if b.cluster_id in values] + elif key == "states": + backups = [b for b in backups if b.backup_state in values] + elif key == "neverExpires": + never_expires = values[0].lower() == "true" + backups = [b for b in backups if b.never_expires == never_expires] + + # Sort backups + backups.sort( + key=lambda x: x.create_timestamp, + reverse=not sort_ascending if sort_ascending is not None else True, + ) + if not max_results: + # print("\n\ndicts are", [b.to_dict() for b in backups]) + return [b.to_dict() for b in backups], None + + paginator = Paginator( + max_results=max_results, + unique_attribute="BackupId", + starting_token=next_token, + fail_on_invalid_token=False, + ) + results, token = paginator.paginate([b.to_dict() for b in backups]) + return results, token + + def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: + """Creates or updates a resource policy for CloudHSM backup. + + Args: + resource_arn (str): The ARN of the CloudHSM backup + policy (str): The JSON policy document + + Returns: + Dict[str, str]: Dictionary containing ResourceArn and Policy + + Raises: + ValueError: If the resource doesn't exist or is not in READY state + """ + # Extract backup ID from ARN + try: + backup_id = resource_arn.split("/")[-1] + except IndexError: + raise ValueError(f"Invalid resource ARN format: {resource_arn}") + + # Verify backup exists and is in READY state + # Note: Need to implement backup verification + # once backup implemented + + self.resource_policies[resource_arn] = policy + + return {"ResourceArn": resource_arn, "Policy": policy} cloudhsmv2_backends = BackendDict(CloudHSMV2Backend, "cloudhsmv2") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index bb4554ce891e..774547ac87f3 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -12,6 +12,10 @@ class DateTimeEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, datetime): return obj.isoformat() + # Don't try to convert objects that already have to_dict to dict + # if hasattr(obj, "to_dict"): + # return obj.to_dict() + # Let the base class handle anything else return super().default(obj) @@ -68,7 +72,6 @@ def untag_resource(self): return json.dumps(dict()) def create_cluster(self): - # Use BaseResponse's _get_param method to get individual parameters directly backup_retention_policy = self._get_param("BackupRetentionPolicy", {}) hsm_type = self._get_param("HsmType") source_backup_id = self._get_param("SourceBackupId") @@ -128,19 +131,41 @@ def describe_clusters(self): # # TODO: adjust response # return json.dumps(dict(policy=policy)) - # def describe_backups(self): + def describe_backups(self): + params = self._get_params() + next_token = params.get("NextToken") + max_results = params.get("MaxResults") + filters = params.get("Filters") + shared = params.get("Shared") + sort_ascending = 
params.get("SortAscending") + + backups, next_token = self.cloudhsmv2_backend.describe_backups( + next_token=next_token, + max_results=max_results, + filters=filters, + shared=shared, + sort_ascending=sort_ascending, + ) + + # Remove the manual conversion to dictionaries since DateTimeEncoder will handle it + response = {"Backups": backups} + if next_token: + response["NextToken"] = next_token + + # print("\n\n describe response are", response) + + # print("\n\n json dump is", json.dumps(response, cls=DateTimeEncoder)) + + return json.dumps(response, cls=DateTimeEncoder) + + # def put_resource_policy(self): # params = self._get_params() - # next_token = params.get("NextToken") - # max_results = params.get("MaxResults") - # filters = params.get("Filters") - # shared = params.get("Shared") - # sort_ascending = params.get("SortAscending") - # backups, next_token = self.cloudhsmv2_backend.describe_backups( - # next_token=next_token, - # max_results=max_results, - # filters=filters, - # shared=shared, - # sort_ascending=sort_ascending, + # print("params", params) + # resource_arn = params.get("ResourceArn") + # policy = params.get("Policy") + # resource_arn, policy = self.cloudhsmv2_backend.put_resource_policy( + # resource_arn=resource_arn, + # policy=policy, # ) # # TODO: adjust response - # return json.dumps(dict(backups=backups, nextToken=next_token)) + # return json.dumps(dict(resourceArn=resource_arn, policy=policy)) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index 93a05229fd0b..b4133cd3fc4b 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -148,7 +148,7 @@ def test_delete_cluster(): # Mode="FIPS", ) cluster_id = response["Cluster"]["ClusterId"] - print("cluster_id", cluster_id) + # print("cluster_id", cluster_id) # Delete the cluster delete_response = client.delete_cluster(ClusterId=cluster_id) @@ -253,9 +253,116 @@ def test_delete_cluster(): # raise Exception("NotYetImplemented") +@mock_aws +def test_describe_backups(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") + + # Create a cluster which will automatically create a backup + cluster = client.create_cluster( + HsmType="hsm1.medium", + SubnetIds=["subnet-12345678"], + ) + cluster_id = cluster["Cluster"]["ClusterId"] + # print("\n\n cluster_id is", cluster_id) + + # Verify backup was automatically created + response = client.describe_backups() + # print("\n\ntesting response in response", response) + assert "Backups" in response + assert len(response["Backups"]) == 1 + + backup = response["Backups"][0] + assert backup["ClusterId"] == cluster_id + assert backup["HsmType"] == "hsm1.medium" + assert backup["BackupState"] == "CREATE_IN_PROGRESS" + + # # Test filters + # filtered_response = client.describe_backups(Filters={"clusterIds": [cluster_id]}) + # assert len(filtered_response["Backups"]) == 1 + # assert filtered_response["Backups"][0]["ClusterId"] == cluster_id + + # @mock_aws -# def test_describe_backups(): -# client = boto3.client("cloudhsmv2", region_name="ap-southeast-1") -# resp = client.describe_backups() +# def test_put_resource_policy(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") -# raise Exception("NotYetImplemented") +# # Create a cluster to get a valid resource ARN +# response = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"] +# ) +# cluster_id = response["Cluster"]["ClusterId"] +# resource_arn = 
f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" + +# # Create a sample policy +# policy = { +# "Version": "2012-10-17", +# "Statement": [ +# { +# "Sid": "EnableSharing", +# "Effect": "Allow", +# "Principal": { +# "AWS": "arn:aws:iam::123456789012:root" +# }, +# "Action": [ +# "cloudhsmv2:DescribeClusters", +# "cloudhsmv2:DescribeBackups" +# ], +# "Resource": resource_arn +# } +# ] +# } + +# # Put the resource policy +# response_2 = client.put_resource_policy( +# ResourceArn=resource_arn, +# Policy=json.dumps(policy) +# ) + +# # Verify response structure +# assert "ResourceArn" in response_2 +# assert "Policy" in response_2 +# assert response_2["ResourceArn"] == resource_arn +# assert json.loads(response_2["Policy"]) == policy + + +# @mock_aws +# def test_put_resource_policy_invalid_arn(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") +# +# invalid_arn = "arn:aws:cloudhsm:us-east-1:123456789012:cluster/invalid-id" +# policy = json.dumps({"Version": "2012-10-17", "Statement": []}) +# +# with pytest.raises(ClientError) as exc: +# client.put_resource_policy( +# ResourceArn=invalid_arn, +# Policy=policy +# ) +# +# err = exc.value.response["Error"] +# assert err["Code"] == "ResourceNotFoundException" +# assert "Resource not found" in err["Message"] +# +# +# @mock_aws +# def test_put_resource_policy_invalid_policy(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") +# +# # Create a cluster to get a valid resource ARN +# response = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"] +# ) +# cluster_id = response["Cluster"]["ClusterId"] +# resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" +# +# # Try to put an invalid policy +# with pytest.raises(ClientError) as exc: +# client.put_resource_policy( +# ResourceArn=resource_arn, +# Policy="invalid-policy-document" +# ) +# +# err = exc.value.response["Error"] +# assert err["Code"] == "InvalidRequestException" +# assert "Invalid policy document" in err["Message"] From 87eaa4e01a425f9dfa0d5aaeb312b29652d3024b Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 11:22:37 -0500 Subject: [PATCH 07/15] Fixed linting issue --- moto/cloudhsmv2/models.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 91b3ed4297d8..95d01786d9a3 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -400,10 +400,11 @@ def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: ValueError: If the resource doesn't exist or is not in READY state """ # Extract backup ID from ARN - try: - backup_id = resource_arn.split("/")[-1] - except IndexError: - raise ValueError(f"Invalid resource ARN format: {resource_arn}") + # try: + # backup_id = resource_arn.split("/")[-1] + # + # except IndexError: + # raise ValueError(f"Invalid resource ARN format: {resource_arn}") # Verify backup exists and is in READY state # Note: Need to implement backup verification From 1d1a1b509f2a8b6c66ded18b7e23277893593cac Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 13:22:06 -0500 Subject: [PATCH 08/15] Maybe this is working --- moto/cloudhsmv2/models.py | 22 ++++--- moto/cloudhsmv2/responses.py | 37 ++++++------ tests/test_cloudhsmv2/test_cloudhsmv2.py | 74 ++++++++++-------------- 3 files changed, 64 insertions(+), 69 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 95d01786d9a3..6c1e632a8fa4 100644 --- 
a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -400,15 +400,19 @@ def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: ValueError: If the resource doesn't exist or is not in READY state """ # Extract backup ID from ARN - # try: - # backup_id = resource_arn.split("/")[-1] - # - # except IndexError: - # raise ValueError(f"Invalid resource ARN format: {resource_arn}") - - # Verify backup exists and is in READY state - # Note: Need to implement backup verification - # once backup implemented + try: + backup_id = resource_arn.split("/")[-1] + except IndexError: + raise ValueError(f"Invalid resource ARN format: {resource_arn}") + + # Verify backup exists + if backup_id not in self.backups: + raise ValueError(f"Backup {backup_id} not found") + + # Verify backup is in READY state + backup = self.backups[backup_id] + if backup.backup_state != "READY": + raise ValueError(f"Backup {backup_id} is not in READY state") self.resource_policies[resource_arn] = policy diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 774547ac87f3..200f91200b33 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -9,18 +9,18 @@ class DateTimeEncoder(json.JSONEncoder): - def default(self, obj): - if isinstance(obj, datetime): - return obj.isoformat() + def default(self, o): + if isinstance(o, datetime): + return o.isoformat() # Don't try to convert objects that already have to_dict to dict - # if hasattr(obj, "to_dict"): - # return obj.to_dict() + # if hasattr(o, "to_dict"): + # return o.to_dict() # Let the base class handle anything else - return super().default(obj) + return super().default(o) class CloudHSMV2Response(BaseResponse): - """Handler for CloudHSMV2 requests and responses.""" + """Handler for CloudHSMV2 requests and cresponses.""" def __init__(self): super().__init__(service_name="cloudhsmv2") @@ -158,14 +158,15 @@ def describe_backups(self): return json.dumps(response, cls=DateTimeEncoder) - # def put_resource_policy(self): - # params = self._get_params() - # print("params", params) - # resource_arn = params.get("ResourceArn") - # policy = params.get("Policy") - # resource_arn, policy = self.cloudhsmv2_backend.put_resource_policy( - # resource_arn=resource_arn, - # policy=policy, - # ) - # # TODO: adjust response - # return json.dumps(dict(resourceArn=resource_arn, policy=policy)) + def put_resource_policy(self): + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + + resource_arn = params.get("ResourceArn") + policy = params.get("Policy") + + result = self.cloudhsmv2_backend.put_resource_policy( + resource_arn=resource_arn, + policy=policy, + ) + return json.dumps(result) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index b4133cd3fc4b..9f92807db88e 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -257,17 +257,14 @@ def test_delete_cluster(): def test_describe_backups(): client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create a cluster which will automatically create a backup cluster = client.create_cluster( HsmType="hsm1.medium", SubnetIds=["subnet-12345678"], ) cluster_id = cluster["Cluster"]["ClusterId"] - # print("\n\n cluster_id is", cluster_id) # Verify backup was automatically created response = client.describe_backups() - # print("\n\ntesting response in response", response) assert "Backups" in response assert len(response["Backups"]) == 1 @@ -282,48 
+279,41 @@ def test_describe_backups(): # assert filtered_response["Backups"][0]["ClusterId"] == cluster_id -# @mock_aws -# def test_put_resource_policy(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") +@mock_aws +def test_put_resource_policy(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") -# # Create a cluster to get a valid resource ARN -# response = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"] -# ) -# cluster_id = response["Cluster"]["ClusterId"] -# resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" + # Create a cluster to get a valid resource ARN + response = client.create_cluster( + HsmType="hsm1.medium", SubnetIds=["subnet-12345678"] + ) + cluster_id = response["Cluster"]["ClusterId"] + resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" + + # Create a sample policy + policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableSharing", + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::123456789012:root"}, + "Action": ["cloudhsmv2:DescribeClusters", "cloudhsmv2:DescribeBackups"], + "Resource": resource_arn, + } + ], + } -# # Create a sample policy -# policy = { -# "Version": "2012-10-17", -# "Statement": [ -# { -# "Sid": "EnableSharing", -# "Effect": "Allow", -# "Principal": { -# "AWS": "arn:aws:iam::123456789012:root" -# }, -# "Action": [ -# "cloudhsmv2:DescribeClusters", -# "cloudhsmv2:DescribeBackups" -# ], -# "Resource": resource_arn -# } -# ] -# } - -# # Put the resource policy -# response_2 = client.put_resource_policy( -# ResourceArn=resource_arn, -# Policy=json.dumps(policy) -# ) + # Put the resource policy + response_2 = client.put_resource_policy( + ResourceArn=resource_arn, Policy=json.dumps(policy) + ) -# # Verify response structure -# assert "ResourceArn" in response_2 -# assert "Policy" in response_2 -# assert response_2["ResourceArn"] == resource_arn -# assert json.loads(response_2["Policy"]) == policy + # Verify response structure + assert "ResourceArn" in response_2 + assert "Policy" in response_2 + assert response_2["ResourceArn"] == resource_arn + assert json.loads(response_2["Policy"]) == policy # @mock_aws From d25be3b0631761e3899034f9cac7a720d09b359d Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 13:30:07 -0500 Subject: [PATCH 09/15] Added json --- tests/test_cloudhsmv2/test_cloudhsmv2.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index 9f92807db88e..d4852d6305c7 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -1,5 +1,6 @@ """Unit tests for cloudhsmv2-supported APIs.""" +import json from datetime import datetime import boto3 From 7fe3e599d68393ecf951a8c2b3d5e3c6bb114518 Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 13:45:26 -0500 Subject: [PATCH 10/15] More Updates --- moto/cloudhsmv2/models.py | 110 ++++++++--------------------------- moto/cloudhsmv2/responses.py | 24 ++++---- 2 files changed, 35 insertions(+), 99 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 6c1e632a8fa4..5fe1f97b9dc6 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -1,7 +1,7 @@ """CloudHSMV2Backend class with methods for supported APIs.""" import uuid -from typing import Dict, List, Optional, Tuple +from typing import Any, Dict, List, Optional, Tuple from moto.core.base_backend import BackendDict, BaseBackend from 
moto.core.utils import utcnow @@ -130,17 +130,10 @@ def __init__(self, region_name, account_id): self.resource_policies = {} self.backups = {} - def list_tags(self, resource_id, next_token, max_results): - """List tags for a CloudHSM resource. - - Args: - resource_id (str): The identifier of the resource to list tags for - next_token (str): Token for pagination - max_results (int): Maximum number of results to return - - Returns: - tuple: (list of tags, next token) - """ + def list_tags( + self, resource_id: str, next_token: str, max_results: int + ) -> Tuple[List[Dict[str, str]], Optional[str]]: + """List tags for a CloudHSM resource.""" if resource_id not in self.tags: return [], None @@ -170,19 +163,10 @@ def list_tags(self, resource_id, next_token, max_results): return results, token - def tag_resource(self, resource_id, tag_list): - """Add or update tags for a CloudHSM resource. - - Args: - resource_id (str): The identifier of the resource to tag - tag_list (list): List of tag dictionaries with 'Key' and 'Value' pairs - - Returns: - dict: Empty dictionary per AWS spec - - Raises: - ValueError: If resource_id or tag_list is None - """ + def tag_resource( + self, resource_id: str, tag_list: List[Dict[str, str]] + ) -> Dict[str, Any]: + """Add or update tags for a CloudHSM resource.""" if resource_id is None: raise ValueError("ResourceId must not be None") if tag_list is None: @@ -204,19 +188,10 @@ def tag_resource(self, resource_id, tag_list): return {} - def untag_resource(self, resource_id, tag_key_list): - """Remove tags from a CloudHSM resource. - - Args: - resource_id (str): The identifier of the resource to untag - tag_key_list (list): List of tag keys to remove - - Returns: - dict: Empty dictionary per AWS spec - - Raises: - ValueError: If resource_id or tag_key_list is None - """ + def untag_resource( + self, resource_id: str, tag_key_list: List[str] + ) -> Dict[str, Any]: + """Remove tags from a CloudHSM resource.""" if resource_id is None: raise ValueError("ResourceId must not be None") if tag_key_list is None: @@ -238,7 +213,7 @@ def create_cluster( network_type: Optional[str], tag_list: Optional[List[Dict[str, str]]], mode: Optional[str], - ) -> Dict: + ) -> Dict[str, Any]: cluster = Cluster( backup_retention_policy=backup_retention_policy, hsm_type=hsm_type, @@ -265,18 +240,8 @@ def create_cluster( return cluster.to_dict() - def delete_cluster(self, cluster_id: str) -> Dict: - """Delete a CloudHSM cluster. - - Args: - cluster_id (str): The identifier of the cluster to delete - - Returns: - dict: The deleted cluster details - - Raises: - ValueError: If cluster_id is not found - """ + def delete_cluster(self, cluster_id: str) -> Dict[str, Any]: + """Delete a CloudHSM cluster.""" if cluster_id not in self.clusters: raise ValueError(f"Cluster {cluster_id} not found") @@ -288,17 +253,10 @@ def delete_cluster(self, cluster_id: str) -> Dict: return cluster.to_dict() - def describe_clusters(self, filters, next_token, max_results): - """Describe CloudHSM clusters. 
- - Args: - filters (dict): Filters to apply - next_token (str): Token for pagination - max_results (int): Maximum number of results to return - - Returns: - tuple: (list of clusters, next token) - """ + def describe_clusters( + self, filters: Dict[str, List[str]], next_token: str, max_results: int + ) -> Tuple[List[Dict[str, Any]], Optional[str]]: + """Describe CloudHSM clusters.""" clusters = list(self.clusters.values()) # If we have filters, filter the resource @@ -338,19 +296,8 @@ def describe_backups( filters: Optional[Dict[str, List[str]]], shared: Optional[bool], sort_ascending: Optional[bool], - ) -> Tuple[List[Dict], Optional[str]]: - """Describe CloudHSM backups. - - Args: - next_token: Token for pagination - max_results: Maximum number of results to return - filters: Filters to apply - shared: Whether to include shared backups - sort_ascending: Sort by timestamp ascending if True - - Returns: - Tuple containing list of backups and next token - """ + ) -> Tuple[List[Dict[str, Any]], Optional[str]]: + """Describe CloudHSM backups.""" backups = list(self.backups.values()) # print("backups are", backups[0].to_dict()) @@ -387,18 +334,7 @@ def describe_backups( return results, token def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: - """Creates or updates a resource policy for CloudHSM backup. - - Args: - resource_arn (str): The ARN of the CloudHSM backup - policy (str): The JSON policy document - - Returns: - Dict[str, str]: Dictionary containing ResourceArn and Policy - - Raises: - ValueError: If the resource doesn't exist or is not in READY state - """ + """Creates or updates a resource policy for CloudHSM backup.""" # Extract backup ID from ARN try: backup_id = resource_arn.split("/")[-1] diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 200f91200b33..a9acc29e9ea0 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -20,17 +20,17 @@ def default(self, o): class CloudHSMV2Response(BaseResponse): - """Handler for CloudHSMV2 requests and cresponses.""" + """Handler for CloudHSMV2 requests and responses.""" - def __init__(self): + def __init__(self) -> None: super().__init__(service_name="cloudhsmv2") @property - def cloudhsmv2_backend(self): + def cloudhsmv2_backend(self) -> str: """Return backend instance specific for this region.""" return cloudhsmv2_backends[self.current_account][self.region] - def list_tags(self): + def list_tags(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -44,9 +44,9 @@ def list_tags(self): max_results=max_results, ) - return 200, {}, json.dumps({"TagList": tag_list, "NextToken": next_token}) + return json.dumps({"TagList": tag_list, "NextToken": next_token}) - def tag_resource(self): + def tag_resource(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -59,7 +59,7 @@ def tag_resource(self): ) return json.dumps(dict()) - def untag_resource(self): + def untag_resource(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -71,7 +71,7 @@ def untag_resource(self): ) return json.dumps(dict()) - def create_cluster(self): + def create_cluster(self) -> str: backup_retention_policy = self._get_param("BackupRetentionPolicy", {}) hsm_type = self._get_param("HsmType") source_backup_id = self._get_param("SourceBackupId") @@ -91,7 +91,7 @@ def create_cluster(self): ) return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) - def 
delete_cluster(self): + def delete_cluster(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) @@ -102,7 +102,7 @@ def delete_cluster(self): except ValueError as e: return self.error("ClusterNotFoundFault", str(e)) - def describe_clusters(self): + def describe_clusters(self) -> str: raw_params = list(self._get_params().keys())[0] if self._get_params() else "{}" params = json.loads(raw_params) @@ -131,7 +131,7 @@ def describe_clusters(self): # # TODO: adjust response # return json.dumps(dict(policy=policy)) - def describe_backups(self): + def describe_backups(self) -> str: params = self._get_params() next_token = params.get("NextToken") max_results = params.get("MaxResults") @@ -158,7 +158,7 @@ def describe_backups(self): return json.dumps(response, cls=DateTimeEncoder) - def put_resource_policy(self): + def put_resource_policy(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) From 1ec76a2b75cc25b999e347471deb46848be35071 Mon Sep 17 00:00:00 2001 From: Aman Date: Thu, 6 Feb 2025 16:17:47 -0500 Subject: [PATCH 11/15] put_resource_policy working --- moto/cloudhsmv2/models.py | 35 ++++++++++++++++-------- moto/cloudhsmv2/responses.py | 2 ++ tests/test_cloudhsmv2/test_cloudhsmv2.py | 2 ++ 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index 5fe1f97b9dc6..b4de22a084fd 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -234,6 +234,7 @@ def create_cluster( tag_list=tag_list, region_name=self.region_name, ) + print("\n\n backup are", backup.backup_arn) self.backups[backup.backup_id] = backup # print("Backup is", self.backups) @@ -334,24 +335,34 @@ def describe_backups( return results, token def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: - """Creates or updates a resource policy for CloudHSM backup.""" - # Extract backup ID from ARN + """Creates or updates a resource policy for CloudHSM cluster or backup.""" + # Determine if this is a cluster or backup ARN try: - backup_id = resource_arn.split("/")[-1] + resource_type = resource_arn.split(":")[-1].split("/")[0] + resource_id = resource_arn.split("/")[-1] except IndexError: raise ValueError(f"Invalid resource ARN format: {resource_arn}") - # Verify backup exists - if backup_id not in self.backups: - raise ValueError(f"Backup {backup_id} not found") - - # Verify backup is in READY state - backup = self.backups[backup_id] - if backup.backup_state != "READY": - raise ValueError(f"Backup {backup_id} is not in READY state") + if resource_type == "cluster": + if resource_id not in self.clusters: + raise ValueError(f"Cluster with ID {resource_id} not found") + # No need to check state for clusters + elif resource_type == "backup": + matching_backup = None + for backup in self.backups.values(): + if backup.backup_arn == resource_arn: + matching_backup = backup + break + if not matching_backup: + raise ValueError(f"Backup with ARN {resource_arn} not found") + if matching_backup.backup_state != "READY": + raise ValueError( + f"Backup {matching_backup.backup_id} is not in READY state" + ) + else: + raise ValueError(f"Invalid resource type in ARN: {resource_type}") self.resource_policies[resource_arn] = policy - return {"ResourceArn": resource_arn, "Policy": policy} diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index a9acc29e9ea0..cba1e1149f60 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ 
-162,6 +162,8 @@ def put_resource_policy(self) -> str:
         raw_params = list(self._get_params().keys())[0]
         params = json.loads(raw_params)
 
+        print("\n\n params are", params)
+
         resource_arn = params.get("ResourceArn")
         policy = params.get("Policy")
 
diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py
index d4852d6305c7..e881f66121ce 100644
--- a/tests/test_cloudhsmv2/test_cloudhsmv2.py
+++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py
@@ -310,6 +310,8 @@ def test_put_resource_policy():
         ResourceArn=resource_arn, Policy=json.dumps(policy)
     )
 
+    print("\n\n response_2 are", response_2)
+
     # Verify response structure
     assert "ResourceArn" in response_2
     assert "Policy" in response_2

From 7ef78afd94e431dcc11bf042e7a1ae3026042e12 Mon Sep 17 00:00:00 2001
From: Aman
Date: Fri, 7 Feb 2025 11:58:46 -0500
Subject: [PATCH 12/15] Tests are all working and added exception

---
 moto/cloudhsmv2/exceptions.py            |  23 ++++
 moto/cloudhsmv2/models.py                |  69 ++++++------
 moto/cloudhsmv2/responses.py             |  22 ++--
 tests/test_cloudhsmv2/test_cloudhsmv2.py | 137 ++++++++++++++++-------
 4 files changed, 161 insertions(+), 90 deletions(-)

diff --git a/moto/cloudhsmv2/exceptions.py b/moto/cloudhsmv2/exceptions.py
index e8b88b84fa6c..02e7271abd0a 100644
--- a/moto/cloudhsmv2/exceptions.py
+++ b/moto/cloudhsmv2/exceptions.py
@@ -1 +1,24 @@
 """Exceptions raised by the cloudhsmv2 service."""
+
+from moto.core.exceptions import JsonRESTError
+
+
+class CloudHSMv2ClientError(JsonRESTError):
+    """Base class for CloudHSMv2 errors."""
+
+    code = 400
+
+
+class ResourceNotFoundException(CloudHSMv2ClientError):
+    def __init__(self, message: str):
+        super().__init__("ResourceNotFoundException", message)
+
+
+class InvalidRequestException(CloudHSMv2ClientError):
+    def __init__(self, message: str):
+        super().__init__("InvalidRequestException", message)
+
+
+class ClientError(CloudHSMv2ClientError):
+    def __init__(self, error_type: str, message: str):
+        super().__init__(error_type, message)
diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py
index b4de22a084fd..fe811daf4591 100644
--- a/moto/cloudhsmv2/models.py
+++ b/moto/cloudhsmv2/models.py
@@ -7,6 +7,8 @@
 from moto.core.utils import utcnow
 from moto.utilities.paginator import Paginator
 
+from .exceptions import InvalidRequestException, ResourceNotFoundException
+
 
 class Cluster:
 
@@ -79,8 +81,7 @@ def __init__(
         self.backup_arn = (
             f"arn:aws:cloudhsm:{region_name}:123456789012:backup/{self.backup_id}"
         )
-        # New backups start in CREATE_IN_PROGRESS state
-        self.backup_state = "CREATE_IN_PROGRESS"
+        self.backup_state = "READY"
         self.cluster_id = cluster_id
         self.create_timestamp = utcnow()
         self.copy_timestamp = utcnow() if source_backup else None
@@ -234,11 +235,8 @@ def create_cluster(
             tag_list=tag_list,
             region_name=self.region_name,
         )
-        print("\n\n backup are", backup.backup_arn)
         self.backups[backup.backup_id] = backup
 
-        # print("Backup is", self.backups)
-
         return cluster.to_dict()
 
     def delete_cluster(self, cluster_id: str) -> Dict[str, Any]:
@@ -286,9 +284,23 @@ def describe_clusters(
         results, token = paginator.paginate([c.to_dict() for c in clusters])
         return results, token
 
-    # def get_resource_policy(self, resource_arn):
-    #     # implement here
-    #     return policy
+    def get_resource_policy(self, resource_arn: str) -> str:
+        """Gets the resource policy attached to a CloudHSM backup."""
+        if not resource_arn:
+            raise InvalidRequestException("ResourceArn must not be empty")
+
+        # Verify backup exists
+        matching_backup = None
+        for backup in 
self.backups.values(): + if backup.backup_arn == resource_arn: + matching_backup = backup + break + + if not matching_backup: + raise ResourceNotFoundException(f"Backup with ARN {resource_arn} not found") + + # Return the policy if it exists, otherwise return None + return self.resource_policies.get(resource_arn) def describe_backups( self, @@ -300,7 +312,6 @@ def describe_backups( ) -> Tuple[List[Dict[str, Any]], Optional[str]]: """Describe CloudHSM backups.""" backups = list(self.backups.values()) - # print("backups are", backups[0].to_dict()) if filters: for key, values in filters.items(): @@ -322,7 +333,6 @@ def describe_backups( reverse=not sort_ascending if sort_ascending is not None else True, ) if not max_results: - # print("\n\ndicts are", [b.to_dict() for b in backups]) return [b.to_dict() for b in backups], None paginator = Paginator( @@ -335,32 +345,19 @@ def describe_backups( return results, token def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: - """Creates or updates a resource policy for CloudHSM cluster or backup.""" - # Determine if this is a cluster or backup ARN - try: - resource_type = resource_arn.split(":")[-1].split("/")[0] - resource_id = resource_arn.split("/")[-1] - except IndexError: - raise ValueError(f"Invalid resource ARN format: {resource_arn}") - - if resource_type == "cluster": - if resource_id not in self.clusters: - raise ValueError(f"Cluster with ID {resource_id} not found") - # No need to check state for clusters - elif resource_type == "backup": - matching_backup = None - for backup in self.backups.values(): - if backup.backup_arn == resource_arn: - matching_backup = backup - break - if not matching_backup: - raise ValueError(f"Backup with ARN {resource_arn} not found") - if matching_backup.backup_state != "READY": - raise ValueError( - f"Backup {matching_backup.backup_id} is not in READY state" - ) - else: - raise ValueError(f"Invalid resource type in ARN: {resource_type}") + # Find the backup + matching_backup = None + for backup in self.backups.values(): + if backup.backup_arn == resource_arn: + matching_backup = backup + break + if not matching_backup: + raise ResourceNotFoundException(f"Backup with ARN {resource_arn} not found") + + if matching_backup.backup_state != "READY": + raise InvalidRequestException( + f"Backup {matching_backup.backup_id} is not in READY state" + ) self.resource_policies[resource_arn] = policy return {"ResourceArn": resource_arn, "Policy": policy} diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index cba1e1149f60..0336d06af5fc 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -122,14 +122,14 @@ def describe_clusters(self) -> str: return json.dumps(response, cls=DateTimeEncoder) - # def get_resource_policy(self): - # params = self._get_params() - # resource_arn = params.get("ResourceArn") - # policy = self.cloudhsmv2_backend.get_resource_policy( - # resource_arn=resource_arn, - # ) - # # TODO: adjust response - # return json.dumps(dict(policy=policy)) + def get_resource_policy(self) -> str: + raw_params = list(self._get_params().keys())[0] + params = json.loads(raw_params) + resource_arn = params.get("ResourceArn") + policy = self.cloudhsmv2_backend.get_resource_policy( + resource_arn=resource_arn, + ) + return json.dumps({"Policy": policy}) def describe_backups(self) -> str: params = self._get_params() @@ -152,18 +152,12 @@ def describe_backups(self) -> str: if next_token: response["NextToken"] = next_token - # print("\n\n describe 
response are", response) - - # print("\n\n json dump is", json.dumps(response, cls=DateTimeEncoder)) - return json.dumps(response, cls=DateTimeEncoder) def put_resource_policy(self) -> str: raw_params = list(self._get_params().keys())[0] params = json.loads(raw_params) - print("\n\n params are", params) - resource_arn = params.get("ResourceArn") policy = params.get("Policy") diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index e881f66121ce..7621a35efb48 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -140,30 +140,27 @@ def test_create_cluster(): def test_delete_cluster(): client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create a cluster first - # TODO: For some reason I can't send network type or mode here response = client.create_cluster( HsmType="hsm1.medium", SubnetIds=["subnet-12345678"], - # NetworkType="IPV4", - # Mode="FIPS", + NetworkType="IPV4", + Mode="FIPS", ) cluster_id = response["Cluster"]["ClusterId"] - # print("cluster_id", cluster_id) + # Delete the cluster delete_response = client.delete_cluster(ClusterId=cluster_id) - # Verify the response deleted_cluster = delete_response["Cluster"] assert deleted_cluster["ClusterId"] == cluster_id assert deleted_cluster["State"] == "DELETE_IN_PROGRESS" assert deleted_cluster["StateMessage"] == "Cluster deletion in progress" - # Verify the cluster is no longer listed clusters = client.describe_clusters()["Clusters"] assert len(clusters) == 0 +# TODO: Fix this test later # @mock_aws # def test_delete_nonexistent_cluster(): # client = boto3.client("cloudhsmv2", region_name="us-east-1") @@ -246,12 +243,72 @@ def test_delete_cluster(): # assert "NextToken" not in response2 +@mock_aws +def test_get_resource_policy(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") + + # Create a cluster which will automatically create a backup + client.create_cluster(HsmType="hsm1.medium", SubnetIds=["subnet-12345678"]) + + # Get the backup ARN + backup_response = client.describe_backups() + backup_arn = backup_response["Backups"][0]["BackupArn"] + + # Create a sample policy + policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EnableSharing", + "Effect": "Allow", + "Principal": {"AWS": "arn:aws:iam::123456789012:root"}, + "Action": ["cloudhsmv2:DescribeBackups"], + "Resource": backup_arn, + } + ], + } + + # Put the resource policy + client.put_resource_policy(ResourceArn=backup_arn, Policy=json.dumps(policy)) + # Get the resource policy + response = client.get_resource_policy(ResourceArn=backup_arn) + + # Verify response structure + assert "Policy" in response + assert json.loads(response["Policy"]) == policy + + +# @mock_aws +# def test_get_resource_policy_nonexistent_resource(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# invalid_arn = "arn:aws:cloudhsm:us-east-1:123456789012:cluster/invalid-id" + +# with pytest.raises(client.exceptions.CloudHsmClientException) as exc: +# client.get_resource_policy(ResourceArn=invalid_arn) + +# err = exc.value.response["Error"] +# assert err["Code"] == "ResourceNotFoundException" +# assert "Cluster invalid-id not found" in err["Message"] + # @mock_aws -# def test_get_resource_policy(): -# client = boto3.client("cloudhsmv2", region_name="us-east-2") -# resp = client.get_resource_policy() +# def test_get_resource_policy_no_policy(): +# client = boto3.client("cloudhsmv2", region_name="us-east-1") + +# # Create a cluster without attaching a policy +# 
response = client.create_cluster( +# HsmType="hsm1.medium", +# SubnetIds=["subnet-12345678"] +# ) +# cluster_id = response["Cluster"]["ClusterId"] +# resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" + +# # Get the resource policy +# response = client.get_resource_policy(ResourceArn=resource_arn) -# raise Exception("NotYetImplemented") +# # Verify response structure when no policy exists +# assert "Policy" in response +# assert response["Policy"] is None @mock_aws @@ -272,24 +329,24 @@ def test_describe_backups(): backup = response["Backups"][0] assert backup["ClusterId"] == cluster_id assert backup["HsmType"] == "hsm1.medium" - assert backup["BackupState"] == "CREATE_IN_PROGRESS" + assert backup["BackupState"] == "READY" # # Test filters - # filtered_response = client.describe_backups(Filters={"clusterIds": [cluster_id]}) - # assert len(filtered_response["Backups"]) == 1 - # assert filtered_response["Backups"][0]["ClusterId"] == cluster_id + filtered_response = client.describe_backups(Filters={"clusterIds": [cluster_id]}) + assert len(filtered_response["Backups"]) == 1 + assert filtered_response["Backups"][0]["ClusterId"] == cluster_id @mock_aws def test_put_resource_policy(): client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create a cluster to get a valid resource ARN - response = client.create_cluster( - HsmType="hsm1.medium", SubnetIds=["subnet-12345678"] - ) - cluster_id = response["Cluster"]["ClusterId"] - resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" + # Create a cluster which will automatically create a backup + client.create_cluster(HsmType="hsm1.medium", SubnetIds=["subnet-12345678"]) + + # Get the backup ARN + backup_response = client.describe_backups() + backup_arn = backup_response["Backups"][0]["BackupArn"] # Create a sample policy policy = { @@ -299,48 +356,48 @@ def test_put_resource_policy(): "Sid": "EnableSharing", "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::123456789012:root"}, - "Action": ["cloudhsmv2:DescribeClusters", "cloudhsmv2:DescribeBackups"], - "Resource": resource_arn, + "Action": ["cloudhsmv2:DescribeBackups"], + "Resource": backup_arn, } ], } # Put the resource policy - response_2 = client.put_resource_policy( - ResourceArn=resource_arn, Policy=json.dumps(policy) + response = client.put_resource_policy( + ResourceArn=backup_arn, Policy=json.dumps(policy) ) - print("\n\n response_2 are", response_2) - # Verify response structure - assert "ResourceArn" in response_2 - assert "Policy" in response_2 - assert response_2["ResourceArn"] == resource_arn - assert json.loads(response_2["Policy"]) == policy + assert "ResourceArn" in response + assert "Policy" in response + assert response["ResourceArn"] == backup_arn + assert json.loads(response["Policy"]) == policy +# TODO: Fix this test later # @mock_aws # def test_put_resource_policy_invalid_arn(): # client = boto3.client("cloudhsmv2", region_name="us-east-1") -# + # invalid_arn = "arn:aws:cloudhsm:us-east-1:123456789012:cluster/invalid-id" # policy = json.dumps({"Version": "2012-10-17", "Statement": []}) -# + # with pytest.raises(ClientError) as exc: # client.put_resource_policy( # ResourceArn=invalid_arn, # Policy=policy # ) -# + # err = exc.value.response["Error"] # assert err["Code"] == "ResourceNotFoundException" -# assert "Resource not found" in err["Message"] -# -# +# assert "Cluster invalid-id not found" in err["Message"] + + +# TODO: Fix this test later # @mock_aws # def test_put_resource_policy_invalid_policy(): # client 
= boto3.client("cloudhsmv2", region_name="us-east-1") -# + # # Create a cluster to get a valid resource ARN # response = client.create_cluster( # HsmType="hsm1.medium", @@ -348,14 +405,14 @@ def test_put_resource_policy(): # ) # cluster_id = response["Cluster"]["ClusterId"] # resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" -# + # # Try to put an invalid policy # with pytest.raises(ClientError) as exc: # client.put_resource_policy( # ResourceArn=resource_arn, # Policy="invalid-policy-document" # ) -# + # err = exc.value.response["Error"] # assert err["Code"] == "InvalidRequestException" # assert "Invalid policy document" in err["Message"] From b40346dcbff5d3b64460a9074c033b6ec4fb9f0e Mon Sep 17 00:00:00 2001 From: Aman Date: Fri, 7 Feb 2025 14:28:06 -0500 Subject: [PATCH 13/15] Fixed Lint and Fmt --- moto/cloudhsmv2/models.py | 31 +++++++++++++++---------------- moto/cloudhsmv2/responses.py | 29 ++++++++++++++--------------- 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index fe811daf4591..fec65eb4e912 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -17,16 +17,16 @@ def __init__( hsm_type: str, source_backup_id: Optional[str], subnet_ids: List[str], - network_type: str, - tag_list: Optional[List[Dict[str, str]]], - mode: str, - region_name: str, + network_type: str = "IPV4", + tag_list: Optional[List[Dict[str, str]]] = None, + mode: str = "DEFAULT", + region_name: str = "us-east-1", ): self.cluster_id = str(uuid.uuid4()) self.backup_policy = "DEFAULT" self.backup_retention_policy = backup_retention_policy self.create_timestamp = utcnow() - self.hsms = [] + self.hsms: List[Dict[str, Any]] = [] self.hsm_type = hsm_type self.source_backup_id = source_backup_id self.state = "CREATE_IN_PROGRESS" @@ -44,7 +44,7 @@ def __init__( self.tag_list = tag_list or [] self.mode = mode - def to_dict(self) -> Dict: + def to_dict(self) -> Dict[str, Any]: return { "BackupPolicy": self.backup_policy, "BackupRetentionPolicy": self.backup_retention_policy, @@ -94,7 +94,7 @@ def __init__( self.hsm_type = hsm_type self.mode = mode - def to_dict(self) -> Dict: + def to_dict(self) -> Dict[str, Any]: result = { "BackupId": self.backup_id, "BackupArn": self.backup_arn, @@ -124,12 +124,12 @@ def to_dict(self) -> Dict: class CloudHSMV2Backend(BaseBackend): """Implementation of CloudHSMV2 APIs.""" - def __init__(self, region_name, account_id): + def __init__(self, region_name: str, account_id: str) -> None: super().__init__(region_name, account_id) - self.tags = {} - self.clusters = {} - self.resource_policies = {} - self.backups = {} + self.tags: Dict[str, List[Dict[str, str]]] = {} + self.clusters: Dict[str, Cluster] = {} + self.resource_policies: Dict[str, str] = {} + self.backups: Dict[str, Backup] = {} def list_tags( self, resource_id: str, next_token: str, max_results: int @@ -220,9 +220,9 @@ def create_cluster( hsm_type=hsm_type, source_backup_id=source_backup_id, subnet_ids=subnet_ids, - network_type=network_type, + network_type=network_type or "IPV4", tag_list=tag_list, - mode=mode, + mode=mode or "DEFAULT", region_name=self.region_name, ) self.clusters[cluster.cluster_id] = cluster @@ -284,7 +284,7 @@ def describe_clusters( results, token = paginator.paginate([c.to_dict() for c in clusters]) return results, token - def get_resource_policy(self, resource_arn: str) -> str: + def get_resource_policy(self, resource_arn: str) -> Optional[str]: """Gets the resource policy attached to a 
CloudHSM backup.""" if not resource_arn: raise InvalidRequestException("ResourceArn must not be empty") @@ -295,7 +295,6 @@ def get_resource_policy(self, resource_arn: str) -> str: if backup.backup_arn == resource_arn: matching_backup = backup break - if not matching_backup: raise ResourceNotFoundException(f"Backup with ARN {resource_arn} not found") diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index 0336d06af5fc..d05d66841671 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -2,20 +2,18 @@ import json from datetime import datetime +from typing import Any from moto.core.responses import BaseResponse -from .models import cloudhsmv2_backends +from .models import CloudHSMV2Backend, cloudhsmv2_backends class DateTimeEncoder(json.JSONEncoder): - def default(self, o): + def default(self, o: Any) -> Any: if isinstance(o, datetime): return o.isoformat() - # Don't try to convert objects that already have to_dict to dict - # if hasattr(o, "to_dict"): - # return o.to_dict() - # Let the base class handle anything else + return super().default(o) @@ -26,7 +24,7 @@ def __init__(self) -> None: super().__init__(service_name="cloudhsmv2") @property - def cloudhsmv2_backend(self) -> str: + def cloudhsmv2_backend(self) -> CloudHSMV2Backend: """Return backend instance specific for this region.""" return cloudhsmv2_backends[self.current_account][self.region] @@ -96,11 +94,8 @@ def delete_cluster(self) -> str: params = json.loads(raw_params) cluster_id = params.get("ClusterId") - try: - cluster = self.cloudhsmv2_backend.delete_cluster(cluster_id=cluster_id) - return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) - except ValueError as e: - return self.error("ClusterNotFoundFault", str(e)) + cluster = self.cloudhsmv2_backend.delete_cluster(cluster_id=cluster_id) + return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) def describe_clusters(self) -> str: raw_params = list(self._get_params().keys())[0] if self._get_params() else "{}" @@ -132,10 +127,15 @@ def get_resource_policy(self) -> str: return json.dumps({"Policy": policy}) def describe_backups(self) -> str: - params = self._get_params() + raw_params = list(self._get_params().keys())[0] if self._get_params() else "{}" + params = json.loads(raw_params) + next_token = params.get("NextToken") max_results = params.get("MaxResults") - filters = params.get("Filters") + filters_raw = params.get("Filters", {}) + filters = ( + json.loads(filters_raw) if isinstance(filters_raw, str) else filters_raw + ) shared = params.get("Shared") sort_ascending = params.get("SortAscending") @@ -147,7 +147,6 @@ def describe_backups(self) -> str: sort_ascending=sort_ascending, ) - # Remove the manual conversion to dictionaries since DateTimeEncoder will handle it response = {"Backups": backups} if next_token: response["NextToken"] = next_token From 721a37e452fc93fe0815922768b2f96a0c916bf2 Mon Sep 17 00:00:00 2001 From: Aman Date: Fri, 7 Feb 2025 16:25:16 -0500 Subject: [PATCH 14/15] Removed tests and cleaned up files --- moto/cloudhsmv2/models.py | 65 +------ moto/cloudhsmv2/responses.py | 1 + tests/test_cloudhsmv2/test_cloudhsmv2.py | 236 ++++++----------------- tests/test_cloudhsmv2/test_server.py | 13 -- 4 files changed, 67 insertions(+), 248 deletions(-) delete mode 100644 tests/test_cloudhsmv2/test_server.py diff --git a/moto/cloudhsmv2/models.py b/moto/cloudhsmv2/models.py index fec65eb4e912..9e877f728bcd 100644 --- a/moto/cloudhsmv2/models.py +++ b/moto/cloudhsmv2/models.py @@ -7,7 +7,7 @@ from 
moto.core.utils import utcnow from moto.utilities.paginator import Paginator -from .exceptions import InvalidRequestException, ResourceNotFoundException +from .exceptions import ResourceNotFoundException class Cluster: @@ -29,8 +29,8 @@ def __init__( self.hsms: List[Dict[str, Any]] = [] self.hsm_type = hsm_type self.source_backup_id = source_backup_id - self.state = "CREATE_IN_PROGRESS" - self.state_message = "Cluster creation in progress" + self.state = "ACTIVE" + self.state_message = "The cluster is ready for use." self.subnet_mapping = {subnet_id: region_name for subnet_id in subnet_ids} self.vpc_id = "vpc-" + str(uuid.uuid4())[:8] self.network_type = network_type @@ -134,7 +134,6 @@ def __init__(self, region_name: str, account_id: str) -> None: def list_tags( self, resource_id: str, next_token: str, max_results: int ) -> Tuple[List[Dict[str, str]], Optional[str]]: - """List tags for a CloudHSM resource.""" if resource_id not in self.tags: return [], None @@ -143,7 +142,6 @@ def list_tags( if not max_results: return tags, None - # Add padding to the token if it exists if next_token: padding = 4 - (len(next_token) % 4) if padding != 4: @@ -158,7 +156,6 @@ def list_tags( results, token = paginator.paginate(tags) - # Remove padding from the token before returning if token: token = token.rstrip("=") @@ -167,16 +164,9 @@ def list_tags( def tag_resource( self, resource_id: str, tag_list: List[Dict[str, str]] ) -> Dict[str, Any]: - """Add or update tags for a CloudHSM resource.""" - if resource_id is None: - raise ValueError("ResourceId must not be None") - if tag_list is None: - raise ValueError("TagList must not be None") - if resource_id not in self.tags: self.tags[resource_id] = [] - # Update existing tags and add new ones for new_tag in tag_list: tag_exists = False for existing_tag in self.tags[resource_id]: @@ -192,12 +182,6 @@ def tag_resource( def untag_resource( self, resource_id: str, tag_key_list: List[str] ) -> Dict[str, Any]: - """Remove tags from a CloudHSM resource.""" - if resource_id is None: - raise ValueError("ResourceId must not be None") - if tag_key_list is None: - raise ValueError("TagKeyList must not be None") - if resource_id in self.tags: self.tags[resource_id] = [ tag for tag in self.tags[resource_id] if tag["Key"] not in tag_key_list @@ -227,7 +211,6 @@ def create_cluster( ) self.clusters[cluster.cluster_id] = cluster - # Automatically create a backup for the new cluster backup = Backup( cluster_id=cluster.cluster_id, hsm_type=hsm_type, @@ -240,25 +223,20 @@ def create_cluster( return cluster.to_dict() def delete_cluster(self, cluster_id: str) -> Dict[str, Any]: - """Delete a CloudHSM cluster.""" if cluster_id not in self.clusters: - raise ValueError(f"Cluster {cluster_id} not found") + raise ResourceNotFoundException(f"Cluster {cluster_id} not found") cluster = self.clusters[cluster_id] - cluster.state = "DELETE_IN_PROGRESS" - cluster.state_message = "Cluster deletion in progress" - + cluster.state = "DELETED" + cluster.state_message = "Cluster deleted" del self.clusters[cluster_id] - return cluster.to_dict() def describe_clusters( self, filters: Dict[str, List[str]], next_token: str, max_results: int ) -> Tuple[List[Dict[str, Any]], Optional[str]]: - """Describe CloudHSM clusters.""" clusters = list(self.clusters.values()) - # If we have filters, filter the resource if filters: for key, values in filters.items(): if key == "clusterIds": @@ -268,7 +246,6 @@ def describe_clusters( elif key == "vpcIds": clusters = [c for c in clusters if c.vpc_id in values] - # Sort 
clusters by creation timestamp clusters = sorted(clusters, key=lambda x: x.create_timestamp) if not max_results: @@ -285,20 +262,6 @@ def describe_clusters( return results, token def get_resource_policy(self, resource_arn: str) -> Optional[str]: - """Gets the resource policy attached to a CloudHSM backup.""" - if not resource_arn: - raise InvalidRequestException("ResourceArn must not be empty") - - # Verify backup exists - matching_backup = None - for backup in self.backups.values(): - if backup.backup_arn == resource_arn: - matching_backup = backup - break - if not matching_backup: - raise ResourceNotFoundException(f"Backup with ARN {resource_arn} not found") - - # Return the policy if it exists, otherwise return None return self.resource_policies.get(resource_arn) def describe_backups( @@ -309,7 +272,6 @@ def describe_backups( shared: Optional[bool], sort_ascending: Optional[bool], ) -> Tuple[List[Dict[str, Any]], Optional[str]]: - """Describe CloudHSM backups.""" backups = list(self.backups.values()) if filters: @@ -326,7 +288,6 @@ def describe_backups( never_expires = values[0].lower() == "true" backups = [b for b in backups if b.never_expires == never_expires] - # Sort backups backups.sort( key=lambda x: x.create_timestamp, reverse=not sort_ascending if sort_ascending is not None else True, @@ -344,20 +305,6 @@ def describe_backups( return results, token def put_resource_policy(self, resource_arn: str, policy: str) -> Dict[str, str]: - # Find the backup - matching_backup = None - for backup in self.backups.values(): - if backup.backup_arn == resource_arn: - matching_backup = backup - break - if not matching_backup: - raise ResourceNotFoundException(f"Backup with ARN {resource_arn} not found") - - if matching_backup.backup_state != "READY": - raise InvalidRequestException( - f"Backup {matching_backup.backup_id} is not in READY state" - ) - self.resource_policies[resource_arn] = policy return {"ResourceArn": resource_arn, "Policy": policy} diff --git a/moto/cloudhsmv2/responses.py b/moto/cloudhsmv2/responses.py index d05d66841671..54acea5f7601 100644 --- a/moto/cloudhsmv2/responses.py +++ b/moto/cloudhsmv2/responses.py @@ -94,6 +94,7 @@ def delete_cluster(self) -> str: params = json.loads(raw_params) cluster_id = params.get("ClusterId") + cluster = self.cloudhsmv2_backend.delete_cluster(cluster_id=cluster_id) return json.dumps({"Cluster": cluster}, cls=DateTimeEncoder) diff --git a/tests/test_cloudhsmv2/test_cloudhsmv2.py b/tests/test_cloudhsmv2/test_cloudhsmv2.py index 7621a35efb48..a952954f2841 100644 --- a/tests/test_cloudhsmv2/test_cloudhsmv2.py +++ b/tests/test_cloudhsmv2/test_cloudhsmv2.py @@ -125,12 +125,11 @@ def test_create_cluster(): assert "ClusterId" in cluster assert isinstance(cluster["CreateTimestamp"], datetime) assert cluster["HsmType"] == "hsm1.medium" - assert cluster["State"] == "CREATE_IN_PROGRESS" + assert cluster["State"] == "ACTIVE" assert cluster["SubnetMapping"] == {"subnet-12345678": "us-east-1"} assert cluster["TagList"] == [{"Key": "Environment", "Value": "Production"}] assert "VpcId" in cluster - # Verify the cluster can be found in describe_clusters clusters = client.describe_clusters()["Clusters"] assert len(clusters) == 1 assert clusters[0]["ClusterId"] == cluster["ClusterId"] @@ -148,113 +147,85 @@ def test_delete_cluster(): ) cluster_id = response["Cluster"]["ClusterId"] - # Delete the cluster delete_response = client.delete_cluster(ClusterId=cluster_id) deleted_cluster = delete_response["Cluster"] assert deleted_cluster["ClusterId"] == cluster_id - 
assert deleted_cluster["State"] == "DELETE_IN_PROGRESS" - assert deleted_cluster["StateMessage"] == "Cluster deletion in progress" + assert deleted_cluster["State"] == "DELETED" + assert deleted_cluster["StateMessage"] == "Cluster deleted" clusters = client.describe_clusters()["Clusters"] assert len(clusters) == 0 -# TODO: Fix this test later -# @mock_aws -# def test_delete_nonexistent_cluster(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# with pytest.raises(client.exceptions.CloudHsmClientException) as ex: -# client.delete_cluster(ClusterId="non-existent-cluster") - -# assert "Cluster non-existent-cluster not found" in str(ex.value) - - -# @mock_aws -# def test_describe_clusters_no_clusters(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") -# response = client.describe_clusters() - -# assert response["Clusters"] == [] -# assert "NextToken" not in response - - -# @mock_aws -# def test_describe_clusters_with_filters(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# # Create two clusters -# cluster1 = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"], -# NetworkType="IPV4", -# Mode="FIPS" -# ) -# cluster2 = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-87654321"], -# NetworkType="IPV4", -# Mode="FIPS" -# ) - -# # Test filtering by cluster ID -# response = client.describe_clusters( -# Filters={ -# "clusterIds": [cluster1["Cluster"]["ClusterId"]] -# } -# ) -# assert len(response["Clusters"]) == 1 -# assert response["Clusters"][0]["ClusterId"] == cluster1["Cluster"]["ClusterId"] - -# # Test filtering by state -# response = client.describe_clusters( -# Filters={ -# "states": ["CREATE_IN_PROGRESS"] -# } -# ) -# assert len(response["Clusters"]) == 2 # Both clusters are in CREATE_IN_PROGRESS state - - -# @mock_aws -# def test_describe_clusters_pagination(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# # Create three clusters -# for _ in range(3): -# client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"], -# NetworkType="IPV4", -# Mode="FIPS" -# ) - -# # Test pagination -# response = client.describe_clusters(MaxResults=2) -# assert len(response["Clusters"]) == 2 -# assert "NextToken" in response - -# # Get remaining clusters -# response2 = client.describe_clusters( -# MaxResults=2, -# NextToken=response["NextToken"] -# ) -# assert len(response2["Clusters"]) == 1 -# assert "NextToken" not in response2 +@mock_aws +def test_describe_clusters_no_clusters(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") + response = client.describe_clusters() + + assert response["Clusters"] == [] + assert "NextToken" not in response + + +@mock_aws +def test_describe_clusters_with_filters(): + client = boto3.client("cloudhsmv2", region_name="us-east-1") + + cluster1 = client.create_cluster( + HsmType="hsm1.medium", + SubnetIds=["subnet-12345678"], + NetworkType="IPV4", + Mode="FIPS", + ) + + client.create_cluster( + HsmType="hsm1.medium", + SubnetIds=["subnet-87654321"], + NetworkType="IPV4", + Mode="FIPS", + ) + + response = client.describe_clusters( + Filters={"clusterIds": [cluster1["Cluster"]["ClusterId"]]} + ) + assert len(response["Clusters"]) == 1 + assert response["Clusters"][0]["ClusterId"] == cluster1["Cluster"]["ClusterId"] + + # Test filtering by state + response = client.describe_clusters(Filters={"states": ["ACTIVE"]}) + assert len(response["Clusters"]) == 2 + + +@mock_aws +def test_describe_clusters_pagination(): + client 
= boto3.client("cloudhsmv2", region_name="us-east-1") + + for _ in range(3): + client.create_cluster( + HsmType="hsm1.medium", + SubnetIds=["subnet-12345678"], + NetworkType="IPV4", + Mode="FIPS", + ) + + response = client.describe_clusters(MaxResults=2) + assert len(response["Clusters"]) == 2 + assert "NextToken" in response + + response2 = client.describe_clusters(MaxResults=2, NextToken=response["NextToken"]) + assert len(response2["Clusters"]) == 1 + assert "NextToken" not in response2 @mock_aws def test_get_resource_policy(): client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create a cluster which will automatically create a backup client.create_cluster(HsmType="hsm1.medium", SubnetIds=["subnet-12345678"]) - # Get the backup ARN backup_response = client.describe_backups() backup_arn = backup_response["Backups"][0]["BackupArn"] - # Create a sample policy policy = { "Version": "2012-10-17", "Statement": [ @@ -268,49 +239,13 @@ def test_get_resource_policy(): ], } - # Put the resource policy client.put_resource_policy(ResourceArn=backup_arn, Policy=json.dumps(policy)) - # Get the resource policy response = client.get_resource_policy(ResourceArn=backup_arn) - # Verify response structure assert "Policy" in response assert json.loads(response["Policy"]) == policy -# @mock_aws -# def test_get_resource_policy_nonexistent_resource(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# invalid_arn = "arn:aws:cloudhsm:us-east-1:123456789012:cluster/invalid-id" - -# with pytest.raises(client.exceptions.CloudHsmClientException) as exc: -# client.get_resource_policy(ResourceArn=invalid_arn) - -# err = exc.value.response["Error"] -# assert err["Code"] == "ResourceNotFoundException" -# assert "Cluster invalid-id not found" in err["Message"] - -# @mock_aws -# def test_get_resource_policy_no_policy(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# # Create a cluster without attaching a policy -# response = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"] -# ) -# cluster_id = response["Cluster"]["ClusterId"] -# resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" - -# # Get the resource policy -# response = client.get_resource_policy(ResourceArn=resource_arn) - -# # Verify response structure when no policy exists -# assert "Policy" in response -# assert response["Policy"] is None - - @mock_aws def test_describe_backups(): client = boto3.client("cloudhsmv2", region_name="us-east-1") @@ -321,7 +256,6 @@ def test_describe_backups(): ) cluster_id = cluster["Cluster"]["ClusterId"] - # Verify backup was automatically created response = client.describe_backups() assert "Backups" in response assert len(response["Backups"]) == 1 @@ -331,7 +265,6 @@ def test_describe_backups(): assert backup["HsmType"] == "hsm1.medium" assert backup["BackupState"] == "READY" - # # Test filters filtered_response = client.describe_backups(Filters={"clusterIds": [cluster_id]}) assert len(filtered_response["Backups"]) == 1 assert filtered_response["Backups"][0]["ClusterId"] == cluster_id @@ -341,14 +274,11 @@ def test_describe_backups(): def test_put_resource_policy(): client = boto3.client("cloudhsmv2", region_name="us-east-1") - # Create a cluster which will automatically create a backup client.create_cluster(HsmType="hsm1.medium", SubnetIds=["subnet-12345678"]) - # Get the backup ARN backup_response = client.describe_backups() backup_arn = backup_response["Backups"][0]["BackupArn"] - # Create a sample policy policy = { 
"Version": "2012-10-17", "Statement": [ @@ -362,57 +292,11 @@ def test_put_resource_policy(): ], } - # Put the resource policy response = client.put_resource_policy( ResourceArn=backup_arn, Policy=json.dumps(policy) ) - # Verify response structure assert "ResourceArn" in response assert "Policy" in response assert response["ResourceArn"] == backup_arn assert json.loads(response["Policy"]) == policy - - -# TODO: Fix this test later -# @mock_aws -# def test_put_resource_policy_invalid_arn(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# invalid_arn = "arn:aws:cloudhsm:us-east-1:123456789012:cluster/invalid-id" -# policy = json.dumps({"Version": "2012-10-17", "Statement": []}) - -# with pytest.raises(ClientError) as exc: -# client.put_resource_policy( -# ResourceArn=invalid_arn, -# Policy=policy -# ) - -# err = exc.value.response["Error"] -# assert err["Code"] == "ResourceNotFoundException" -# assert "Cluster invalid-id not found" in err["Message"] - - -# TODO: Fix this test later -# @mock_aws -# def test_put_resource_policy_invalid_policy(): -# client = boto3.client("cloudhsmv2", region_name="us-east-1") - -# # Create a cluster to get a valid resource ARN -# response = client.create_cluster( -# HsmType="hsm1.medium", -# SubnetIds=["subnet-12345678"] -# ) -# cluster_id = response["Cluster"]["ClusterId"] -# resource_arn = f"arn:aws:cloudhsm:us-east-1:123456789012:cluster/{cluster_id}" - -# # Try to put an invalid policy -# with pytest.raises(ClientError) as exc: -# client.put_resource_policy( -# ResourceArn=resource_arn, -# Policy="invalid-policy-document" -# ) - -# err = exc.value.response["Error"] -# assert err["Code"] == "InvalidRequestException" -# assert "Invalid policy document" in err["Message"] diff --git a/tests/test_cloudhsmv2/test_server.py b/tests/test_cloudhsmv2/test_server.py deleted file mode 100644 index 9c5fd94929c0..000000000000 --- a/tests/test_cloudhsmv2/test_server.py +++ /dev/null @@ -1,13 +0,0 @@ -"""Test different server responses.""" - -import moto.server as server - - -def test_cloudhsmv2_list(): - backend = server.create_backend_app("cloudhsmv2") - test_client = backend.test_client() - - resp = test_client.get("/") - - assert resp.status_code == 200 - assert "?" in str(resp.data) From e08a8694f781c3470ff5e6241ac780bc8cc06c2a Mon Sep 17 00:00:00 2001 From: Aman Date: Fri, 7 Feb 2025 16:33:09 -0500 Subject: [PATCH 15/15] Ran New Service Script --- IMPLEMENTATION_COVERAGE.md | 59 +++++++++++++++++++++++++++++-- docs/docs/services/cloudhsmv2.rst | 37 +++++++++++++++++++ docs/docs/services/cloudtrail.rst | 1 + 3 files changed, 95 insertions(+), 2 deletions(-) create mode 100644 docs/docs/services/cloudhsmv2.rst diff --git a/IMPLEMENTATION_COVERAGE.md b/IMPLEMENTATION_COVERAGE.md index c34d699462f2..e68cb5bcd7da 100644 --- a/IMPLEMENTATION_COVERAGE.md +++ b/IMPLEMENTATION_COVERAGE.md @@ -1210,6 +1210,30 @@ - [ ] update_vpc_origin +## cloudhsmv2 +
+50% implemented + +- [ ] copy_backup_to_region +- [X] create_cluster +- [ ] create_hsm +- [ ] delete_backup +- [X] delete_cluster +- [ ] delete_hsm +- [ ] delete_resource_policy +- [X] describe_backups +- [X] describe_clusters +- [X] get_resource_policy +- [ ] initialize_cluster +- [X] list_tags +- [ ] modify_backup_attributes +- [ ] modify_cluster +- [X] put_resource_policy +- [ ] restore_backup +- [X] tag_resource +- [X] untag_resource +
+ ## cloudtrail
28% implemented @@ -1258,6 +1282,7 @@ - [ ] register_organization_delegated_admin - [X] remove_tags - [ ] restore_event_data_store +- [ ] search_sample_queries - [ ] start_dashboard_refresh - [ ] start_event_data_store_ingestion - [ ] start_import @@ -7757,6 +7782,38 @@ - [ ] update_storage_lens_group
+## s3tables +
+53% implemented + +- [X] create_namespace +- [X] create_table +- [X] create_table_bucket +- [X] delete_namespace +- [X] delete_table +- [X] delete_table_bucket +- [ ] delete_table_bucket_policy +- [ ] delete_table_policy +- [X] get_namespace +- [X] get_table +- [X] get_table_bucket +- [ ] get_table_bucket_maintenance_configuration +- [ ] get_table_bucket_policy +- [ ] get_table_maintenance_configuration +- [ ] get_table_maintenance_job_status +- [ ] get_table_metadata_location +- [ ] get_table_policy +- [X] list_namespaces +- [X] list_table_buckets +- [X] list_tables +- [ ] put_table_bucket_maintenance_configuration +- [ ] put_table_bucket_policy +- [ ] put_table_maintenance_configuration +- [ ] put_table_policy +- [X] rename_table +- [X] update_table_metadata_location +
+ ## sagemaker
31% implemented @@ -9414,7 +9471,6 @@ - clouddirectory - cloudfront-keyvaluestore - cloudhsm -- cloudhsmv2 - cloudsearch - cloudsearchdomain - cloudtrail-data @@ -9584,7 +9640,6 @@ - route53profiles - rum - s3outposts -- s3tables - sagemaker-a2i-runtime - sagemaker-edge - sagemaker-featurestore-runtime diff --git a/docs/docs/services/cloudhsmv2.rst b/docs/docs/services/cloudhsmv2.rst new file mode 100644 index 000000000000..273b16c7156d --- /dev/null +++ b/docs/docs/services/cloudhsmv2.rst @@ -0,0 +1,37 @@ +.. _implementedservice_cloudhsmv2: + +.. |start-h3| raw:: html + +
    <h3>
+ +.. |end-h3| raw:: html + +
    </h3>
+ +========== +cloudhsmv2 +========== + +.. autoclass:: moto.cloudhsmv2.models.CloudHSMV2Backend + +|start-h3| Implemented features for this service |end-h3| + +- [ ] copy_backup_to_region +- [X] create_cluster +- [ ] create_hsm +- [ ] delete_backup +- [X] delete_cluster +- [ ] delete_hsm +- [ ] delete_resource_policy +- [X] describe_backups +- [X] describe_clusters +- [X] get_resource_policy +- [ ] initialize_cluster +- [X] list_tags +- [ ] modify_backup_attributes +- [ ] modify_cluster +- [X] put_resource_policy +- [ ] restore_backup +- [X] tag_resource +- [X] untag_resource + diff --git a/docs/docs/services/cloudtrail.rst b/docs/docs/services/cloudtrail.rst index 083d47c627a7..28fc2b37ffcb 100644 --- a/docs/docs/services/cloudtrail.rst +++ b/docs/docs/services/cloudtrail.rst @@ -64,6 +64,7 @@ cloudtrail - [ ] register_organization_delegated_admin - [X] remove_tags - [ ] restore_event_data_store +- [ ] search_sample_queries - [ ] start_dashboard_refresh - [ ] start_event_data_store_ingestion - [ ] start_import