diff --git a/plugins/inventory/aws_ec.py b/plugins/inventory/aws_ec.py
new file mode 100644
index 00000000000..e86708bf2ab
--- /dev/null
+++ b/plugins/inventory/aws_ec.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+name: aws_ec
+short_description: AWS ElastiCache inventory source
+description:
+  - Get ElastiCache clusters and replication groups from Amazon Web Services.
+  - Uses a YAML configuration file that ends with aws_ec.(yml|yaml).
+options:
+  regions:
+    description:
+      - A list of regions in which to describe ElastiCache clusters and replication groups. Available regions are listed here
+        U(https://docs.aws.amazon.com/fr_fr/AmazonElastiCache/latest/red-ug/RegionsAndAZs.html).
+    default: []
+  filters:
+    description:
+      - A dictionary of filter value pairs. Available filters are listed here
+        U(https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_Filter.html).
+    default: {}
+  strict_permissions:
+    description:
+      - By default, if an AccessDenied exception is encountered this plugin will fail. You can set strict_permissions to
+        False in the inventory config file, which allows the affected queries to be gracefully skipped.
+    type: bool
+    default: True
+  statuses:
+    description: A list of desired states for clusters and replication groups to be added to inventory. Set to ['all'] as a shorthand to find everything.
+    type: list
+    elements: str
+    default:
+      - creating
+      - available
+  hostvars_prefix:
+    description:
+      - The prefix for host variable names coming from AWS.
+    type: str
+    version_added: 3.1.0
+  hostvars_suffix:
+    description:
+      - The suffix for host variable names coming from AWS.
+    type: str
+    version_added: 3.1.0
+notes:
+  - Ansible versions prior to 2.10 should use the fully qualified plugin name 'amazon.aws.aws_ec'.
+extends_documentation_fragment:
+  - inventory_cache
+  - constructed
+  - amazon.aws.boto3
+  - amazon.aws.common.plugins
+  - amazon.aws.region.plugins
+  - amazon.aws.assume_role.plugins
+author:
+  - Your friendly neighbourhood Rafael (@Rafjt/@Raf211)
+"""
+
+EXAMPLES = r"""
+plugin: amazon.aws.aws_ec
+regions:
+  - us-east-1
+  - ca-central-1
+hostvars_prefix: aws_
+hostvars_suffix: _ec
+"""
+
+try:
+    import botocore
+except ImportError:
+    pass  # will be captured by imported HAS_BOTO3
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase
+
+
+def _find_ec_clusters_with_valid_statuses(replication_groups, cache_clusters, statuses):
+    if "all" in statuses:
+        return replication_groups + cache_clusters
+    valid_clusters = []
+    for replication_group in replication_groups:
+        if replication_group.get("Status") in statuses:
+            valid_clusters.append(replication_group)
+    for cache_cluster in cache_clusters:
+        if cache_cluster.get("CacheClusterStatus") in statuses:
+            valid_clusters.append(cache_cluster)
+        elif cache_cluster.get("Status") in statuses:
+            valid_clusters.append(cache_cluster)
+    return valid_clusters
+
+
+def _add_tags_for_ec_clusters(connection, clusters, strict):
+    for cluster in clusters:
+        if "ReplicationGroupId" in cluster:
+            resource_arn = cluster["ARN"]
+            try:
+                tags = connection.list_tags_for_resource(ResourceName=resource_arn)["TagList"]
+            except is_boto3_error_code("AccessDenied") as e:
+                if not strict:
+                    tags = []
+                else:
+                    raise e
+            cluster["Tags"] = tags
+
+
+def describe_resource_with_tags(func):
+    def describe_wrapper(connection, strict=False):
+        try:
+            results = func(connection=connection)
+            if "ReplicationGroups" in results:
+                results = results["ReplicationGroups"]
+            else:
+                results = results["CacheClusters"]
+            _add_tags_for_ec_clusters(connection, results, strict)
+        except is_boto3_error_code("AccessDenied") as e:  # pylint: disable=duplicate-except
+            if not strict:
+                return []
+            raise AnsibleError(f"Failed to query ElastiCache: {to_native(e)}")
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:  # pylint: disable=duplicate-except
+            raise AnsibleError(f"Failed to query ElastiCache: {to_native(e)}")
+
+        return results
+
+    return describe_wrapper
+
+
+@describe_resource_with_tags
+def _describe_replication_groups(connection):
+    paginator = connection.get_paginator("describe_replication_groups")
+    return paginator.paginate().build_full_result()
+
+
+@describe_resource_with_tags
+def _describe_cache_clusters(connection):
+    paginator = connection.get_paginator("describe_cache_clusters")
+    return paginator.paginate().build_full_result()
+
+
+class InventoryModule(AWSInventoryBase):
+    NAME = "amazon.aws.aws_ec"
+    INVENTORY_FILE_SUFFIXES = ("aws_ec.yml", "aws_ec.yaml")
+
+    def __init__(self):
+        super().__init__()
+        self.credentials = {}
+
+    def _populate(self, replication_groups, cache_clusters):
+        cluster_group_name = "cluster_group"
+        replication_group_name = "replication_group"
+
+        if replication_groups:
+            self.inventory.add_group(replication_group_name)
+            self._add_hosts(hosts=replication_groups, group=replication_group_name)
+            self.inventory.add_child("all", replication_group_name)
+
+        if cache_clusters:
+            self.inventory.add_group(cluster_group_name)
+            self._add_hosts(hosts=cache_clusters, group=cluster_group_name)
+            self.inventory.add_child("all", cluster_group_name)
+
+    def _populate_from_source(self, source_data):
+        hostvars = source_data.pop("_meta", {}).get("hostvars", {})
+        for group in source_data:
+            if group == "all":
+                continue
+            self.inventory.add_group(group)
+            hosts = source_data[group].get("hosts", [])
+            for host in hosts:
+                self._populate_host_vars([host], hostvars.get(host, {}), group)
+            self.inventory.add_child("all", group)
+
+    def _add_hosts(self, hosts, group):
+        """
+        :param hosts: a list of hosts to be added to a group
+        :param group: the name of the group to which the hosts belong
+        """
+        for host in hosts:
+            if host["ARN"].split(":")[5] == "replicationgroup":
+                host_type = "replicationgroup"
+                host_name = host["ReplicationGroupId"]
+            else:
+                host_type = "cluster"
+                host_name = host["CacheClusterId"]
+
+            host = camel_dict_to_snake_dict(host, ignore_list=["Tags"])
+            host["tags"] = boto3_tag_list_to_ansible_dict(host.get("tags", []))
+            host["type"] = host_type
+
+            if "availability_zone" in host:
+                host["region"] = host["availability_zone"][:-1]
+            elif "availability_zones" in host:
+                host["region"] = host["availability_zones"][0][:-1]
+
+            self.inventory.add_host(host_name, group=group)
+            hostvars_prefix = self.get_option("hostvars_prefix")
+            hostvars_suffix = self.get_option("hostvars_suffix")
+            new_vars = dict()
+            for hostvar, hostval in host.items():
+                if hostvars_prefix:
+                    hostvar = hostvars_prefix + hostvar
+                if hostvars_suffix:
+                    hostvar = hostvar + hostvars_suffix
+                new_vars[hostvar] = hostval
+                self.inventory.set_variable(host_name, hostvar, hostval)
+            host.update(new_vars)
+
+            strict = self.get_option("strict")
+            self._set_composite_vars(self.get_option("compose"), host, host_name, strict=strict)
+            self._add_host_to_composed_groups(self.get_option("groups"), host, host_name, strict=strict)
+            self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host, host_name, strict=strict)
+
+    def _get_all_replication_groups(self, regions, strict, statuses):
+        replication_groups = []
+        for connection, _region in self.all_clients("elasticache"):
+            replication_groups += _describe_replication_groups(connection, strict=strict)
+        sorted_replication_groups = sorted(replication_groups, key=lambda x: x["ReplicationGroupId"])
+        return _find_ec_clusters_with_valid_statuses(sorted_replication_groups, [], statuses)
+
+    def _get_all_cache_clusters(self, regions, strict, statuses):
+        cache_clusters = []
+        for connection, _region in self.all_clients("elasticache"):
+            cache_clusters += _describe_cache_clusters(connection, strict=strict)
+        sorted_cache_clusters = sorted(cache_clusters, key=lambda x: x["CacheClusterId"])
+        return _find_ec_clusters_with_valid_statuses([], sorted_cache_clusters, statuses)
+
+    def parse(self, inventory, loader, path, cache=True):
+        super().parse(inventory, loader, path, cache=cache)
+
+        regions = self.get_option("regions")
+        strict_permissions = self.get_option("strict_permissions")
+        statuses = self.get_option("statuses")
+
+        result_was_cached, cached_result = self.get_cached_result(path, cache)
+        if result_was_cached:
+            self._populate_from_source(cached_result)
+            return
+
+        replication_groups = self._get_all_replication_groups(
+            regions=regions,
+            strict=strict_permissions,
+            statuses=statuses,
+        )
+
+        cache_clusters = self._get_all_cache_clusters(
+            regions=regions,
+            strict=strict_permissions,
+            statuses=statuses,
+        )
+
+        self._populate(replication_groups, cache_clusters)
diff --git a/tests/unit/plugins/inventory/test_aws_ec.py b/tests/unit/plugins/inventory/test_aws_ec.py
new file mode 100644
index 00000000000..102c7670ea5
--- /dev/null
+++ b/tests/unit/plugins/inventory/test_aws_ec.py
@@ -0,0 +1,445 @@
+# -*- coding: utf-8 -*-
+
+# Copyright 2024 Your friendly neighbourhood Rafael (@Rafjt/@Raf211)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
+
+import copy
+import random
+import string
+from unittest.mock import MagicMock
+from unittest.mock import call
+from unittest.mock import patch
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+from ansible.errors import AnsibleError
+
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import InventoryModule
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import _add_tags_for_ec_clusters
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import _describe_cache_clusters
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import _describe_replication_groups
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import _find_ec_clusters_with_valid_statuses
+from ansible_collections.amazon.aws.plugins.inventory.aws_ec import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_aws_ec.py requires the python modules 'boto3' and 'botocore'")
+
+
+def make_clienterror_exception(code="AccessDenied"):
+    return botocore.exceptions.ClientError(
+        {
+            "Error": {"Code": code, "Message": "User is not authorized to perform: xxx on resource: user yyyy"},
+            "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+        },
+        "getXXX",
+    )
+
+
+@pytest.fixture()
+def inventory():
+    inventory = InventoryModule()
+    inventory.inventory = MagicMock()
+    inventory._populate_host_vars = MagicMock()
+
+    inventory.all_clients = MagicMock()
+    inventory.get_option = MagicMock()
+
+    inventory._set_composite_vars = MagicMock()
+    inventory._add_host_to_composed_groups = MagicMock()
+    inventory._add_host_to_keyed_groups = MagicMock()
+    inventory._read_config_data = MagicMock()
+    inventory._set_credentials = MagicMock()
+
+    inventory.get_cache_key = MagicMock()
+
+    inventory._cache = {}
+    return inventory
+
+
+@pytest.fixture()
+def connection():
+    conn = MagicMock()
+    return conn
+
+
+@pytest.mark.parametrize(
+    "suffix,result",
+    [
+        ("aws_ec.yml", True),
+        ("aws_ec.yaml", True),
+        ("aws_EC.yml", False),
+        ("AWS_ec.yaml", False),
+    ],
+)
+def test_inventory_verify_file_suffix(inventory, suffix, result, tmp_path):
+    test_dir = tmp_path / "test_aws_ec"
+    test_dir.mkdir()
+    inventory_file = "inventory" + suffix
+    inventory_file = test_dir / inventory_file
+    inventory_file.write_text("my inventory")
+    assert result == inventory.verify_file(str(inventory_file))
+
+
+def test_inventory_verify_file_with_missing_file(inventory):
+    inventory_file = "this_file_does_not_exist_aws_ec.yml"
+    assert not inventory.verify_file(inventory_file)
+
+
+def generate_random_string(with_digits=True, with_punctuation=True, length=16):
+    data = string.ascii_letters
+    if with_digits:
+        data += string.digits
+    if with_punctuation:
+        data += string.punctuation
+    return "".join([random.choice(data) for i in range(length)])
+
+
+@pytest.mark.parametrize(
+    "replication_groups, cache_clusters, statuses, expected",
+    [
+        (
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"replication_group": "rg2", "Status": "Creating"},
+            ],
+            [
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+                {"host": "host2", "CacheClusterStatus": "Creating", "Status": "active"},
+                {"host": "host3", "CacheClusterStatus": "Stopped", "Status": "active"},
+                {"host": "host4", "CacheClusterStatus": "Configuring", "Status": "active"},
+            ],
+            ["Available"],
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+            ],
+        ),
+        (
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"replication_group": "rg2", "Status": "Creating"},
+            ],
+            [
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+                {"host": "host2", "CacheClusterStatus": "Creating", "Status": "active"},
+                {"host": "host3", "CacheClusterStatus": "Stopped", "Status": "active"},
+                {"host": "host4", "CacheClusterStatus": "Configuring", "Status": "active"},
+            ],
+            ["all"],
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"replication_group": "rg2", "Status": "Creating"},
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+                {"host": "host2", "CacheClusterStatus": "Creating", "Status": "active"},
+                {"host": "host3", "CacheClusterStatus": "Stopped", "Status": "active"},
+                {"host": "host4", "CacheClusterStatus": "Configuring", "Status": "active"},
+            ],
+        ),
+        (
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"replication_group": "rg2", "Status": "Creating"},
+            ],
+            [
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+                {"host": "host2", "CacheClusterStatus": "Creating", "Status": "Available"},
+                {"host": "host3", "CacheClusterStatus": "Stopped", "Status": "active"},
+                {"host": "host4", "CacheClusterStatus": "Configuring", "Status": "active"},
+            ],
+            ["Available"],
+            [
+                {"replication_group": "rg1", "Status": "Available"},
+                {"host": "host1", "CacheClusterStatus": "Available", "Status": "active"},
+                {"host": "host2", "CacheClusterStatus": "Creating", "Status": "Available"},
+            ],
+        ),
+    ],
+)
+def test_find_ec_clusters_with_valid_statuses(replication_groups, cache_clusters, statuses, expected):
+    assert expected == _find_ec_clusters_with_valid_statuses(replication_groups, cache_clusters, statuses)
+
+
+@pytest.mark.parametrize("length", range(0, 10, 2))
+def test_inventory_populate(inventory, length):
+    cluster_group_name = "cluster_group"
+    replication_group_name = "replication_group"
+
+    replication_groups = [f"replication_group_{int(i)}" for i in range(length)]
+    cache_clusters = [f"cache_cluster_{int(i)}" for i in range(length)]
+
+    inventory._add_hosts = MagicMock()
+    inventory.inventory.add_group = MagicMock()
+    inventory.inventory.add_child = MagicMock()
+
+    inventory._populate(replication_groups=replication_groups, cache_clusters=cache_clusters)
+
+    if len(replication_groups) == 0 and len(cache_clusters) == 0:
+        inventory._add_hosts.assert_not_called()
+        inventory.inventory.add_child.assert_not_called()
+    else:
+        if replication_groups:
+            inventory._add_hosts.assert_any_call(hosts=replication_groups, group=replication_group_name)
+            inventory.inventory.add_child.assert_any_call("all", replication_group_name)
+        if cache_clusters:
+            inventory._add_hosts.assert_any_call(hosts=cache_clusters, group=cluster_group_name)
+            inventory.inventory.add_child.assert_any_call("all", cluster_group_name)
+
+
+def test_inventory_populate_from_source(inventory):
+    source_data = {
+        "_meta": {
+            "hostvars": {
+                "host_1_0": {"var10": "value10"},
+                "host_2": {"var2": "value2"},
+                "host_3": {"var3": ["value30", "value31", "value32"]},
+            }
+        },
+        "all": {"hosts": ["host_1_0", "host_1_1", "host_2", "host_3"]},
+        "aws_host_1": {"hosts": ["host_1_0", "host_1_1"]},
+        "aws_host_2": {"hosts": ["host_2"]},
+        "aws_host_3": {"hosts": ["host_3"]},
+    }
+
+    inventory._populate_from_source(source_data)
+    inventory.inventory.add_group.assert_has_calls(
+        [
+            call("aws_host_1"),
+            call("aws_host_2"),
+            call("aws_host_3"),
+        ],
+        any_order=True,
+    )
+    inventory.inventory.add_child.assert_has_calls(
+        [
+            call("all", "aws_host_1"),
+            call("all", "aws_host_2"),
+            call("all", "aws_host_3"),
+        ],
+        any_order=True,
+    )
+
+    inventory._populate_host_vars.assert_has_calls(
+        [
+            call(["host_1_0"], {"var10": "value10"}, "aws_host_1"),
+            call(["host_1_1"], {}, "aws_host_1"),
+            call(["host_2"], {"var2": "value2"}, "aws_host_2"),
+            call(["host_3"], {"var3": ["value30", "value31", "value32"]}, "aws_host_3"),
+        ],
+        any_order=True,
+    )
+
+
+@pytest.mark.parametrize("strict", [True, False])
+def test_add_tags_for_ec_clusters_with_no_hosts(connection, strict):
+    hosts = []
+
+    _add_tags_for_ec_clusters(connection, hosts, strict)
+    connection.list_tags_for_resource.assert_not_called()
+
+
+def test_add_tags_for_ec_hosts_with_hosts(connection):
+    hosts = [
+        {"ReplicationGroupId": "exemple-001", "ARN": "ARN_test"},
+    ]
+
+    ec_hosts_tags = {
+        "ARN_test": {"TagList": ["tag1=ARN_test", "phase=units"]},
+    }
+    connection.list_tags_for_resource.side_effect = lambda **kwargs: ec_hosts_tags.get(kwargs.get("ResourceName"))
+
+    _add_tags_for_ec_clusters(connection, hosts, strict=False)
+
+    assert hosts == [
+        {"ReplicationGroupId": "exemple-001", "ARN": "ARN_test", "Tags": ["tag1=ARN_test", "phase=units"]},
+    ]
+
+
+def test_add_tags_for_ec_clusters_with_failure_not_strict(connection):
+    hosts = [{"ReplicationGroupId": "cachearn1", "ARN": "ARN_test"}]
+
+    connection.list_tags_for_resource.side_effect = make_clienterror_exception()
+
+    _add_tags_for_ec_clusters(connection, hosts, strict=False)
+
+    assert hosts == [
+        {"ReplicationGroupId": "cachearn1", "ARN": "ARN_test", "Tags": []},
+    ]
+
+
+def test_add_tags_for_ec_clusters_with_failure_strict(connection):
+    hosts = [{"ReplicationGroupId": "cachearn1", "ARN": "ARN_test"}]
+
+    connection.list_tags_for_resource.side_effect = make_clienterror_exception()
+
+    with pytest.raises(botocore.exceptions.ClientError):
+        _add_tags_for_ec_clusters(connection, hosts, strict=True)
+
+
+ADD_TAGS_FOR_EC_HOSTS = "ansible_collections.amazon.aws.plugins.inventory.aws_ec._add_tags_for_ec_clusters"
+
+
+@patch(ADD_TAGS_FOR_EC_HOSTS)
+def test_describe_replication_groups(add_tags_for_ec_clusters, connection):
+    replication_group = {
"my_sample_cache", + "CacheClusterStatus": "Stopped", + "ReplicationGroupId": "replication_id_01", + "CacheClusterArn": "arn:xxx:xxxx", + "DeletionProtection": True, + } + + mock_paginator = MagicMock() + mock_paginator.paginate().build_full_result.return_value = {"ReplicationGroups": [replication_group]} + connection.get_paginator.return_value = mock_paginator + + filters = generate_random_string(with_punctuation=False) + strict = False + + result = _describe_replication_groups(connection=connection, strict=strict) + + assert result == [replication_group] + + add_tags_for_ec_clusters.assert_called_with(connection, result, strict) + +@pytest.mark.parametrize("strict", [True, False]) +@patch(ADD_TAGS_FOR_EC_HOSTS) +def test_describe_replication_groups_with_access_denied(add_tags_for_ec_clusters, connection, strict): + paginator = MagicMock() + paginator.paginate.side_effect = make_clienterror_exception() + + connection.get_paginator.return_value = paginator + + filters = generate_random_string(with_punctuation=False) + + if strict: + with pytest.raises(AnsibleError): + _describe_replication_groups(connection=connection, strict=strict) + else: + result = _describe_replication_groups(connection=connection, strict=strict) + assert result == [] + + add_tags_for_ec_clusters.assert_not_called() + + +@patch(ADD_TAGS_FOR_EC_HOSTS) +def test_describe_replication_groups_with_client_error(add_tags_for_ec_clusters, connection): + paginator = MagicMock() + paginator.paginate.side_effect = make_clienterror_exception(code="Unknown") + + connection.get_paginator.return_value = paginator + + filters = generate_random_string(with_punctuation=False) + + with pytest.raises(AnsibleError): + _describe_replication_groups(connection=connection, strict=False) + + add_tags_for_ec_clusters.assert_not_called() + +import pytest +import random +from unittest.mock import MagicMock, patch +from ansible_collections.amazon.aws.plugins.inventory.aws_ec import ( + _describe_replication_groups, + _find_ec_clusters_with_valid_statuses, + AWSInventoryBase, +) + +DESCRIBE_REPLICATION_GROUPS = "ansible_collections.amazon.aws.plugins.inventory.aws_ec._describe_replication_groups" +FIND_EC_CLUSTERS_WITH_VALID_STATUSES = ( + "ansible_collections.amazon.aws.plugins.inventory.aws_ec._find_ec_clusters_with_valid_statuses" +) + +BASE_INVENTORY_PARSE = "ansible_collections.amazon.aws.plugins.inventory.aws_ec.AWSInventoryBase.parse" + + +@pytest.mark.parametrize("include_clusters", [True, False]) +@pytest.mark.parametrize("filter_replication_group_id", [True, False]) +@pytest.mark.parametrize("user_cache_directive", [True, False]) +@pytest.mark.parametrize("cache", [True, False]) +@pytest.mark.parametrize("cache_hit", [True, False]) +@patch(BASE_INVENTORY_PARSE) +def test_inventory_parse_ec( + m_parse, inventory, include_clusters, filter_replication_group_id, user_cache_directive, cache, cache_hit +): + + inventory_data = MagicMock() + loader = MagicMock() + path = generate_random_string(with_punctuation=False, with_digits=False) + + + options = { + "regions": [f"us-east-{d}" for d in range(1, 3)], + "strict_permissions": random.choice((True, False)), + "statuses": ["available", "creating"], + } + inventory.get_option.side_effect = lambda opt: options.get(opt) + + + cache_key = path + generate_random_string() + inventory.get_cache_key.return_value = cache_key + cached_data = generate_random_string() + if cache_hit: + inventory._cache[cache_key] = cached_data + + inventory._populate = MagicMock() + inventory._populate_from_source = 
+    inventory._populate_from_source = MagicMock()
+    inventory._get_all_replication_groups = MagicMock()
+    inventory._get_all_cache_clusters = MagicMock()
+    inventory._format_inventory = MagicMock()
+    inventory.get_cached_result = MagicMock(return_value=(cache_hit, cached_data if cache_hit else None))
+    inventory.update_cached_result = MagicMock()
+
+    mock_replication_groups = [{"host": f"host_{random.randint(1, 1000)}"} for _ in range(4)]
+    inventory._get_all_replication_groups.return_value = mock_replication_groups
+    formatted_inventory_rg = f"formatted_inventory_{mock_replication_groups}"
+    inventory._format_inventory.return_value = formatted_inventory_rg
+
+    mock_cache_clusters = [{"host": f"host_{random.randint(1, 1000)}"} for _ in range(4)]
+    inventory._get_all_cache_clusters.return_value = mock_cache_clusters
+    formatted_inventory_cc = f"formatted_inventory_{mock_cache_clusters}"
+    inventory._format_inventory.return_value = formatted_inventory_cc
+
+    inventory.parse(inventory_data, loader, path, cache)
+    m_parse.assert_called_with(inventory_data, loader, path, cache=cache)
+
+    if cache_hit:
+        inventory._get_all_replication_groups.assert_not_called()
+        inventory._get_all_cache_clusters.assert_not_called()
+        inventory._populate.assert_not_called()
+        inventory._format_inventory.assert_not_called()
+        inventory._populate_from_source.assert_called_with(cached_data)
+    else:
+        inventory._get_all_replication_groups.assert_called_with(
+            regions=options["regions"],
+            strict=options["strict_permissions"],
+            statuses=options["statuses"],
+        )
+        inventory._get_all_cache_clusters.assert_called_with(
+            regions=options["regions"],
+            strict=options["strict_permissions"],
+            statuses=options["statuses"],
+        )
\ No newline at end of file
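
For reference, below is a minimal sketch of an inventory configuration combining the plugin's own options with the constructed features it inherits (keyed_groups/compose come from the constructed documentation fragment). The file name and group prefix are hypothetical, and the example assumes the described resources expose an Engine attribute; with hostvars_prefix "aws_" and hostvars_suffix "_ec", the snake_cased attributes set in _add_hosts become host variables such as aws_engine_ec and aws_region_ec, so the expressions below use the prefixed/suffixed names.

# demo.aws_ec.yml -- hypothetical name; must end in aws_ec.yml or aws_ec.yaml
plugin: amazon.aws.aws_ec
regions:
  - us-east-1
statuses:
  - available
hostvars_prefix: aws_
hostvars_suffix: _ec
keyed_groups:
  # e.g. creates groups like elasticache_redis, assuming an Engine attribute is returned
  - key: aws_engine_ec
    prefix: elasticache
compose:
  # re-expose the region computed in _add_hosts under a plain name
  ec_region: aws_region_ec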