From 5c4ab705ab78ec9dddbb58f25ebb3c8b32637bf4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=A0t=C4=9Bp=C3=A1n=20Tomsa?=
Date: Thu, 11 Jul 2019 13:55:26 +0200
Subject: [PATCH] Fix E501 rule

Fixed the E501 Flake8 rule targeting too-long lines. In most places, the
code has only been reformatted. Sometimes a variable name has been
shortened or another code-aware change has been made.
---
 api/host.py                                   |  17 +-
 api/metrics.py                                |  47 +++--
 app/config.py                                 |  52 +++---
 app/logging.py                                |   9 +-
 app/models.py                                 |  15 +-
 host_dumper.py                                |  11 +-
 ...fe3208d_add_system_profile_facts_column.py |   9 +-
 .../a9f0e674cf52_add_ansible_host_column.py   |   4 +-
 run_gunicorn.py                               |  10 +-
 tasks/__init__.py                             |   8 +-
 test_api.py                                   | 175 +++++++++++-
 test_unit.py                                  |  13 +-
 test_utils.py                                 |   9 +-
 13 files changed, 244 insertions(+), 135 deletions(-)

diff --git a/api/host.py b/api/host.py
index a4fe22b1d5..3fdac744e8 100644
--- a/api/host.py
+++ b/api/host.py
@@ -396,7 +396,9 @@ def patch_by_id(host_id_list, host_data):
     hosts_to_update = query.all()

     if not hosts_to_update:
-        logger.debug("Failed to find hosts during patch operation - hosts: %s" % host_id_list)
+        logger.debug(
+            "Failed to find hosts during patch operation - hosts: %s" % host_id_list
+        )
         return flask.abort(status.HTTP_404_NOT_FOUND)

     for host in hosts_to_update:
@@ -418,7 +420,10 @@ def replace_facts(host_id_list, namespace, fact_dict):
 @metrics.api_request_time.time()
 def merge_facts(host_id_list, namespace, fact_dict):
     if not fact_dict:
-        error_msg = "ERROR: Invalid request. Merging empty facts into existing facts is a no-op."
+        error_msg = (
+            "ERROR: Invalid request. "
+            "Merging empty facts into existing facts is a no-op."
+        )
         logger.debug(error_msg)
         return error_msg, 400

@@ -436,7 +441,13 @@ def update_facts_by_namespace(operation, host_id_list, namespace, fact_dict):
     logger.debug("hosts_to_update:%s" % hosts_to_update)

     if len(hosts_to_update) != len(host_id_list):
-        error_msg = "ERROR: The number of hosts requested does not match the " "number of hosts found in the host database. This could " " happen if the namespace " "does not exist or the account number associated with the " "call does not match the account number associated with " "one or more the hosts. Rejecting the fact change request."
+        error_msg = (
+            "ERROR: The number of hosts requested does not match the number of hosts "
+            "found in the host database. This could happen if the namespace does not "
+            "exist or the account number associated with the call does not match the "
+            "account number associated with one or more of the hosts. Rejecting "
+            "the fact change request."
+        )
         logger.debug(error_msg)
         return error_msg, 400

diff --git a/api/metrics.py b/api/metrics.py
index 82209d0312..bcda85174e 100644
--- a/api/metrics.py
+++ b/api/metrics.py
@@ -2,14 +2,21 @@

 api_request_time = Summary("inventory_request_processing_seconds", "Time spent processing request")
-host_dedup_processing_time = Summary("inventory_dedup_processing_seconds",
-                                     "Time spent looking for existing host (dedup logic)")
-find_host_using_elevated_ids = Summary("inventory_find_host_using_elevated_ids_processing_seconds",
-                                       "Time spent looking for existing host using the elevated ids")
-new_host_commit_processing_time = Summary("inventory_new_host_commit_seconds",
-                                          "Time spent committing a new host to the database")
-update_host_commit_processing_time = Summary("inventory_update_host_commit_seconds",
-                                             "Time spent committing a update host to the database")
+host_dedup_processing_time = Summary(
+    "inventory_dedup_processing_seconds",
+    "Time spent looking for existing host (dedup logic)")
+find_host_using_elevated_ids = Summary(
+    "inventory_find_host_using_elevated_ids_processing_seconds",
+    "Time spent looking for existing host using the elevated ids"
+)
+new_host_commit_processing_time = Summary(
+    "inventory_new_host_commit_seconds",
+    "Time spent committing a new host to the database"
+)
+update_host_commit_processing_time = Summary(
+    "inventory_update_host_commit_seconds",
+    "Time spent committing an update host to the database"
+)
 api_request_count = Counter("inventory_request_count", "The total amount of API requests")
 create_host_count = Counter("inventory_create_host_count",
@@ -22,11 +29,19 @@
                             "Time spent deleting hosts from the database")
 login_failure_count = Counter("inventory_login_failure_count",
                               "The total amount of failed login attempts")
-system_profile_deserialization_time = Summary("inventory_system_profile_deserialization_time",
-                                              "Time spent deserializing system profile documents")
-system_profile_commit_processing_time = Summary("inventory_system_profile_commit_processing_time",
-                                                "Time spent committing an update to a system profile to the database")
-system_profile_commit_count = Counter("inventory_system_profile_commit_count",
-                                      "Count of successful system profile commits to the database")
-system_profile_failure_count = Counter("inventory_system_profile_failure_count",
-                                       "Count of failures to commit the system profile to the database")
+system_profile_deserialization_time = Summary(
+    "inventory_system_profile_deserialization_time",
+    "Time spent deserializing system profile documents"
+)
+system_profile_commit_processing_time = Summary(
+    "inventory_system_profile_commit_processing_time",
+    "Time spent committing an update to a system profile to the database"
+)
+system_profile_commit_count = Counter(
+    "inventory_system_profile_commit_count",
+    "Count of successful system profile commits to the database"
+)
+system_profile_failure_count = Counter(
+    "inventory_system_profile_failure_count",
+    "Count of failures to commit the system profile to the database"
+)
diff --git a/app/config.py b/app/config.py
index 19e769ed3c..735ee391e1 100644
--- a/app/config.py
+++ b/app/config.py
@@ -1,4 +1,4 @@
-import os
+from os import getenv

 from app.common import get_build_version
 from app.logging import get_logger
@@ -10,34 +10,36 @@ class Config:

     def __init__(self):
         self.logger = get_logger(__name__)

-        self._db_user = os.getenv("INVENTORY_DB_USER", "insights")
-        self._db_password = os.getenv("INVENTORY_DB_PASS", "insights")
-        self._db_host = os.getenv("INVENTORY_DB_HOST", "localhost")
-        self._db_name = os.getenv("INVENTORY_DB_NAME", "insights")
-        self._db_ssl_mode = os.getenv("INVENTORY_DB_SSL_MODE", "")
-        self._db_ssl_cert = os.getenv("INVENTORY_DB_SSL_CERT", "")
+        self._db_user = getenv("INVENTORY_DB_USER", "insights")
+        self._db_password = getenv("INVENTORY_DB_PASS", "insights")
+        self._db_host = getenv("INVENTORY_DB_HOST", "localhost")
+        self._db_name = getenv("INVENTORY_DB_NAME", "insights")
+        self._db_ssl_mode = getenv("INVENTORY_DB_SSL_MODE", "")
+        self._db_ssl_cert = getenv("INVENTORY_DB_SSL_CERT", "")

-        self.db_pool_timeout = int(os.getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
-        self.db_pool_size = int(os.getenv("INVENTORY_DB_POOL_SIZE", "5"))
+        self.db_pool_timeout = int(getenv("INVENTORY_DB_POOL_TIMEOUT", "5"))
+        self.db_pool_size = int(getenv("INVENTORY_DB_POOL_SIZE", "5"))

         self.db_uri = self._build_db_uri(self._db_ssl_mode)

         self.base_url_path = self._build_base_url_path()
         self.api_url_path_prefix = self._build_api_path()
-        self.legacy_api_url_path_prefix = os.getenv("INVENTORY_LEGACY_API_URL", "")
-        self.mgmt_url_path_prefix = os.getenv("INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")
+        self.legacy_api_url_path_prefix = getenv("INVENTORY_LEGACY_API_URL", "")
+        self.mgmt_url_path_prefix = getenv("INVENTORY_MANAGEMENT_URL_PATH_PREFIX", "/")
         self.api_urls = [self.api_url_path_prefix, self.legacy_api_url_path_prefix]

-        self.system_profile_topic = os.environ.get("KAFKA_TOPIC", "platform.system-profile")
-        self.consumer_group = os.environ.get("KAFKA_GROUP", "inventory")
-        self.bootstrap_servers = os.environ.get("KAFKA_BOOTSTRAP_SERVERS", "kafka:29092")
-        self.event_topic = os.environ.get("KAFKA_EVENT_TOPIC", "platform.inventory.events")
-        self.kafka_enabled = all(map(os.environ.get, ["KAFKA_TOPIC", "KAFKA_GROUP", "KAFKA_BOOTSTRAP_SERVERS"]))
+        self.system_profile_topic = getenv("KAFKA_TOPIC", "platform.system-profile")
+        self.consumer_group = getenv("KAFKA_GROUP", "inventory")
+        self.bootstrap_servers = getenv("KAFKA_BOOTSTRAP_SERVERS", "kafka:29092")
+        self.event_topic = getenv("KAFKA_EVENT_TOPIC", "platform.inventory.events")
+        self.kafka_enabled = all(
+            map(getenv, ["KAFKA_TOPIC", "KAFKA_GROUP", "KAFKA_BOOTSTRAP_SERVERS"])
+        )

     def _build_base_url_path(self):
-        app_name = os.getenv("APP_NAME", "inventory")
-        path_prefix = os.getenv("PATH_PREFIX", "api")
+        app_name = getenv("APP_NAME", "inventory")
+        path_prefix = getenv("PATH_PREFIX", "api")
         base_url_path = f"/{path_prefix}/{app_name}"
         return base_url_path

@@ -65,11 +67,19 @@ def log_configuration(self, config_name):
         self.logger.info("Insights Host Inventory Configuration:")
         self.logger.info("Build Version: %s" % get_build_version())
         self.logger.info("API URL Path: %s" % self.api_url_path_prefix)
-        self.logger.info("Management URL Path Prefix: %s" % self.mgmt_url_path_prefix)
+        self.logger.info(
+            "Management URL Path Prefix: %s" % self.mgmt_url_path_prefix
+        )
         self.logger.info("DB Host: %s" % self._db_host)
         self.logger.info("DB Name: %s" % self._db_name)
-        self.logger.info("DB Connection URI: %s" % self._build_db_uri(self._db_ssl_mode, hide_password=True))
+        self.logger.info(
+            "DB Connection URI: %s" % self._build_db_uri(
+                self._db_ssl_mode, hide_password=True
+            )
+        )
         if self._db_ssl_mode == self.SSL_VERIFY_FULL:
             self.logger.info("Using SSL for DB connection:")
-            self.logger.info("Postgresql SSL verification type: %s" % self._db_ssl_mode)
+            self.logger.info(
+                "Postgresql SSL verification type: %s" % self._db_ssl_mode
+            )
             self.logger.info("Path to certificate: %s" % self._db_ssl_cert)
diff --git a/app/logging.py b/app/logging.py
index 84f8f60359..c42e0339a3 100644
--- a/app/logging.py
+++ b/app/logging.py
@@ -8,7 +8,7 @@
 from boto3.session import Session
 from gunicorn import glogging

-OPENSHIFT_ENVIRONMENT_NAME_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
+OPENSHIFT_ENV_NAME_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
 DEFAULT_AWS_LOGGING_NAMESPACE = "inventory-dev"
 LOGGER_PREFIX = "inventory."

@@ -42,11 +42,14 @@ def _configure_watchtower_logging_handler():
     aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None)
     aws_region_name = os.getenv("AWS_REGION_NAME", None)
     log_group = os.getenv("AWS_LOG_GROUP", "platform")
-    stream_name = _get_aws_logging_stream_name(OPENSHIFT_ENVIRONMENT_NAME_FILE)
+    stream_name = _get_aws_logging_stream_name(OPENSHIFT_ENV_NAME_FILE)

     if all([aws_access_key_id, aws_secret_access_key, aws_region_name, stream_name]):
-        print(f"Configuring watchtower logging (log_group={log_group}, stream_name={stream_name})")
+        print(
+            "Configuring watchtower logging "
+            f"(log_group={log_group}, stream_name={stream_name})"
+        )
         boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                 aws_secret_access_key=aws_secret_access_key,
                                 region_name=aws_region_name)
diff --git a/app/models.py b/app/models.py
index 94404fc00d..8b1726d84b 100644
--- a/app/models.py
+++ b/app/models.py
@@ -31,12 +31,15 @@ class Host(db.Model):
     __tablename__ = "hosts"
     # These Index entries are essentially place holders so that the
     # alembic autogenerate functionality does not try to remove the indexes
-    __table_args__ = (Index("idxinsightsid", text("(canonical_facts ->> 'insights_id')")),
-                      Index("idxgincanonicalfacts", "canonical_facts"),
-                      Index("idxaccount", "account"),
-                      Index("hosts_subscription_manager_id_index",
-                            text("(canonical_facts ->> 'subscription_manager_id')")),
-                      )
+    __table_args__ = (
+        Index("idxinsightsid", text("(canonical_facts ->> 'insights_id')")),
+        Index("idxgincanonicalfacts", "canonical_facts"),
+        Index("idxaccount", "account"),
+        Index(
+            "hosts_subscription_manager_id_index",
+            text("(canonical_facts ->> 'subscription_manager_id')")
+        ),
+    )

     id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
     account = db.Column(db.String(10))
diff --git a/host_dumper.py b/host_dumper.py
index 951c064d35..4043f059ce 100644
--- a/host_dumper.py
+++ b/host_dumper.py
@@ -9,9 +9,10 @@

 application = create_app("cli")

-parser = argparse.ArgumentParser(description="Util that dumps a host from the hosts table."
-                                 " The db configuration is read from the environment. This util is expected to be"
-                                 " used within the image/pod")
+parser = argparse.ArgumentParser(
+    description="Util that dumps a host from the hosts table. The db configuration is "
+    "read from the environment. This util is expected to be used within the image/pod"
+)

 group = parser.add_mutually_exclusive_group(required=True)
 group.add_argument("--id", help="search for a host using id")
@@ -44,8 +45,8 @@
 elif args.insights_id:
     print("looking up host using insights_id")
     query_results = Host.query.filter(
-            Host.canonical_facts.comparator.contains({'insights_id':args.insights_id})
-            ).all()
+        Host.canonical_facts.comparator.contains({"insights_id": args.insights_id})
+    ).all()
 elif args.account_number:
     query_results = Host.query.filter(
             Host.account == args.account_number
diff --git a/migrations/versions/2d1cdfe3208d_add_system_profile_facts_column.py b/migrations/versions/2d1cdfe3208d_add_system_profile_facts_column.py
index 54881aae00..d5721a2e3f 100644
--- a/migrations/versions/2d1cdfe3208d_add_system_profile_facts_column.py
+++ b/migrations/versions/2d1cdfe3208d_add_system_profile_facts_column.py
@@ -18,7 +18,14 @@

 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.add_column('hosts', sa.Column('system_profile_facts', postgresql.JSONB(astext_type=sa.Text()), nullable=True))
+    op.add_column(
+        'hosts',
+        sa.Column(
+            'system_profile_facts',
+            postgresql.JSONB(astext_type=sa.Text()),
+            nullable=True
+        )
+    )
     # ### end Alembic commands ###


diff --git a/migrations/versions/a9f0e674cf52_add_ansible_host_column.py b/migrations/versions/a9f0e674cf52_add_ansible_host_column.py
index 4493497558..707f36c369 100644
--- a/migrations/versions/a9f0e674cf52_add_ansible_host_column.py
+++ b/migrations/versions/a9f0e674cf52_add_ansible_host_column.py
@@ -18,7 +18,9 @@

 def upgrade():
     # ### commands auto generated by Alembic - please adjust! ###
-    op.add_column('hosts', sa.Column('ansible_host', sa.String(length=255), nullable=True))
+    op.add_column(
+        'hosts', sa.Column('ansible_host', sa.String(length=255), nullable=True)
+    )
     # ### end Alembic commands ###


diff --git a/run_gunicorn.py b/run_gunicorn.py
index 62b65d8b67..36e4ad293a 100755
--- a/run_gunicorn.py
+++ b/run_gunicorn.py
@@ -30,7 +30,15 @@ def run_server():
     variables.
     """
     bind = f"0.0.0.0:{LISTEN_PORT}"
-    run(("gunicorn", "--log-config=logconfig.ini", "--log-level=debug", f"--bind={bind}", "run"))
+    run(
+        (
+            "gunicorn",
+            "--log-config=logconfig.ini",
+            "--log-level=debug",
+            f"--bind={bind}",
+            "run",
+        )
+    )


 if __name__ == "__main__":
diff --git a/tasks/__init__.py b/tasks/__init__.py
index 7dd274530b..a5fc72131f 100644
--- a/tasks/__init__.py
+++ b/tasks/__init__.py
@@ -56,13 +56,17 @@ def msg_handler(parsed):
     if host is None:
         logger.error("Host with id [%s] not found!", id_)
         return
-    logger.info("Processing message id=%s request_id=%s", parsed["id"], parsed["request_id"])
+    logger.info(
+        "Processing message id=%s request_id=%s", parsed["id"], parsed["request_id"]
+    )
     profile = SystemProfileSchema(strict=True).load(parsed["system_profile"]).data
     host._update_system_profile(profile)
     db.session.commit()


-def _init_system_profile_consumer(config, flask_app, handler=msg_handler, consumer=None):
+def _init_system_profile_consumer(
+    config, flask_app, handler=msg_handler, consumer=None
+):

     if not config.kafka_enabled:
         logger.info("System profile consumer has been disabled")
diff --git a/test_api.py b/test_api.py
index 981007ce46..ab5b431ee1 100755
--- a/test_api.py
+++ b/test_api.py
@@ -276,7 +276,9 @@ def test_create_and_update(self):
                                     host_data,
                                     expected_id=original_id)

-    def test_create_host_update_with_same_insights_id_and_different_canonical_facts(self):
+    def test_create_host_update_with_same_insights_id_and_different_canonical_facts(
+        self
+    ):
         original_insights_id = generate_uuid()

         host_data = HostWrapper(test_data(facts=None))
@@ -862,55 +864,66 @@ def _test_get_page(page, expected_count=1):

 class CreateHostsWithSystemProfileTestCase(DBAPITestCase, PaginationTestCase):
     def _valid_system_profile(self):
-        return {"number_of_cpus": 1,
-                "number_of_sockets": 2,
-                "cores_per_socket": 4,
-                "system_memory_bytes": 1024,
-                "infrastructure_type": "massive cpu",
-                "infrastructure_vendor": "dell",
-                "network_interfaces": [{"ipv4_addresses": ["10.10.10.1"],
-                                        "state": "UP",
-                                        "ipv6_addresses": ["2001:0db8:85a3:0000:0000:8a2e:0370:7334",],
-                                        "mtu": 1500,
-                                        "mac_address": "aa:bb:cc:dd:ee:ff",
-                                        "type": "loopback",
-                                        "name": "eth0", }],
-                "disk_devices": [{"device": "/dev/sdb1",
-                                  "label": "home drive",
-                                  "options": {"uid": "0",
-                                              "ro": True},
-                                  "mount_point": "/home",
-                                  "type": "ext3"}],
-                "bios_vendor": "AMI",
-                "bios_version": "1.0.0uhoh",
-                "bios_release_date": "10/31/2013",
-                "cpu_flags": ["flag1", "flag2"],
-                "os_release": "Red Hat EL 7.0.1",
-                "os_kernel_version": "Linux 2.0.1",
-                "arch": "x86-64",
-                "last_boot_time": "12:25 Mar 19, 2019",
-                "kernel_modules": ["i915", "e1000e"],
-                "running_processes": ["vim", "gcc", "python"],
-                "subscription_status": "valid",
-                "subscription_auto_attach": "yes",
-                "katello_agent_running": False,
-                "satellite_managed": False,
-                "cloud_provider": "Maclean's Music",
-                "yum_repos": [{"name": "repo1", "gpgcheck": True,
-                               "enabled": True,
-                               "base_url": "http://rpms.redhat.com"}],
-                "installed_products": [{"name": "eap",
-                                        "id": "123",
-                                        "status": "UP"},
-                                       {"name": "jbws",
-                                        "id": "321",
-                                        "status": "DOWN"}, ],
-                "insights_client_version": "12.0.12",
-                "insights_egg_version": "120.0.1",
-                "installed_packages": ["rpm1", "rpm2"],
-                "installed_services": ["ndb", "krb5"],
-                "enabled_services": ["ndb", "krb5"],
-                }
+        return {
+            "number_of_cpus": 1,
+            "number_of_sockets": 2,
+            "cores_per_socket": 4,
+            "system_memory_bytes": 1024,
+            "infrastructure_type": "massive cpu",
+            "infrastructure_vendor": "dell",
+            "network_interfaces": [
+                {
+                    "ipv4_addresses": ["10.10.10.1"],
+                    "state": "UP",
+                    "ipv6_addresses": ["2001:0db8:85a3:0000:0000:8a2e:0370:7334"],
+                    "mtu": 1500,
+                    "mac_address": "aa:bb:cc:dd:ee:ff",
+                    "type": "loopback",
+                    "name": "eth0",
+                }
+            ],
+            "disk_devices": [
+                {
+                    "device": "/dev/sdb1",
+                    "label": "home drive",
+                    "options": {"uid": "0", "ro": True},
+                    "mount_point": "/home",
+                    "type": "ext3",
+                }
+            ],
+            "bios_vendor": "AMI",
+            "bios_version": "1.0.0uhoh",
+            "bios_release_date": "10/31/2013",
+            "cpu_flags": ["flag1", "flag2"],
+            "os_release": "Red Hat EL 7.0.1",
+            "os_kernel_version": "Linux 2.0.1",
+            "arch": "x86-64",
+            "last_boot_time": "12:25 Mar 19, 2019",
+            "kernel_modules": ["i915", "e1000e"],
+            "running_processes": ["vim", "gcc", "python"],
+            "subscription_status": "valid",
+            "subscription_auto_attach": "yes",
+            "katello_agent_running": False,
+            "satellite_managed": False,
+            "cloud_provider": "Maclean's Music",
+            "yum_repos": [
+                {
+                    "name": "repo1",
+                    "gpgcheck": True,
+                    "enabled": True,
+                    "base_url": "http://rpms.redhat.com",
+                }
+            ],
+            "installed_products": [
+                {"name": "eap", "id": "123", "status": "UP"},
+                {"name": "jbws", "id": "321", "status": "DOWN"},
+            ],
+            "insights_client_version": "12.0.12",
+            "insights_egg_version": "120.0.1",
+            "installed_packages": ["rpm1", "rpm2"],
+            "installed_services": ["ndb", "krb5"],
+            "enabled_services": ["ndb", "krb5"],
+        }

     def test_create_host_with_system_profile(self):
         facts = None
@@ -933,7 +946,9 @@ def test_create_host_with_system_profile(self):
         # verify system_profile is not included
         self.assertNotIn("system_profile", created_host)

-        host_lookup_results = self.get("%s/%s/system_profile" % (HOST_URL, original_id), 200)
+        host_lookup_results = self.get(
+            "%s/%s/system_profile" % (HOST_URL, original_id), 200
+        )
         actual_host = host_lookup_results["results"][0]

         self.assertEqual(original_id, actual_host["id"])
@@ -990,7 +1005,9 @@ def test_create_host_without_system_profile_then_update_with_system_profile(self):
         with self.app.app_context():
             msg_handler(mq_message)

-        host_lookup_results = self.get("%s/%s/system_profile" % (HOST_URL, original_id), 200)
+        host_lookup_results = self.get(
+            "%s/%s/system_profile" % (HOST_URL, original_id), 200
+        )
         actual_host = host_lookup_results["results"][0]

         self.assertEqual(original_id, actual_host["id"])
@@ -1042,11 +1059,12 @@ def test_create_host_with_system_profile_with_different_yum_urls(self):

         host = test_data(display_name="host1", facts=facts)

-        yum_urls = ["file:///cdrom/",
-                    "http://foo.com http://foo.com",
-                    "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch",
-                    "https://codecs.fedoraproject.org/openh264/$releasever/$basearch/debug/",
-                    ]
+        yum_urls = [
+            "file:///cdrom/",
+            "http://foo.com http://foo.com",
+            "file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-$releasever-$basearch",
+            "https://codecs.fedoraproject.org/openh264/$releasever/$basearch/debug/",
+        ]

         for yum_url in yum_urls:
             with self.subTest(yum_url=yum_url):
@@ -1066,7 +1084,9 @@ def test_create_host_with_system_profile_with_different_yum_urls(self):
                 original_id = created_host["id"]

                 # Verify that the system profile data is saved
-                host_lookup_results = self.get("%s/%s/system_profile" % (HOST_URL, original_id), 200)
+                host_lookup_results = self.get(
+                    "%s/%s/system_profile" % (HOST_URL, original_id), 200
+                )
                 actual_host = host_lookup_results["results"][0]

                 self.assertEqual(original_id, actual_host["id"])
@@ -1095,7 +1115,9 @@ def test_create_host_with_system_profile_with_different_cloud_providers(self):
                 original_id = created_host["id"]

                 # Verify that the system profile data is saved
-                host_lookup_results = self.get("%s/%s/system_profile" % (HOST_URL, original_id), 200)
+                host_lookup_results = self.get(
+                    "%s/%s/system_profile" % (HOST_URL, original_id), 200
+                )
                 actual_host = host_lookup_results["results"][0]

                 self.assertEqual(original_id, actual_host["id"])
@@ -1120,7 +1142,9 @@ def test_get_system_profile_of_host_that_does_not_have_system_profile(self):

         original_id = created_host["id"]

-        host_lookup_results = self.get("%s/%s/system_profile" % (HOST_URL, original_id), 200)
+        host_lookup_results = self.get(
+            "%s/%s/system_profile" % (HOST_URL, original_id), 200
+        )
         actual_host = host_lookup_results["results"][0]

         self.assertEqual(original_id, actual_host["id"])
@@ -1190,8 +1214,8 @@ def setUp(self):
     def create_hosts(self):
         hosts_to_create = [
             ("host1", generate_uuid(), "host1.domain.test"),
-            ("host2", generate_uuid(), "host1.domain.test"),  # the same fqdn is intentional
-            ("host3", generate_uuid(), "host2.domain.test"),  # the same display_name is intentional
+            ("host2", generate_uuid(), "host1.domain.test"),  # intentional same fqdn
+            ("host3", generate_uuid(), "host2.domain.test"),  # ditto: same display_name
         ]

         host_list = []
@@ -1375,10 +1399,13 @@ def test_query_using_display_name(self):

         response = self.get(HOST_URL + "?display_name=" + host_list[0].display_name)

-        self.assertEqual(len(response["results"]), 1)
-        self.assertEqual(response["results"][0]["fqdn"], host_list[0].fqdn)
-        self.assertEqual(response["results"][0]["insights_id"], host_list[0].insights_id)
-        self.assertEqual(response["results"][0]["display_name"], host_list[0].display_name)
+        results = response["results"]
+        self.assertEqual(len(results), 1)
+
+        result = results[0]
+        self.assertEqual(result["fqdn"], host_list[0].fqdn)
+        self.assertEqual(result["insights_id"], host_list[0].insights_id)
+        self.assertEqual(result["display_name"], host_list[0].display_name)

     def test_query_using_fqdn_two_results(self):
         expected_host_list = [self.added_hosts[0], self.added_hosts[1]]
@@ -1388,8 +1415,13 @@ def test_query_using_fqdn_two_results(self):
         self.assertEqual(len(response["results"]), 2)
         for result in response["results"]:
             self.assertEqual(result["fqdn"], expected_host_list[0].fqdn)
-            assert any(result["insights_id"] == host.insights_id for host in expected_host_list)
-            assert any(result["display_name"] == host.display_name for host in expected_host_list)
+            assert any(
+                result["insights_id"] == host.insights_id for host in expected_host_list
+            )
+            assert any(
+                result["display_name"] == host.display_name
+                for host in expected_host_list
+            )

     def test_query_using_fqdn_one_result(self):
         expected_host_list = [self.added_hosts[2]]
@@ -1399,8 +1431,13 @@ def test_query_using_fqdn_one_result(self):
         self.assertEqual(len(response["results"]), 1)
         for result in response["results"]:
             self.assertEqual(result["fqdn"], expected_host_list[0].fqdn)
-            assert any(result["insights_id"] == host.insights_id for host in expected_host_list)
-            assert any(result["display_name"] == host.display_name for host in expected_host_list)
+            assert any(
+                result["insights_id"] == host.insights_id for host in expected_host_list
+            )
+            assert any(
+                result["display_name"] == host.display_name
+                for host in expected_host_list
+            )

     def test_query_using_non_existant_fqdn(self):
         host_list = self.added_hosts
diff --git a/test_unit.py b/test_unit.py
index ec75a22d44..09f66213b7 100755
--- a/test_unit.py
+++ b/test_unit.py
@@ -205,7 +205,10 @@ def test_configuration_with_env_vars(self):

         conf = Config()

-        self.assertEqual(conf.db_uri, "postgresql://fredflintstone:bedrock1234@localhost/SlateRockAndGravel")
+        self.assertEqual(
+            conf.db_uri,
+            "postgresql://fredflintstone:bedrock1234@localhost/SlateRockAndGravel"
+        )
         self.assertEqual(conf.db_pool_timeout, 3)
         self.assertEqual(conf.db_pool_size, 8)
         self.assertEqual(conf.api_url_path_prefix, expected_api_path)
@@ -220,7 +223,9 @@ def test_config_default_settings(self):

         conf = Config()

-        self.assertEqual(conf.db_uri, "postgresql://insights:insights@localhost/insights")
+        self.assertEqual(
+            conf.db_uri, "postgresql://insights:insights@localhost/insights"
+        )
         self.assertEqual(conf.api_url_path_prefix, expected_api_path)
         self.assertEqual(conf.mgmt_url_path_prefix, expected_mgmt_url_path_prefix)
         self.assertEqual(conf.db_pool_timeout, 5)
@@ -283,7 +288,9 @@ def test_order_by_updated_desc(self, modified_on, order_how):
         order_how.assert_called_once_with(modified_on, "DESC")

     @patch("api.host.Host.display_name")
-    def test_default_for_display_name_is_asc(self, display_name, modified_on, order_how):
+    def test_default_for_display_name_is_asc(
+        self, display_name, modified_on, order_how
+    ):
         actual = _params_to_order_by("display_name",)
         expected = (display_name.asc.return_value, modified_on.desc.return_value)
         self.assertEqual(actual, expected)
diff --git a/test_utils.py b/test_utils.py
index 964f36460c..149c721519 100644
--- a/test_utils.py
+++ b/test_utils.py
@@ -24,10 +24,11 @@ def rename_host_table_and_indexes():
     to make dropping the table at the end of the tests a bit safer.
     """
     temp_table_name_suffix = "__unit_tests__"
-    if temp_table_name_suffix not in Host.__table__.name:
-        Host.__table__.name = Host.__table__.name + temp_table_name_suffix
-    if temp_table_name_suffix not in Host.__table__.fullname:
-        Host.__table__.fullname = Host.__table__.fullname + temp_table_name_suffix
+    table = Host.__table__
+    if temp_table_name_suffix not in table.name:
+        table.name = table.name + temp_table_name_suffix
+    if temp_table_name_suffix not in table.fullname:
+        table.fullname = table.fullname + temp_table_name_suffix

     # Adjust the names of the indices
     for index in Host.__table_args__: