From 3a6a5003aed9e082e709999f1bac2c766f6951a4 Mon Sep 17 00:00:00 2001 From: ronsh12 <101520407+ronsh12@users.noreply.github.com> Date: Tue, 16 Jan 2024 19:09:30 +0200 Subject: [PATCH] feat: Added queries for bigquery pci_dss (#325) --- .../aws_compliance__pci_dss_v3_2_1_free.sql | 16 +-- .../models/aws_compliance__pci_dss_v3_2_1.sql | 98 +++++++++---------- .../autoscaling_groups_elb_check.sql | 14 +++ .../codebuild/check_environment_variables.sql | 22 ++++- .../check_oauth_usage_for_sources.sql | 16 ++- .../config/config_enabled_all_regions.sql | 20 +++- .../aws/macros/dms/replication_not_public.sql | 16 ++- .../ec2/ebs_snapshot_permissions_check.sql | 29 +++++- .../aws/macros/ec2/get_unused_public_ips.sql | 16 +++ ...elasticsearch_domains_should_be_in_vpc.sql | 16 ++- ...should_have_encryption_at_rest_enabled.sql | 16 ++- .../elbv2/elbv2_redirect_http_to_https.sql | 18 +++- .../aws/macros/guardduty/detector_enabled.sql | 36 +++++++ transformations/aws/macros/iam/no_star.sql | 34 +++++++ .../aws/macros/iam/password_policy_strong.sql | 22 ++++- .../macros/lambda/lambda_function_in_vpc.sql | 15 ++- ...lambda_function_prohibit_public_access.sql | 24 ++++- ...napshots_should_prohibit_public_access.sql | 17 +++- .../redshift/cluster_publicly_accessible.sql | 2 +- .../s3/account_level_public_access_blocks.sql | 22 ++++- .../aws/macros/s3/deny_http_requests.sql | 34 ++++++- .../macros/s3/publicly_readable_buckets.sql | 64 +++++++++++- .../macros/s3/publicly_writable_buckets.sql | 64 +++++++++++- .../macros/s3/s3_cross_region_replication.sql | 15 +++ .../s3/s3_server_side_encryption_enabled.sql | 18 +++- ...stance_direct_internet_access_disabled.sql | 14 ++- .../remove_unused_secrets_manager_secrets.sql | 15 ++- ...ic_rotation_should_rotate_successfully.sql | 15 ++- ...ated_within_a_specified_number_of_days.sql | 15 ++- ...should_have_automatic_rotation_enabled.sql | 14 ++- ...ec2_instances_should_be_managed_by_ssm.sql | 16 ++- ...ciation_compliance_status_of_compliant.sql | 25 ++++- ...e_patch_compliance_status_of_compliant.sql | 29 +++++- ...afv2_web_acl_logging_should_be_enabled.sql | 33 ++++++- 34 files changed, 753 insertions(+), 87 deletions(-) diff --git a/transformations/aws/compliance-free/models/aws_compliance__pci_dss_v3_2_1_free.sql b/transformations/aws/compliance-free/models/aws_compliance__pci_dss_v3_2_1_free.sql index a11bffb3b..1431f1412 100644 --- a/transformations/aws/compliance-free/models/aws_compliance__pci_dss_v3_2_1_free.sql +++ b/transformations/aws/compliance-free/models/aws_compliance__pci_dss_v3_2_1_free.sql @@ -3,22 +3,22 @@ with aggregated as ( ({{ autoscaling_groups_elb_check('pci_dss_v3.2.1','autoscaling.1') }}) - UNION + {{ union() }} ({{ logs_encrypted('pci_dss_v3.2.1','cloudtrail.1') }}) - UNION + {{ union() }} ({{ cloudtrail_enabled_all_regions('pci_dss_v3.2.1','cloudtrail.2') }}) - UNION + {{ union() }} ({{ log_file_validation_enabled('pci_dss_v3.2.1','cloudtrail.3') }}) - UNION + {{ union() }} ({{ integrated_with_cloudwatch_logs('pci_dss_v3.2.1','cloudtrail.4') }}) - UNION + {{ union() }} ({{ check_oauth_usage_for_sources('pci_dss_v3.2.1','codebuild.1') }}) - UNION + {{ union() }} ({{ check_environment_variables('pci_dss_v3.2.1','codebuild.2') }}) - UNION + {{ union() }} ({{ config_enabled_all_regions('pci_dss_v3.2.1','config.1') }}) ) select - ('{{ run_started_at }}')::timestamp as policy_execution_time, + {{ gen_timestamp() }}, aggregated.* from aggregated diff --git a/transformations/aws/compliance-premium/models/aws_compliance__pci_dss_v3_2_1.sql 
b/transformations/aws/compliance-premium/models/aws_compliance__pci_dss_v3_2_1.sql index 63a50ad43..5cbadcae2 100644 --- a/transformations/aws/compliance-premium/models/aws_compliance__pci_dss_v3_2_1.sql +++ b/transformations/aws/compliance-premium/models/aws_compliance__pci_dss_v3_2_1.sql @@ -1,105 +1,103 @@ -{{ config(enabled=block_bigquery()) }} - with aggregated as ( ({{ autoscaling_groups_elb_check('pci_dss_v3.2.1','autoscaling.1') }}) - UNION + {{ union() }} ({{ logs_encrypted('pci_dss_v3.2.1','cloudtrail.1') }}) - UNION + {{ union() }} ({{ cloudtrail_enabled_all_regions('pci_dss_v3.2.1','cloudtrail.2') }}) - UNION + {{ union() }} ({{ log_file_validation_enabled('pci_dss_v3.2.1','cloudtrail.3') }}) - UNION + {{ union() }} ({{ integrated_with_cloudwatch_logs('pci_dss_v3.2.1','cloudtrail.4') }}) - UNION + {{ union() }} ({{ check_oauth_usage_for_sources('pci_dss_v3.2.1','codebuild.1') }}) - UNION + {{ union() }} ({{ check_environment_variables('pci_dss_v3.2.1','codebuild.2') }}) - UNION + {{ union() }} ({{ config_enabled_all_regions('pci_dss_v3.2.1','config.1') }}) - UNION + {{ union() }} ({{ alarm_root_account('pci_dss_v3.2.1','cloudwatch.1') }}) - UNION + {{ union() }} ({{ replication_not_public('pci_dss_v3.2.1','dms.1') }}) - UNION + {{ union() }} ({{ ebs_snapshot_permissions_check('pci_dss_v3.2.1','ec2.1') }}) - UNION + {{ union() }} ({{ default_sg_no_access('pci_dss_v3.2.1','ec2.2') }}) - UNION + {{ union() }} ({{ get_unused_public_ips('pci_dss_v3.2.1','ec2.4') }}) - UNION + {{ union() }} ({{ no_broad_public_ingress_on_port_22('pci_dss_v3.2.1','ec2.5') }}) - UNION + {{ union() }} ({{ flow_logs_enabled_in_all_vpcs('pci_dss_v3.2.1','ec2.6') }}) - UNION + {{ union() }} ({{ elbv2_redirect_http_to_https('pci_dss_v3.2.1','elbv2.1') }}) - UNION + {{ union() }} ({{ elasticsearch_domains_should_be_in_vpc('pci_dss_v3.2.1','elasticsearch.1') }}) - UNION + {{ union() }} ({{ elasticsearch_domains_should_have_encryption_at_rest_enabled('pci_dss_v3.2.1','elasticsearch.2') }}) - UNION + {{ union() }} ({{ detector_enabled('pci_dss_v3.2.1','guardduty enabled in all enabled regions') }}) - UNION + {{ union() }} ({{ root_user_no_access_keys('pci_dss_v3.2.1','iam.1') }}) - UNION + {{ union() }} ({{ policies_attached_to_groups_roles('pci_dss_v3.2.1','iam.2') }}) - UNION + {{ union() }} ({{ no_star('pci_dss_v3.2.1','iam.3') }}) - UNION + {{ union() }} ({{ hardware_mfa_enabled_for_root('pci_dss_v3.2.1','iam.4') }}) - UNION + {{ union() }} ({{ mfa_enabled_for_root('pci_dss_v3.2.1','iam.5') }}) - UNION + {{ union() }} ({{ mfa_enabled_for_console_access('pci_dss_v3.2.1','iam.6') }}) - UNION + {{ union() }} ({{ unused_creds_disabled('pci_dss_v3.2.1','iam.7') }}) - UNION + {{ union() }} ({{ password_policy_strong('pci_dss_v3.2.1','iam.8') }}) - UNION + {{ union() }} ({{ rotation_enabled_for_customer_key('pci_dss_v3.2.1','kms.1') }}) - UNION + {{ union() }} ({{ lambda_function_prohibit_public_access('pci_dss_v3.2.1','lambda.1') }}) - UNION + {{ union() }} ({{ lambda_function_in_vpc('pci_dss_v3.2.1','lambda.2') }}) - UNION + {{ union() }} ({{ snapshots_should_prohibit_public_access('pci_dss_v3.2.1','rds.1') }}) - UNION + {{ union() }} ({{ rds_db_instances_should_prohibit_public_access('pci_dss_v3.2.1','rds.2') }}) - UNION + {{ union() }} ({{ cluster_publicly_accessible('pci_dss_v3.2.1','redshift.1') }}) - UNION + {{ union() }} ({{ publicly_writable_buckets('pci_dss_v3.2.1','s3.1') }}) - UNION + {{ union() }} ({{ publicly_readable_buckets('pci_dss_v3.2.1','s3.2') }}) - UNION + {{ union() }} ({{ 
s3_cross_region_replication('pci_dss_v3.2.1','s3.3') }}) - UNION + {{ union() }} ({{ s3_server_side_encryption_enabled('pci_dss_v3.2.1','s3.4') }}) - UNION + {{ union() }} ({{ deny_http_requests('pci_dss_v3.2.1','s3.5') }}) - UNION + {{ union() }} ({{ account_level_public_access_blocks('pci_dss_v3.2.1','s3.6') }}) - UNION + {{ union() }} ({{ sagemaker_notebook_instance_direct_internet_access_disabled('pci_dss_v3.2.1','sagemaker.1') }}) - UNION + {{ union() }} ({{ secrets_should_have_automatic_rotation_enabled('pci_dss_v3.2.1','secretmanager.1') }}) - UNION + {{ union() }} ({{ secrets_configured_with_automatic_rotation_should_rotate_successfully('pci_dss_v3.2.1','secretmanager.2') }}) - UNION + {{ union() }} ({{ remove_unused_secrets_manager_secrets('pci_dss_v3.2.1','secretmanager.3') }}) - UNION + {{ union() }} ({{ secrets_should_be_rotated_within_a_specified_number_of_days('pci_dss_v3.2.1','secretmanager.4') }}) - UNION + {{ union() }} ({{ instances_should_have_patch_compliance_status_of_compliant('pci_dss_v3.2.1','ssm.1') }}) - UNION + {{ union() }} ({{ instances_should_have_association_compliance_status_of_compliant('pci_dss_v3.2.1','ssm.2') }}) - UNION + {{ union() }} ({{ ec2_instances_should_be_managed_by_ssm('pci_dss_v3.2.1','ssm.3') }}) - UNION + {{ union() }} ({{ wafv2_web_acl_logging_should_be_enabled('pci_dss_v3.2.1','waf.1') }}) ) select - ('{{ run_started_at }}')::timestamp as policy_execution_time, + {{ gen_timestamp() }}, aggregated.* from aggregated diff --git a/transformations/aws/macros/autoscaling/autoscaling_groups_elb_check.sql b/transformations/aws/macros/autoscaling/autoscaling_groups_elb_check.sql index 9c4f57976..a11d884c4 100644 --- a/transformations/aws/macros/autoscaling/autoscaling_groups_elb_check.sql +++ b/transformations/aws/macros/autoscaling/autoscaling_groups_elb_check.sql @@ -31,3 +31,17 @@ select end as status from aws_autoscaling_groups {% endmacro %} + +{% macro bigquery__autoscaling_groups_elb_check(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Auto Scaling groups associated with a load balancer should use health checks' as title, + account_id, + arn as resource_id, + case + when ARRAY_LENGTH(load_balancer_names) > 0 and health_check_type is distinct from 'ELB' then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_autoscaling_groups") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/codebuild/check_environment_variables.sql b/transformations/aws/macros/codebuild/check_environment_variables.sql index dea8101e9..e4e057767 100644 --- a/transformations/aws/macros/codebuild/check_environment_variables.sql +++ b/transformations/aws/macros/codebuild/check_environment_variables.sql @@ -43,4 +43,24 @@ from aws_codebuild_projects, JSONB_ARRAY_ELEMENTS(environment->'EnvironmentVaria {% endmacro %} {% macro default__check_environment_variables(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__check_environment_variables(framework, check_id) %} +select distinct + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'CodeBuild project environment variables should not contain clear text credentials' as title, + account_id, + arn as resource_id, + case when + JSON_VALUE(e.Type) = 'PLAINTEXT' + and ( + UPPER(JSON_VALUE(e.Name)) like '%ACCESS_KEY%' or + UPPER(JSON_VALUE(e.Name)) like '%SECRET%' or + UPPER(JSON_VALUE(e.Name)) like '%PASSWORD%' + ) + then 'fail' + else 'pass' + end as status +from {{ 
full_table_name("aws_codebuild_projects") }}, +UNNEST(JSON_QUERY_ARRAY(environment.EnvironmentVariables)) AS e +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/codebuild/check_oauth_usage_for_sources.sql b/transformations/aws/macros/codebuild/check_oauth_usage_for_sources.sql index 6311a25ba..a2ace8862 100644 --- a/transformations/aws/macros/codebuild/check_oauth_usage_for_sources.sql +++ b/transformations/aws/macros/codebuild/check_oauth_usage_for_sources.sql @@ -33,4 +33,18 @@ from aws_codebuild_projects {% endmacro %} {% macro default__check_oauth_usage_for_sources(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__check_oauth_usage_for_sources(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'CodeBuild GitHub or Bitbucket source repository URLs should use OAuth' as title, + account_id, + arn as resource_id, + case when + JSON_VALUE(source.Type) IN ('GITHUB', 'BITBUCKET') AND JSON_VALUE(source.Auth.Type) != 'OAUTH' + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_codebuild_projects") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/config/config_enabled_all_regions.sql b/transformations/aws/macros/config/config_enabled_all_regions.sql index 3fbfccfb7..17455fcb5 100644 --- a/transformations/aws/macros/config/config_enabled_all_regions.sql +++ b/transformations/aws/macros/config/config_enabled_all_regions.sql @@ -38,4 +38,22 @@ FROM {% endmacro %} {% macro default__config_enabled_all_regions(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__config_enabled_all_regions(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'AWS Config should be enabled' as title, + account_id, + arn as resource_id, + case when + CAST( JSON_VALUE(recording_group.IncludeGlobalResourceTypes) AS BOOL) IS NOT TRUE + OR CAST( JSON_VALUE(recording_group.AllSupported) AS BOOL) IS NOT TRUE + OR status_recording IS NOT TRUE + OR status_last_status IS DISTINCT FROM 'SUCCESS' + then 'fail' + else 'pass' + end as status +FROM + {{ full_table_name("aws_config_configuration_recorders") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/dms/replication_not_public.sql b/transformations/aws/macros/dms/replication_not_public.sql index 222088190..c7c5e6fec 100644 --- a/transformations/aws/macros/dms/replication_not_public.sql +++ b/transformations/aws/macros/dms/replication_not_public.sql @@ -33,4 +33,18 @@ from aws_dms_replication_instances {% endmacro %} {% macro default__replication_not_public(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__replication_not_public(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'AWS Database Migration Service replication instances should not be public' as title, + account_id, + arn as resource_id, + case when + publicly_accessible is true + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_dms_replication_instances") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/ec2/ebs_snapshot_permissions_check.sql b/transformations/aws/macros/ec2/ebs_snapshot_permissions_check.sql index 25e7de415..afc6c8df9 100644 --- a/transformations/aws/macros/ec2/ebs_snapshot_permissions_check.sql +++ b/transformations/aws/macros/ec2/ebs_snapshot_permissions_check.sql @@ 
-55,4 +55,31 @@ FROM snapshot_access_groups {% endmacro %} {% macro default__ebs_snapshot_permissions_check(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__ebs_snapshot_permissions_check(framework, check_id) %} +WITH snapshot_access_groups AS ( + SELECT account_id, + region, + snapshot_id, + groupa, + user_id + FROM {{ full_table_name("aws_ec2_ebs_snapshot_attributes") }}, + UNNEST(JSON_QUERY_ARRAY(create_volume_permissions.Group)) AS groupa, + UNNEST(JSON_QUERY_ARRAY(create_volume_permissions.UserId)) AS user_id +) +SELECT DISTINCT + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Amazon EBS snapshots should not be public, determined by the ability to be restorable by anyone' as title, + account_id, + snapshot_id as resource_id, + case when + JSON_VALUE(groupa) = 'all' + -- this is under question because + -- trusted accounts(user_id) do not violate this control + OR JSON_VALUE(user_id) IS DISTINCT FROM '' + then 'fail' + else 'pass' + end as status +FROM snapshot_access_groups +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/ec2/get_unused_public_ips.sql b/transformations/aws/macros/ec2/get_unused_public_ips.sql index de88db211..ff9050461 100644 --- a/transformations/aws/macros/ec2/get_unused_public_ips.sql +++ b/transformations/aws/macros/ec2/get_unused_public_ips.sql @@ -18,6 +18,22 @@ select end as status from aws_ec2_eips {% endmacro %} + +{% macro bigquery__get_unused_public_ips(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Unused EC2 EIPs should be removed' as title, + account_id, + public_ip as resource_id, + case when + instance_id is null + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_ec2_eips") }} +{% endmacro %} + {% macro snowflake__get_unused_public_ips(framework, check_id) %} select '{{framework}}' as framework, diff --git a/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_be_in_vpc.sql b/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_be_in_vpc.sql index c33bb1c57..df968490d 100644 --- a/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_be_in_vpc.sql +++ b/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_be_in_vpc.sql @@ -33,4 +33,18 @@ from aws_elasticsearch_domains {% endmacro %} {% macro default__elasticsearch_domains_should_be_in_vpc(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__elasticsearch_domains_should_be_in_vpc(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Elasticsearch domains should be in a VPC' as title, + account_id, + arn as resource_id, + case when + vpc_options.VPCId is null + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_elasticsearch_domains") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_have_encryption_at_rest_enabled.sql b/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_have_encryption_at_rest_enabled.sql index 7a3611af9..8934956c7 100644 --- a/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_have_encryption_at_rest_enabled.sql +++ b/transformations/aws/macros/elasticsearch/elasticsearch_domains_should_have_encryption_at_rest_enabled.sql @@ -33,4 +33,18 @@ from aws_elasticsearch_domains {% endmacro %} {% macro 
default__elasticsearch_domains_should_have_encryption_at_rest_enabled(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__elasticsearch_domains_should_have_encryption_at_rest_enabled(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Elasticsearch domains should have encryption at rest enabled' as title, + account_id, + arn as resource_id, + case when + CAST( JSON_VALUE(encryption_at_rest_options.Enabled) AS BOOL) is not true + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_elasticsearch_domains") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/elbv2/elbv2_redirect_http_to_https.sql b/transformations/aws/macros/elbv2/elbv2_redirect_http_to_https.sql index b92c67659..4d6cdb5eb 100644 --- a/transformations/aws/macros/elbv2/elbv2_redirect_http_to_https.sql +++ b/transformations/aws/macros/elbv2/elbv2_redirect_http_to_https.sql @@ -34,4 +34,20 @@ from aws_elbv2_listeners, JSONB_ARRAY_ELEMENTS(default_actions) AS da {% endmacro %} {% macro default__elbv2_redirect_http_to_https(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__elbv2_redirect_http_to_https(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Application Load Balancer should be configured to redirect all HTTP requests to HTTPS' as title, + account_id, + arn as resource_id, + case when + protocol = 'HTTP' and ( + JSON_VALUE(da.Type) != 'redirect' or JSON_VALUE(da.RedirectConfig.Protocol) != 'HTTPS') + then 'fail' + else 'pass' + end as status +from {{ full_table_name("aws_elbv2_listeners") }}, + UNNEST(JSON_QUERY_ARRAY(default_actions)) AS da +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/guardduty/detector_enabled.sql b/transformations/aws/macros/guardduty/detector_enabled.sql index c528dbb7a..709368eec 100644 --- a/transformations/aws/macros/guardduty/detector_enabled.sql +++ b/transformations/aws/macros/guardduty/detector_enabled.sql @@ -78,3 +78,39 @@ where {% macro default__detector_enabled(framework, check_id) %}{% endmacro %} +{% macro bigquery__detector_enabled(framework, check_id) %} +with enabled_detector_regions as ( + select request_account_id as account_id, request_region as region + from {{ full_table_name("aws_guardduty_detectors") }} + where status = 'ENABLED' +) + +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'GuardDuty should be enabled' AS title, + r.account_id, + r.region AS resource_id, + case when + enabled = TRUE and e.region is null + then 'fail' else 'pass' end AS status +from {{ full_table_name("aws_regions") }} r +left join enabled_detector_regions e on e.region = r.region AND e.account_id = r.account_id +union all +-- Add any detector that is enabled but all data sources are disabled +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'GuardDuty should be enabled (detectors)' AS title, + request_account_id as account_id, + request_region AS resource_id, + case when + JSON_VALUE(data_sources.S3Logs.Status) != 'ENABLED' AND + JSON_VALUE(data_sources.DNSLogs.Status) != 'ENABLED' AND + JSON_VALUE(data_sources.CloudTrail.Status) != 'ENABLED' AND + JSON_VALUE(data_sources.FlowLogs.Status) != 'ENABLED' + then 'fail' else 'pass' end AS status +from {{ full_table_name("aws_guardduty_detectors") }} +where + status = 'ENABLED' +{% endmacro %} \ No newline at end of file diff --git 
a/transformations/aws/macros/iam/no_star.sql b/transformations/aws/macros/iam/no_star.sql
index 72a892cdc..1817968e8 100644
--- a/transformations/aws/macros/iam/no_star.sql
+++ b/transformations/aws/macros/iam/no_star.sql
@@ -51,6 +51,40 @@ from aws_iam_policies
 left join violations on violations.id = aws_iam_policies.id
 {% endmacro %}
 
+{% macro bigquery__no_star(framework, check_id) %}
+with pvs as (
+    select
+        p.id,
+        pv.document_json as document
+    from {{ full_table_name("aws_iam_policies") }} p
+    inner join {{ full_table_name("aws_iam_policy_versions") }} pv on p.account_id = pv.account_id AND p.arn = pv.policy_arn
+), violations as (
+    select
+        id,
+        COUNT(*) as violations
+    from pvs,
+        UNNEST(JSON_QUERY_ARRAY(document.Statement)) AS statement,
+        UNNEST(JSON_QUERY_ARRAY(statement.Resource)) AS resource,
+        UNNEST(JSON_QUERY_ARRAY(statement.Action)) AS action
+    where JSON_VALUE(statement.Effect) = 'Allow'
+        and JSON_VALUE(resource) = '*'
+        and ( JSON_VALUE(action) = '*' or JSON_VALUE(action) = '*:*' )
+    group by id
+)
+
+select distinct
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    "IAM policies should not allow full '*' administrative privileges" AS title,
+    account_id,
+    arn AS resource_id,
+    case when
+        violations.id is not null AND violations.violations > 0
+        then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_iam_policies") }}
+left join violations on violations.id = aws_iam_policies.id
+{% endmacro %}
+
 {% macro snowflake__no_star(framework, check_id) %}
 with pvs as (
     select
diff --git a/transformations/aws/macros/iam/password_policy_strong.sql b/transformations/aws/macros/iam/password_policy_strong.sql
index c498b63d1..a04b10c12 100644
--- a/transformations/aws/macros/iam/password_policy_strong.sql
+++ b/transformations/aws/macros/iam/password_policy_strong.sql
@@ -47,4 +47,24 @@ from aws_iam_password_policies
 {% endmacro %}
 
 {% macro default__password_policy_strong(framework, check_id) %}{% endmacro %}
- 
\ No newline at end of file
+ 
+{% macro bigquery__password_policy_strong(framework, check_id) %}
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'Password policies for IAM users should have strong configurations' as title,
+    account_id,
+    account_id AS resource_id,
+    case when
+        (
+            require_uppercase_characters is not TRUE
+            or require_lowercase_characters is not TRUE
+            or require_numbers is not TRUE
+            or minimum_password_length < 14
+            or password_reuse_prevention is null
+            or max_password_age is null
+            or policy_exists is not TRUE
+        )
+        then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_iam_password_policies") }}
+{% endmacro %}
\ No newline at end of file
diff --git a/transformations/aws/macros/lambda/lambda_function_in_vpc.sql b/transformations/aws/macros/lambda/lambda_function_in_vpc.sql
index 8209f68ef..4a21b8689 100644
--- a/transformations/aws/macros/lambda/lambda_function_in_vpc.sql
+++ b/transformations/aws/macros/lambda/lambda_function_in_vpc.sql
@@ -16,8 +16,8 @@ from aws_lambda_functions
 {% endmacro %}
 {% macro snowflake__lambda_function_in_vpc(framework, check_id) %}
 SELECT
-    'pci_dss_v3.2.1' AS framework,
-    'lambda.2' AS check_id,
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
     'Lambda functions should be in a VPC' AS title,
     account_id,
     arn AS resource_id,
@@ -28,4 +28,15 @@ SELECT
     ELSE 'pass'
     END AS status
 FROM aws_lambda_functions
+{% endmacro %}
+
+{% macro bigquery__lambda_function_in_vpc(framework, check_id) %}
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'Lambda functions should be in a VPC' AS title,
+    account_id,
+    arn as resource_id,
+    case when configuration.VpcConfig.VpcId is null or JSON_VALUE(configuration.VpcConfig.VpcId) = '' then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_lambda_functions") }}
 {% endmacro %}
\ No newline at end of file
diff --git a/transformations/aws/macros/lambda/lambda_function_prohibit_public_access.sql b/transformations/aws/macros/lambda/lambda_function_prohibit_public_access.sql
index 7d2af6f2e..c32da67a9 100644
--- a/transformations/aws/macros/lambda/lambda_function_prohibit_public_access.sql
+++ b/transformations/aws/macros/lambda/lambda_function_prohibit_public_access.sql
@@ -60,4 +60,23 @@ WHERE statement.value:Effect = 'Allow'
         END
     )::VARIANT:AWS LIKE '%*%'
 )
-{% endmacro %}
\ No newline at end of file
+{% endmacro %}
+
+{% macro bigquery__lambda_function_prohibit_public_access(framework, check_id) %}
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'Lambda functions should prohibit public access' as title,
+    account_id,
+    arn as resource_id,
+    'fail' as status -- TODO FIXME
+from {{ full_table_name("aws_lambda_functions") }},
+    UNNEST(JSON_QUERY_ARRAY(policy_document.Statement)) AS statement
+where JSON_VALUE(statement.Effect) = 'Allow'
+    and (
+        JSON_VALUE(statement.Principal) = '*'
+        or JSON_VALUE(statement.Principal.AWS) = '*'
+
+        or ( '*' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(statement.Principal.AWS)) )
+    )
+{% endmacro %}
diff --git a/transformations/aws/macros/rds/snapshots_should_prohibit_public_access.sql b/transformations/aws/macros/rds/snapshots_should_prohibit_public_access.sql
index ef2f228e7..df959d23a 100644
--- a/transformations/aws/macros/rds/snapshots_should_prohibit_public_access.sql
+++ b/transformations/aws/macros/rds/snapshots_should_prohibit_public_access.sql
@@ -32,4 +32,19 @@ SELECT
     END AS status
 FROM aws_rds_cluster_snapshots,
     LATERAL FLATTEN(INPUT => PARSE_JSON(attributes)) AS attrs
-{% endmacro %}
\ No newline at end of file
+{% endmacro %}
+
+{% macro bigquery__snapshots_should_prohibit_public_access(framework, check_id) %}
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'RDS snapshots should be private' as title,
+    account_id,
+    arn AS resource_id,
+    case when
+        (JSON_VALUE(attrs.AttributeName) is not distinct from 'restore')
+        and 'all' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(attrs.AttributeValues))
+        then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_rds_cluster_snapshots") }},
+    UNNEST(JSON_QUERY_ARRAY(attributes)) as attrs
+{% endmacro %}
diff --git a/transformations/aws/macros/redshift/cluster_publicly_accessible.sql b/transformations/aws/macros/redshift/cluster_publicly_accessible.sql
index 2f79fe144..ec2446023 100644
--- a/transformations/aws/macros/redshift/cluster_publicly_accessible.sql
+++ b/transformations/aws/macros/redshift/cluster_publicly_accessible.sql
@@ -25,7 +25,7 @@ from aws_redshift_clusters
 {% endmacro %}
 
 {% macro default__cluster_publicly_accessible(framework, check_id) %}{% endmacro %}
- 
+
 {% macro bigquery__cluster_publicly_accessible(framework, check_id) %}
 select
     '{{framework}}' as framework,
diff --git
a/transformations/aws/macros/s3/account_level_public_access_blocks.sql b/transformations/aws/macros/s3/account_level_public_access_blocks.sql index 613ff2686..245114f84 100644 --- a/transformations/aws/macros/s3/account_level_public_access_blocks.sql +++ b/transformations/aws/macros/s3/account_level_public_access_blocks.sql @@ -45,4 +45,24 @@ left join {% endmacro %} {% macro default__account_level_public_access_blocks(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__account_level_public_access_blocks(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'S3 Block Public Access setting should be enabled' as title, + aws_iam_accounts.account_id, + aws_iam_accounts.account_id AS resource_id, + case when + config_exists is not TRUE + or block_public_acls is not TRUE + or block_public_policy is not TRUE + or ignore_public_acls is not TRUE + or restrict_public_buckets is not TRUE + then 'fail' else 'pass' end as status +from + {{ full_table_name("aws_iam_accounts") }} +left join + {{ full_table_name("aws_s3_accounts") }} on + aws_iam_accounts.account_id = aws_s3_accounts.account_id +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/s3/deny_http_requests.sql b/transformations/aws/macros/s3/deny_http_requests.sql index be83867eb..e4008c0f4 100644 --- a/transformations/aws/macros/s3/deny_http_requests.sql +++ b/transformations/aws/macros/s3/deny_http_requests.sql @@ -78,4 +78,36 @@ where {% endmacro %} {% macro default__deny_http_requests(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__deny_http_requests(framework, check_id) %} +select + '{{framework}}' As framework, + '{{check_id}}' As check_id, + 'S3 buckets should deny non-HTTPS requests' AS title, + account_id, + arn AS resource_id, + 'fail' AS status +FROM + {{ full_table_name("aws_s3_buckets") }} +WHERE + arn NOT IN ( + SELECT foo.arn + FROM ( + SELECT + b.arn, + statements AS statement + FROM + {{ full_table_name("aws_s3_buckets") }} AS b + inner join {{ full_table_name("aws_s3_bucket_policies") }} + on b.arn = aws_s3_bucket_policies.bucket_arn, + UNNEST(JSON_QUERY_ARRAY(aws_s3_bucket_policies.policy_json.Statement)) AS statements + WHERE + CAST(JSON_VALUE(statements.Effect) AS STRING) = 'Deny' + AND CAST(JSON_VALUE(JSON_EXTRACT(statements, '$.Condition.Bool."aws:SecureTransport"')) AS STRING) = 'false' + ) AS foo + WHERE + CAST(JSON_VALUE(foo.statement.Principal) AS STRING) = '*' + OR + CONTAINS_SUBSTR(CAST(JSON_VALUE(foo.statement.Principal) AS STRING), '*') + ) +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/s3/publicly_readable_buckets.sql b/transformations/aws/macros/s3/publicly_readable_buckets.sql index a43754d1e..5dbc3ae83 100644 --- a/transformations/aws/macros/s3/publicly_readable_buckets.sql +++ b/transformations/aws/macros/s3/publicly_readable_buckets.sql @@ -130,4 +130,66 @@ where {% endmacro %} {% macro default__publicly_readable_buckets(framework, check_id) %}{% endmacro %} - \ No newline at end of file +{% macro bigquery__publicly_readable_buckets(framework, check_id) %} +with policy_allow_public as ( + select + arn, + count(*) as statement_count + from + ( + select + b.arn, + bp.policy_json.Statement.Principal as principals + from + {{ full_table_name("aws_s3_buckets") }} b + inner join {{ full_table_name("aws_s3_bucket_policies") }} bp on b.arn = bp.bucket_arn + where + JSON_VALUE(bp.policy_json.Statement.Effect) = '"Allow"' + ) as 
foo + where + JSON_VALUE(principals) = '"*"' + or ( + 'AWS' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(principals)) + and ( + JSON_VALUE(principals.AWS) = '"*"' + or '"*"' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(principals.AWS)) + ) + ) + group by + arn +) +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'S3 buckets should prohibit public read access' as title, + aws_s3_buckets.account_id, + aws_s3_buckets.arn as resource_id, + 'fail' as status -- TODO FIXME +from + -- Find and join all bucket ACLS that give a public write access + {{ full_table_name("aws_s3_buckets") }} +left join + {{ full_table_name("aws_s3_bucket_grants") }} on + aws_s3_buckets.arn = aws_s3_bucket_grants.bucket_arn +-- Find all statements that could give public allow access +-- Statements that give public access have 1) Effect == Allow 2) One of the following principal: +-- Principal = {"AWS": "*"} +-- Principal = {"AWS": ["arn:aws:iam::12345678910:root", "*"]} +-- Principal = "*" +left join policy_allow_public on + aws_s3_buckets.arn = policy_allow_public.arn +left join {{ full_table_name("aws_s3_bucket_public_access_blocks") }} on + aws_s3_buckets.arn = aws_s3_bucket_public_access_blocks.bucket_arn +where + ( + CAST( JSON_VALUE(aws_s3_bucket_public_access_blocks.public_access_block_configuration.BlockPublicAcls) AS BOOL) != TRUE + and ( + JSON_VALUE(grantee.URI) = 'http://acs.amazonaws.com/groups/global/AllUsers' + and permission in ('READ_ACP', 'FULL_CONTROL') + ) + ) + or ( + CAST( JSON_VALUE(aws_s3_bucket_public_access_blocks.public_access_block_configuration.BlockPublicPolicy) AS BOOL) != TRUE + and policy_allow_public.statement_count > 0 + ) +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/s3/publicly_writable_buckets.sql b/transformations/aws/macros/s3/publicly_writable_buckets.sql index 71d79b152..4d8e285b3 100644 --- a/transformations/aws/macros/s3/publicly_writable_buckets.sql +++ b/transformations/aws/macros/s3/publicly_writable_buckets.sql @@ -130,4 +130,66 @@ where {% endmacro %} {% macro default__publicly_writable_buckets(framework, check_id) %}{% endmacro %} - \ No newline at end of file +{% macro bigquery__publicly_writable_buckets(framework, check_id) %} +with policy_allow_public as ( + select + arn, + count(*) as statement_count + from + ( + select + b.arn, + bp.policy_json.Statement.Principal as principals + from + {{ full_table_name("aws_s3_buckets") }} b + inner join {{ full_table_name("aws_s3_bucket_policies") }} bp on b.arn = bp.bucket_arn + where + JSON_VALUE(bp.policy_json.Statement.Effect) = '"Allow"' + ) as foo + where + JSON_VALUE(principals) = '"*"' + or ( + 'AWS' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(principals)) + and ( + JSON_VALUE(principals.AWS) = '"*"' + or '"*"' IN UNNEST(JSON_EXTRACT_STRING_ARRAY(principals.AWS)) + ) + ) + group by + arn +) +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'S3 buckets should prohibit public write access' as title, + aws_s3_buckets.account_id, + aws_s3_buckets.arn as resource_id, + 'fail' as status -- TODO FIXME +from + -- Find and join all bucket ACLS that give a public write access + {{ full_table_name("aws_s3_buckets") }} +left join + {{ full_table_name("aws_s3_bucket_grants") }} on + aws_s3_buckets.arn = aws_s3_bucket_grants.bucket_arn +-- Find all statements that could give public allow access +-- Statements that give public access have 1) Effect == Allow 2) One of the following principal: +-- Principal = {"AWS": "*"} +-- Principal = {"AWS": ["arn:aws:iam::12345678910:root", 
"*"]} +-- Principal = "*" +left join policy_allow_public on + aws_s3_buckets.arn = policy_allow_public.arn +left join {{ full_table_name("aws_s3_bucket_public_access_blocks") }} on + aws_s3_buckets.arn = aws_s3_bucket_public_access_blocks.bucket_arn +where + ( + CAST( JSON_VALUE(aws_s3_bucket_public_access_blocks.public_access_block_configuration.BlockPublicAcls) AS BOOL) != TRUE + and ( + JSON_VALUE(grantee.URI) = 'http://acs.amazonaws.com/groups/global/AllUsers' + and permission in ('WRITE_ACP', 'FULL_CONTROL') + ) + ) + or ( + CAST( JSON_VALUE(aws_s3_bucket_public_access_blocks.public_access_block_configuration.BlockPublicPolicy) AS BOOL) != TRUE + and policy_allow_public.statement_count > 0 + ) +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/s3/s3_cross_region_replication.sql b/transformations/aws/macros/s3/s3_cross_region_replication.sql index 4eafd8eb1..0bc4184a2 100644 --- a/transformations/aws/macros/s3/s3_cross_region_replication.sql +++ b/transformations/aws/macros/s3/s3_cross_region_replication.sql @@ -20,6 +20,21 @@ from -- Note: This query doesn't validate that the destination bucket is actually in a different region {% endmacro %} +{% macro bigquery__s3_cross_region_replication(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'S3 buckets with replication rules should be enabled' as title, + aws_s3_buckets.account_id, + aws_s3_buckets.arn as resource_id, + case when + JSON_VALUE(aws_s3_bucket_replications.replication_configuration.Rule.Status) is distinct from 'Enabled' + then 'fail' else 'pass' end as status +from + {{ full_table_name("aws_s3_buckets") }} + inner join {{ full_table_name("aws_s3_bucket_replications") }} on aws_s3_buckets.arn = aws_s3_bucket_replications.bucket_arn +{% endmacro %} + {% macro snowflake__s3_cross_region_replication(framework, check_id) %} select '{{framework}}' as framework, diff --git a/transformations/aws/macros/s3/s3_server_side_encryption_enabled.sql b/transformations/aws/macros/s3/s3_server_side_encryption_enabled.sql index 53d167277..9d96950f4 100644 --- a/transformations/aws/macros/s3/s3_server_side_encryption_enabled.sql +++ b/transformations/aws/macros/s3/s3_server_side_encryption_enabled.sql @@ -35,4 +35,20 @@ left join aws_s3_bucket_encryption_rules on aws_s3_bucket_encryption_rules.bucke {% endmacro %} {% macro default__s3_server_side_encryption_enabled(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__s3_server_side_encryption_enabled(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'S3 buckets should have server-side encryption enabled' as title, + aws_s3_buckets.account_id, + arn as resource_id, + case when + aws_s3_bucket_encryption_rules.bucket_arn is null + then 'fail' else 'pass' end as status +from + {{ full_table_name("aws_s3_buckets") }} +left join {{ full_table_name("aws_s3_bucket_encryption_rules") }} on aws_s3_bucket_encryption_rules.bucket_arn=aws_s3_buckets.arn + +-- Note: This query doesn't validate if a bucket policy requires encryption for `put-object` requests +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/sagemaker/sagemaker_notebook_instance_direct_internet_access_disabled.sql b/transformations/aws/macros/sagemaker/sagemaker_notebook_instance_direct_internet_access_disabled.sql index 11fac5c6f..3f9196370 100644 --- 
a/transformations/aws/macros/sagemaker/sagemaker_notebook_instance_direct_internet_access_disabled.sql +++ b/transformations/aws/macros/sagemaker/sagemaker_notebook_instance_direct_internet_access_disabled.sql @@ -29,4 +29,16 @@ from aws_sagemaker_notebook_instances {% endmacro %} {% macro default__sagemaker_notebook_instance_direct_internet_access_disabled(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__sagemaker_notebook_instance_direct_internet_access_disabled(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Amazon SageMaker notebook instances should not have direct internet access' as title, + account_id, + arn as resource_id, + case when + direct_internet_access = 'Enabled' + then 'fail' else 'pass' end as status +from {{ full_table_name("aws_sagemaker_notebook_instances") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/secretsmanager/remove_unused_secrets_manager_secrets.sql b/transformations/aws/macros/secretsmanager/remove_unused_secrets_manager_secrets.sql index 7617a140d..ca0ac8c43 100644 --- a/transformations/aws/macros/secretsmanager/remove_unused_secrets_manager_secrets.sql +++ b/transformations/aws/macros/secretsmanager/remove_unused_secrets_manager_secrets.sql @@ -31,4 +31,17 @@ from aws_secretsmanager_secrets {% endmacro %} {% macro default__remove_unused_secrets_manager_secrets(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__remove_unused_secrets_manager_secrets(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Remove unused Secrets Manager secrets' as title, + account_id, + arn as resource_id, + case when + (last_accessed_date is null and created_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 90 DAY)) + or (last_accessed_date is not null and last_accessed_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 90 DAY)) + then 'fail' else 'pass' end as status +from {{ full_table_name("aws_secretsmanager_secrets") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/secretsmanager/secrets_configured_with_automatic_rotation_should_rotate_successfully.sql b/transformations/aws/macros/secretsmanager/secrets_configured_with_automatic_rotation_should_rotate_successfully.sql index 0c0f0b1a9..1324678d3 100644 --- a/transformations/aws/macros/secretsmanager/secrets_configured_with_automatic_rotation_should_rotate_successfully.sql +++ b/transformations/aws/macros/secretsmanager/secrets_configured_with_automatic_rotation_should_rotate_successfully.sql @@ -32,4 +32,17 @@ select {% endmacro %} {% macro default__secrets_configured_with_automatic_rotation_should_rotate_successfully(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__secrets_configured_with_automatic_rotation_should_rotate_successfully(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Secrets Manager secrets configured with automatic rotation should rotate successfully' as title, + account_id, + arn as resource_id, + case when + (last_rotated_date is null and created_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), (INTERVAL 1*(CAST(JSON_VALUE(rotation_rules.AutomaticallyAfterDays) AS INT64)) DAY) )) + or (last_rotated_date is not null and last_rotated_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), (INTERVAL 1*(CAST(JSON_VALUE(rotation_rules.AutomaticallyAfterDays) AS INT64)) DAY) )) + then 'fail' else 
'pass' end as status + from {{ full_table_name("aws_secretsmanager_secrets") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/secretsmanager/secrets_should_be_rotated_within_a_specified_number_of_days.sql b/transformations/aws/macros/secretsmanager/secrets_should_be_rotated_within_a_specified_number_of_days.sql index 946e14a50..d1fcee11a 100644 --- a/transformations/aws/macros/secretsmanager/secrets_should_be_rotated_within_a_specified_number_of_days.sql +++ b/transformations/aws/macros/secretsmanager/secrets_should_be_rotated_within_a_specified_number_of_days.sql @@ -31,4 +31,17 @@ from aws_secretsmanager_secrets {% endmacro %} {% macro default__secrets_should_be_rotated_within_a_specified_number_of_days(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__secrets_should_be_rotated_within_a_specified_number_of_days(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Secrets Manager secrets should be rotated within a specified number of days' as title, + account_id, + arn as resource_id, + case when + (last_rotated_date is null and created_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 90 DAY)) + or (last_rotated_date is not null and last_rotated_date < TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 90 DAY)) + then 'fail' else 'pass' end as status +from {{ full_table_name("aws_secretsmanager_secrets") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/secretsmanager/secrets_should_have_automatic_rotation_enabled.sql b/transformations/aws/macros/secretsmanager/secrets_should_have_automatic_rotation_enabled.sql index 80e173452..34d6af711 100644 --- a/transformations/aws/macros/secretsmanager/secrets_should_have_automatic_rotation_enabled.sql +++ b/transformations/aws/macros/secretsmanager/secrets_should_have_automatic_rotation_enabled.sql @@ -29,4 +29,16 @@ from aws_secretsmanager_secrets {% endmacro %} {% macro default__secrets_should_have_automatic_rotation_enabled(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__secrets_should_have_automatic_rotation_enabled(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Secrets Manager secrets should have automatic rotation enabled' as title, + account_id, + arn as resource_id, + case when + rotation_enabled is distinct from TRUE + then 'fail' else 'pass' end as status +from {{ full_table_name("aws_secretsmanager_secrets") }} +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/ssm/ec2_instances_should_be_managed_by_ssm.sql b/transformations/aws/macros/ssm/ec2_instances_should_be_managed_by_ssm.sql index 17586f621..55be69729 100644 --- a/transformations/aws/macros/ssm/ec2_instances_should_be_managed_by_ssm.sql +++ b/transformations/aws/macros/ssm/ec2_instances_should_be_managed_by_ssm.sql @@ -33,4 +33,18 @@ left outer join aws_ssm_instances on aws_ec2_instances.instance_id = aws_ssm_ins {% endmacro %} {% macro default__ec2_instances_should_be_managed_by_ssm(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__ec2_instances_should_be_managed_by_ssm(framework, check_id) %} +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Amazon EC2 instances should be managed by AWS Systems Manager' as title, + aws_ec2_instances.account_id, + aws_ec2_instances.arn as resource_id, + case when + aws_ssm_instances.instance_id is null + then 
'fail' else 'pass' end as status +from + {{ full_table_name("aws_ec2_instances") }} +left outer join {{ full_table_name("aws_ssm_instances") }} on aws_ec2_instances.instance_id = aws_ssm_instances.instance_id +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/ssm/instances_should_have_association_compliance_status_of_compliant.sql b/transformations/aws/macros/ssm/instances_should_have_association_compliance_status_of_compliant.sql index 258dd24df..b8ca7c934 100644 --- a/transformations/aws/macros/ssm/instances_should_have_association_compliance_status_of_compliant.sql +++ b/transformations/aws/macros/ssm/instances_should_have_association_compliance_status_of_compliant.sql @@ -51,4 +51,27 @@ select {% endmacro %} {% macro default__instances_should_have_association_compliance_status_of_compliant(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__instances_should_have_association_compliance_status_of_compliant(framework, check_id) %} +with association_compliance_status_groups as( + select + instance_arn, + status + from + {{ full_table_name("aws_ssm_instance_compliance_items") }} + where + compliance_type = 'Association' +) +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Amazon EC2 instances managed by Systems Manager should have an association compliance status of COMPLIANT' as title, + aws_ssm_instances.account_id, + aws_ssm_instances.arn, + case when + association_compliance_status_groups.status is distinct from 'COMPLIANT' + then 'fail' else 'pass' end as status + from + {{ full_table_name("aws_ssm_instances") }} + inner join association_compliance_status_groups on aws_ssm_instances.arn = association_compliance_status_groups.instance_arn +{% endmacro %} \ No newline at end of file diff --git a/transformations/aws/macros/ssm/instances_should_have_patch_compliance_status_of_compliant.sql b/transformations/aws/macros/ssm/instances_should_have_patch_compliance_status_of_compliant.sql index b2f346449..c5a802719 100644 --- a/transformations/aws/macros/ssm/instances_should_have_patch_compliance_status_of_compliant.sql +++ b/transformations/aws/macros/ssm/instances_should_have_patch_compliance_status_of_compliant.sql @@ -40,7 +40,32 @@ select from aws_ssm_instances INNER join patch_compliance_status_groups - on aws_ssm_instances.arn = patch_compliance_status_groups.instance_arn{% endmacro %} + on aws_ssm_instances.arn = patch_compliance_status_groups.instance_arn +{% endmacro %} {% macro default__instances_should_have_patch_compliance_status_of_compliant(framework, check_id) %}{% endmacro %} - \ No newline at end of file + +{% macro bigquery__instances_should_have_patch_compliance_status_of_compliant(framework, check_id) %} +with patch_compliance_status_groups as( + select DISTINCT + instance_arn, + status + from + {{ full_table_name("aws_ssm_instance_compliance_items") }} + where + compliance_type = 'Patch' +) +select + '{{framework}}' as framework, + '{{check_id}}' as check_id, + 'Amazon EC2 instances managed by Systems Manager should have a patch compliance status of COMPLIANT after a patch installation' as title, + aws_ssm_instances.account_id, + aws_ssm_instances.arn, + case when + patch_compliance_status_groups.status is distinct from 'COMPLIANT' + then 'fail' else 'pass' end as status + from + {{ full_table_name("aws_ssm_instances") }} +INNER join patch_compliance_status_groups + on aws_ssm_instances.arn = patch_compliance_status_groups.instance_arn +{% endmacro %} \ No newline at end of file 
diff --git a/transformations/aws/macros/wafv2/wafv2_web_acl_logging_should_be_enabled.sql b/transformations/aws/macros/wafv2/wafv2_web_acl_logging_should_be_enabled.sql
index 836d81f6f..8ffd71ae8 100644
--- a/transformations/aws/macros/wafv2/wafv2_web_acl_logging_should_be_enabled.sql
+++ b/transformations/aws/macros/wafv2/wafv2_web_acl_logging_should_be_enabled.sql
@@ -61,4 +61,35 @@ select
     then 'fail' else 'pass' end as status
 from aws_wafv2_web_acls
 )
-{% endmacro %}
\ No newline at end of file
+{% endmacro %}
+
+{% macro bigquery__wafv2_web_acl_logging_should_be_enabled(framework, check_id) %}
+(
+-- WAF Classic
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'AWS WAF Classic global web ACL logging should be enabled' as title,
+    account_id,
+    arn as resource_id,
+    case when
+        logging_configuration is null or JSON_VALUE(logging_configuration) = '{}'
+        then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_waf_web_acls") }}
+)
+union all
+(
+-- WAF V2
+select
+    '{{framework}}' as framework,
+    '{{check_id}}' as check_id,
+    'AWS WAFV2 web ACL logging should be enabled' as title,
+    account_id,
+    arn as resource_id,
+    case when
+        logging_configuration is null or JSON_VALUE(logging_configuration) = '{}'
+        then 'fail' else 'pass' end as status
+from {{ full_table_name("aws_wafv2_web_acls") }}
+)
+{% endmacro %}
+
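For context on the plumbing these queries rely on: the bigquery__* naming above matches dbt's adapter-dispatch convention, and the models call three helper macros that this patch references but does not define, namely union(), gen_timestamp(), and full_table_name(). The sketch below shows plausible shapes for that wiring, assuming adapter.dispatch and a configured source named aws; it is illustrative only and may differ from the repository's actual definitions.

{# Sketch only (assumed, not part of this patch): each check macro is expected to
   resolve to its adapter-specific implementation via dbt's dispatch mechanism. #}
{% macro check_oauth_usage_for_sources(framework, check_id) %}
    {{ return(adapter.dispatch('check_oauth_usage_for_sources')(framework, check_id)) }}
{% endmacro %}

{# Assumed helper shapes referenced by the models above. #}
{% macro union() %}
    {{ return(adapter.dispatch('union')()) }}
{% endmacro %}

{% macro default__union() %} UNION {% endmacro %}

{# BigQuery rejects a bare UNION, so the BigQuery variant presumably spells out the set quantifier. #}
{% macro bigquery__union() %} UNION DISTINCT {% endmacro %}

{% macro gen_timestamp() %}
    {{ return(adapter.dispatch('gen_timestamp')()) }}
{% endmacro %}

{# Mirrors the timestamp line the models used before this patch. #}
{% macro default__gen_timestamp() %} ('{{ run_started_at }}')::timestamp as policy_execution_time {% endmacro %}

{% macro bigquery__gen_timestamp() %} CAST('{{ run_started_at }}' AS TIMESTAMP) as policy_execution_time {% endmacro %}

{# Assumed: full_table_name("aws_s3_buckets") expands to a fully qualified
   project.dataset.table reference for the raw CloudQuery table. #}
{% macro full_table_name(table_name) %}
    {{ source('aws', table_name) }}
{% endmacro %}

With wiring of this shape, the model SQL stays dialect-neutral, e.g. ({{ check_oauth_usage_for_sources('pci_dss_v3.2.1','codebuild.1') }}) {{ union() }} (...), and compiles to valid SQL on both Postgres and BigQuery.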