From a50d6d29d513b80a3ea806f1d07820722893301e Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Mon, 19 Aug 2024 16:32:18 +0200 Subject: [PATCH 01/55] Dual write --- deploy/rbac-clowdapp.yml | 8 +- ...ping_permissions_bindingmapping_v2_role.py | 28 ++ rbac/management/models.py | 11 +- rbac/management/role/model.py | 3 + .../role/relation_api_dual_write_handler.py | 156 +++++++ rbac/management/role/view.py | 73 +++- rbac/migration_tool/migrate.py | 41 +- ...sharedSystemRolesReplicatedRoleBindings.py | 57 ++- rbac/migration_tool/utils.py | 10 +- rbac/rbac/settings.py | 1 + tests/identity_request.py | 2 + tests/management/access/test_view.py | 17 +- tests/management/group/test_view.py | 2 +- tests/management/policy/test_view.py | 379 ++++++++++++++++++ tests/management/role/test_view.py | 197 ++++++++- 15 files changed, 923 insertions(+), 62 deletions(-) create mode 100644 rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py create mode 100644 rbac/management/role/relation_api_dual_write_handler.py create mode 100644 tests/management/policy/test_view.py diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index d5e46c359..c5d84f044 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -220,7 +220,8 @@ objects: value: ${SA_NAME} - name: RELATION_API_SERVER value: ${RELATION_API_SERVER} - + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} - name: scheduler-service minReplicas: ${{MIN_SCHEDULER_REPLICAS}} metadata: @@ -457,6 +458,8 @@ objects: value: ${GROUP_SEEDING_ENABLED} - name: BYPASS_BOP_VERIFICATION value: ${BYPASS_BOP_VERIFICATION} + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} - name: ROLE_CREATE_ALLOW_LIST value: ${ROLE_CREATE_ALLOW_LIST} - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL @@ -910,3 +913,6 @@ parameters: - name: RELATION_API_SERVER description: The gRPC API server to use for the relation value: "localhost:9000" +- name: REPLICATION_TO_RELATION_ENABLED + description: Enable replication to Relation API + value: "False" diff --git a/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py b/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py new file mode 100644 index 000000000..1e0b8d377 --- /dev/null +++ b/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py @@ -0,0 +1,28 @@ +# Generated by Django 4.2.15 on 2024-08-21 15:26 + +import django.contrib.postgres.fields +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ("management", "0047_rolemapping_rolev2_rolemapping_role_v2_rolebinding"), + ] + + operations = [ + migrations.AddField( + model_name="bindingmapping", + name="permissions", + field=django.contrib.postgres.fields.ArrayField( + base_field=models.CharField(max_length=200), blank=True, null=True, size=None + ), + ), + migrations.AddField( + model_name="bindingmapping", + name="v2_role", + field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to="management.v2role"), + preserve_default=False, + ), + ] diff --git a/rbac/management/models.py b/rbac/management/models.py index c18574c07..f620cc837 100644 --- a/rbac/management/models.py +++ b/rbac/management/models.py @@ -20,7 +20,16 @@ from management.permission.model import Permission from management.principal.model import Principal from management.group.model import Group -from 
management.role.model import Access, ExtRoleRelation, ExtTenant, ResourceDefinition, Role, V2Role, BindingMapping +from management.role.model import ( + Access, + ExtRoleRelation, + ExtTenant, + ResourceDefinition, + Role, + V2Role, + RoleMapping, + BindingMapping, +) from management.policy.model import Policy from management.audit_log.model import AuditLog from management.workspace.model import Workspace diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index 6c5370cf5..62a97abdc 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -20,6 +20,7 @@ from uuid import uuid4 from django.conf import settings +from django.contrib.postgres.fields import ArrayField from django.db import models from django.db.models import signals from django.utils import timezone @@ -138,6 +139,8 @@ class BindingMapping(models.Model): id = models.UUIDField(default=uuid4, primary_key=True) v1_role = models.ForeignKey(Role, on_delete=models.CASCADE) + v2_role = models.ForeignKey(V2Role, on_delete=models.CASCADE) + permissions = ArrayField(models.CharField(max_length=200), blank=True, null=True) def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py new file mode 100644 index 000000000..ef20d2d42 --- /dev/null +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -0,0 +1,156 @@ +# +# Copyright 2024 Red Hat, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . 
+# + +"""Class to handle Dual Write API related operations.""" +import logging + +from management.models import BindingMapping, V2Role, Workspace +from migration_tool.migrate import migrate_role +from migration_tool.utils import relationship_to_json + +from rbac.env import ENVIRONMENT + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +class DualWriteException(Exception): + """DualWrite exception.""" + + pass + + +class RelationApiDualWriteHandler: + """Class to handle Dual Write API related operations.""" + + def __init__(self, role): + """Initialize RelationApiDualWriteHandler.""" + if not self.replication_enabled(): + return + try: + self.role_relations = [] + self.current_role_relations = [] + self.role = role + self.tenant_id = role.tenant_id + self.org_id = role.tenant.org_id + self.root_workspace = Workspace.objects.get( + name="root", description="Root workspace", tenant_id=self.tenant_id + ) + except Exception as e: + raise DualWriteException(e) + + def replication_enabled(self): + """Check whether replication enabled.""" + return ENVIRONMENT.get_value("REPLICATION_TO_RELATION_ENABLED", default=False, cast=bool) + + def generate_relations_from_current_state_of_role(self): + """Generate relations from current state of role and UUIDs for v2 role and role binding from database.""" + if not self.replication_enabled(): + return + try: + logger.info( + "[Dual Write] Generate relations from current state of role(%s): '%s'", self.role.uuid, self.role.name + ) + relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id, True, True, False) + self.current_role_relations = relations + except Exception as e: + raise DualWriteException(e) + + def regenerate_relations_and_mappings_for_role(self): + """Delete and generated relations with mapping for a role.""" + if not self.replication_enabled(): + return [] + self.delete_mappings() + return self.generate_relations_and_mappings_for_role() + + def generate_relations_and_mappings_for_role(self): + """Generate relations and mappings for a role with new UUIDs for v2 role and role bindings.""" + if not self.replication_enabled(): + return [] + try: + logger.info("[Dual Write] Generate new relations from role(%s): '%s'", self.role.uuid, self.role.name) + relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id) + self.role_relations = relations + return relations + except Exception as e: + raise DualWriteException(e) + + def get_current_role_relations(self): + """Get current roles relations.""" + return self.current_role_relations + + def delete_mappings(self): + """Delete mappings for a role.""" + if not self.replication_enabled(): + return + try: + logger.info("[Dual Write] Delete mappings for role(%s): '%s'", self.role.uuid, self.role.name) + v2_roles_ids = BindingMapping.objects.filter(v1_role=self.role.id).values("v2_role_id") + # this deletes also records in BindingMapping table and role binding + V2Role.objects.filter(id__in=v2_roles_ids).delete() + except Exception as e: + raise DualWriteException(e) + + def set_role(self, role): + """Set a role.""" + self.role = role + + def build_replication_event(self): + """Build replication event.""" + if not self.replication_enabled(): + return {} + logger.info("[Dual Write] Build Replication event for role(%s): '%s'", self.role.uuid, self.role.name) + relations_to_add = [] + for relation in self.role_relations: + relations_to_add.append(relationship_to_json(relation)) + + relations_to_remove = [] + for relation in 
self.current_role_relations: + relations_to_remove.append(relationship_to_json(relation)) + + replication_event = {"relations_to_add": relations_to_add, "relations_to_remove": relations_to_remove} + return replication_event + + def generate_replication_event_to_outbox(self, role): + """Generate replication event to outbox table.""" + if not self.replication_enabled(): + return + self.set_role(role) + self.regenerate_relations_and_mappings_for_role() + return self.save_replication_event_to_outbox() + + def save_replication_event_to_outbox(self): + """Generate and store replication event to outbox table.""" + if not self.replication_enabled(): + return {} + try: + replication_event = self.build_replication_event() + self.save_replication_event(replication_event) + except Exception as e: + raise DualWriteException(e) + return replication_event + + def save_replication_event(self, replication_event): + """Save replication event.""" + if not self.replication_enabled(): + return + logger.info( + "[Dual Write] Save replication event into outbox table for role(%s): '%s'", self.role.uuid, self.role.name + ) + logger.info( + "[Dual Write] Replication event: %s for role(%s): '%s'", replication_event, self.role.uuid, self.role.name + ) + # TODO: serialize and store event in to outbox table diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 69b512713..d80c651ef 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -17,8 +17,10 @@ """View for role management.""" import json +import logging import os import re +import traceback from django.conf import settings from django.core.exceptions import ValidationError @@ -26,6 +28,7 @@ from django.db.models import Q from django.db.models.aggregates import Count from django.http import Http404 +from django.shortcuts import get_object_or_404 from django.utils.translation import gettext as _ from django_filters import rest_framework as filters from management.filters import CommonFilters @@ -33,11 +36,13 @@ from management.notifications.notification_handlers import role_obj_change_notification_handler from management.permissions import RoleAccessPermission from management.querysets import get_role_queryset +from management.role.relation_api_dual_write_handler import DualWriteException, RelationApiDualWriteHandler from management.role.serializer import AccessSerializer, RoleDynamicSerializer, RolePatchSerializer from management.utils import validate_uuid from rest_framework import mixins, serializers, status, viewsets from rest_framework.decorators import action from rest_framework.filters import OrderingFilter +from rest_framework.response import Response from .model import Role from .serializer import RoleSerializer @@ -66,6 +71,8 @@ if TESTING_APP: settings.ROLE_CREATE_ALLOW_LIST.append(TESTING_APP) +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + class RoleFilter(CommonFilters): """Filter for role.""" @@ -209,12 +216,20 @@ def create(self, request, *args, **kwargs): } """ self.validate_role(request) + try: + with transaction.atomic(): + create_role = super().create(request=request, args=args, kwargs=kwargs) - create_role = super().create(request=request, args=args, kwargs=kwargs) + if status.is_success(create_role.status_code): + auditlog = AuditLog() + auditlog.log_create(request, AuditLog.ROLE) - if status.is_success(create_role.status_code): - auditlog = AuditLog() - auditlog.log_create(request, AuditLog.ROLE) + role = get_object_or_404(Role, uuid=create_role.data["uuid"]) + 
dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler.generate_relations_and_mappings_for_role() + dual_write_handler.save_replication_event_to_outbox() + except DualWriteException as e: + return self.dual_write_exception_response(e) return create_role @@ -327,9 +342,17 @@ def destroy(self, request, *args, **kwargs): message = "System roles cannot be deleted." error = {key: [_(message)]} raise serializers.ValidationError(error) - with transaction.atomic(): - self.delete_policies_if_no_role_attached(role) - response = super().destroy(request=request, args=args, kwargs=kwargs) + try: + with transaction.atomic(): + self.delete_policies_if_no_role_attached(role) + dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler.generate_relations_from_current_state_of_role() + response = super().destroy(request=request, args=args, kwargs=kwargs) + dual_write_handler.delete_mappings() + dual_write_handler.save_replication_event_to_outbox() + except DualWriteException as e: + return self.dual_write_exception_response(e) + if response.status_code == status.HTTP_204_NO_CONTENT: role_obj_change_notification_handler(role, "deleted", request.user) @@ -371,7 +394,7 @@ def update(self, request, *args, **kwargs): @apiParam (Path) {String} id Role unique identifier @apiParam (Request Body) {String} name Role name - @apiParam (Request Body) {Array} access Access definition + @apiParam (Request Body) {ArRray} access Access definition @apiParamExample {json} Request Body: { "name": "RoleA", @@ -418,15 +441,45 @@ def update(self, request, *args, **kwargs): validate_uuid(kwargs.get("uuid"), "role uuid validation") self.validate_role(request) - role = self.get_object() - update_role = super().update(request=request, args=args, kwargs=kwargs) + update_role = self.update_with_relation_api_replication(request=request, args=args, kwargs=kwargs) if status.is_success(update_role.status_code): auditlog = AuditLog() + role = self.get_object() auditlog.log_edit(request, AuditLog.ROLE, role) return update_role + def update_with_relation_api_replication(self, request, *args, **kwargs): + """Update a role with replicating data into Relation API.""" + try: + role = self.get_object() + dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler.generate_relations_from_current_state_of_role() + with transaction.atomic(): + response = super().update(request=request, args=args, kwargs=kwargs) + dual_write_handler.generate_replication_event_to_outbox(self.get_object()) + except DualWriteException as e: + return self.dual_write_exception_response(e) + + return response + + def dual_write_exception_response(self, e): + """Dual write exception response.""" + logging.error(traceback.format_exc()) + return Response( + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + data={ + "errors": [ + { + "detail": "Dual Write Exception:" + str(e), + "source": "role", + "status": str(status.HTTP_500_INTERNAL_SERVER_ERROR), + } + ] + }, + ) + @action(detail=True, methods=["get"]) def access(self, request, uuid=None): """Return access objects for specified role.""" diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 33f8db18e..c1934c258 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -32,18 +32,26 @@ logger = logging.getLogger(__name__) # pylint: disable=invalid-name -def spicedb_relationships(v2_role_bindings: FrozenSet[V2rolebinding], root_workspace: str): +def spicedb_relationships( + v2_role_bindings: FrozenSet[V2rolebinding], 
root_workspace: str, v1_role, create_binding_to_db=True +): """Generate a set of relationships for the given set of v2 role bindings.""" relationships = list() for v2_role_binding in v2_role_bindings: relationships.append( create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") ) - v2_role_data = v2_role_binding.role - v1_role = Role.objects.get(uuid=v2_role_binding.originalRole.id) - v2_role, _ = V2Role.objects.get_or_create(id=v2_role_data.id, is_system=v2_role_data.is_system) - v2_role.v1_roles.add(v1_role) - BindingMapping.objects.create(id=v2_role_binding.id, v1_role=v1_role) + if create_binding_to_db: + v2_role_data = v2_role_binding.role + v2_role, _ = V2Role.objects.get_or_create(id=v2_role_data.id, is_system=v2_role_data.is_system) + v2_role.v1_roles.add(v1_role) + BindingMapping.objects.create( + id=v2_role_binding.id, + v1_role=v1_role, + v2_role=v2_role, + permissions=list(v2_role_binding.role.permissions), + ) + for perm in v2_role_binding.role.permissions: relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) for group in v2_role_binding.groups: @@ -52,6 +60,7 @@ def spicedb_relationships(v2_role_bindings: FrozenSet[V2rolebinding], root_works for bound_resource in v2_role_binding.resources: parent_relation = "parent" if bound_resource.resource_type == "workspace" else "workspace" + if not (bound_resource.resource_type == "workspace" and bound_resource.resourceId == root_workspace): relationships.append( create_relationship( @@ -75,7 +84,15 @@ def spicedb_relationships(v2_role_bindings: FrozenSet[V2rolebinding], root_works return relationships -def migrate_role(role: Role, write_db: bool, root_workspace: str, default_workspace: str): +def migrate_role( + role: Role, + write_db: bool, + root_workspace: str, + default_workspace: str, + use_binding_from_db=False, + use_mapping_from_db=False, + create_binding_to_db=True, +): """Migrate a role from v1 to v2.""" v1_role = extract_info_into_v1_role(role) # With the replicated role bindings algorithm, role bindings are scoped by group, so we need to add groups @@ -87,9 +104,15 @@ def migrate_role(role: Role, write_db: bool, root_workspace: str, default_worksp v1_role = dataclasses.replace(v1_role, groups=frozenset(groups)) # This is where we wire in the implementation we're using into the Migrator - v2_roles = [v2_role for v2_role in v1_role_to_v2_mapping(v1_role, root_workspace, default_workspace)] - relationships = spicedb_relationships(frozenset(v2_roles), root_workspace) + v2_roles = [ + v2_role + for v2_role in v1_role_to_v2_mapping( + v1_role, role.id, root_workspace, default_workspace, use_binding_from_db, use_mapping_from_db + ) + ] + relationships = spicedb_relationships(frozenset(v2_roles), root_workspace, role, create_binding_to_db) output_relationships(relationships, write_db) + return relationships def migrate_workspace(tenant: Tenant, write_db: bool): diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 952c5b168..41d2b1894 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -20,6 +20,7 @@ import uuid from typing import Callable, FrozenSet, Type +from management.models import BindingMapping from management.role.model import Role from management.workspace.model import Workspace from migration_tool.ingest import add_element @@ -32,9 +33,8 @@ V2group, 
V2role, V2rolebinding, + cleanNameForV2SchemaCompatibility, ) -from migration_tool.models import cleanNameForV2SchemaCompatibility - logger = logging.getLogger(__name__) @@ -95,7 +95,14 @@ def set_system_roles(cls): skipped_apps = {"cost-management", "playbook-dispatcher", "approval"} -def v1_role_to_v2_mapping(v1_role: V1role, root_workspace: str, default_workspace: str) -> FrozenSet[V2rolebinding]: +def v1_role_to_v2_mapping( + v1_role: V1role, + v1_role_db_id, + root_workspace: str, + default_workspace: str, + use_binding_from_db=False, + use_mapping_from_db=False, +) -> FrozenSet[V2rolebinding]: """Convert a V1 role to a set of V2 role bindings.""" perm_groupings: Permissiongroupings = {} # Group V2 permissions by target @@ -126,8 +133,7 @@ def v1_role_to_v2_mapping(v1_role: V1role, root_workspace: str, default_workspac v2_perm, ) # Project permission sets to system roles - resource_roles = extract_system_roles(perm_groupings, v1_role) - + resource_roles = extract_system_roles(perm_groupings, v1_role, v1_role_db_id, use_mapping_from_db) # Construct rolebindings v2_role_bindings = [] v2_groups = v1groups_to_v2groups(v1_role.groups) @@ -135,13 +141,27 @@ def v1_role_to_v2_mapping(v1_role: V1role, root_workspace: str, default_workspac for resource in resources: if v2_groups: for v2_group in v2_groups: - role_binding_id = str(uuid.uuid4()) + if use_binding_from_db: + binding_mapping = BindingMapping.objects.filter( + v1_role=v1_role_db_id, v2_role_id=role.id + ).first() + if binding_mapping is None: + raise Exception("V2 role bindings not found in db") + role_binding_id = str(binding_mapping.id) + else: + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding( role_binding_id, v1_role, role, frozenset({resource}), frozenset({v2_group}) ) v2_role_bindings.append(v2_role_binding) else: - role_binding_id = str(uuid.uuid4()) + if use_binding_from_db: + binding_mapping = BindingMapping.objects.filter(v1_role=v1_role_db_id, v2_role_id=role.id).first() + if binding_mapping is None: + raise Exception("V2 role bindings not found in db") + role_binding_id = str(binding_mapping.id) + else: + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding(role_binding_id, v1_role, role, frozenset({resource}), v2_groups) v2_role_bindings.append(v2_role_binding) return frozenset(v2_role_bindings) @@ -151,7 +171,7 @@ def v1_role_to_v2_mapping(v1_role: V1role, root_workspace: str, default_workspac custom_roles_created = 0 -def extract_system_roles(perm_groupings, v1_role): +def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_db=False): """Extract system roles from a set of permissions.""" resource_roles = {} system_roles = SystemRole.get_system_roles() @@ -205,7 +225,15 @@ def extract_system_roles(perm_groupings, v1_role): else: candidate_system_roles[candidate] = {v1_role.id} # Add a custom role - add_element(resource_roles, V2role(str(uuid.uuid4()), False, frozenset(permissions)), resource) + if use_mapping_from_db: + binding_mapping = BindingMapping.objects.filter( + v1_role=db_role_id, permissions__contains=permissions + ).first() + v2_uuid = str(binding_mapping.v2_role_id) + else: + v2_uuid = uuid.uuid4() + + add_element(resource_roles, V2role(str(v2_uuid), False, frozenset(permissions)), resource) global custom_roles_created custom_roles_created += 1 return resource_roles @@ -230,10 +258,17 @@ def split_resourcedef_literal(resourceDef: V1resourcedef): def shared_system_role_replicated_role_bindings_v1_to_v2_mapping( - v1_role: V1role, root_workspace: 
Workspace, default_workspace: Workspace + v1_role: V1role, + v1_role_db_id, + root_workspace: Workspace, + default_workspace: Workspace, + use_binding_from_db=False, + use_mapping_from_db=False, ) -> FrozenSet[V2rolebinding]: """Convert a V1 role to a set of V2 role bindings.""" - return v1_role_to_v2_mapping(v1_role, root_workspace, default_workspace) + return v1_role_to_v2_mapping( + v1_role, v1_role_db_id, root_workspace, default_workspace, use_binding_from_db, use_mapping_from_db + ) def v1groups_to_v2groups(v1groups: FrozenSet[V1group]): diff --git a/rbac/migration_tool/utils.py b/rbac/migration_tool/utils.py index 956dd363e..090c2fa5d 100644 --- a/rbac/migration_tool/utils.py +++ b/rbac/migration_tool/utils.py @@ -45,7 +45,6 @@ def validate_and_create_obj_ref(obj_name, obj_id): validate_all(object_type) except ValidationFailed as err: logger.error(err) - obj_ref = common_pb2.ObjectReference(type=object_type, id=obj_id) try: validate_all(obj_ref) @@ -91,6 +90,15 @@ def stringify_spicedb_relationship(rel: common_pb2.Relationship): ) +def relationship_to_json(rel): + """Convert a relationship to a JSON object.""" + return { + "resource": {"type": rel.resource.type.name, "id": rel.resource.id}, + "relation": rel.relation, + "subject": {"type": rel.subject.subject.type.name, "id": rel.subject.subject.id}, + } + + def output_relationships(relationships: list, write_db: bool): """Output relationships to the console and optionally write them to the database.""" for rel in relationships: diff --git a/rbac/rbac/settings.py b/rbac/rbac/settings.py index a07e3938f..352bbbb1f 100644 --- a/rbac/rbac/settings.py +++ b/rbac/rbac/settings.py @@ -272,6 +272,7 @@ "api": {"handlers": LOGGING_HANDLERS, "level": RBAC_LOGGING_LEVEL}, "rbac": {"handlers": LOGGING_HANDLERS, "level": RBAC_LOGGING_LEVEL}, "management": {"handlers": LOGGING_HANDLERS, "level": RBAC_LOGGING_LEVEL}, + "migration_tool": {"handlers": LOGGING_HANDLERS, "level": RBAC_LOGGING_LEVEL}, }, } diff --git a/tests/identity_request.py b/tests/identity_request.py index 71bcbd4a5..2b5d89a88 100644 --- a/tests/identity_request.py +++ b/tests/identity_request.py @@ -16,6 +16,7 @@ # """Test Case extension to collect common test data.""" import uuid +import os from base64 import b64encode from json import dumps as json_dumps @@ -37,6 +38,7 @@ class IdentityRequest(TestCase): def setUpClass(cls): """Set up each test class.""" super().setUpClass() + os.environ["REPLICATION_TO_RELATION_ENABLED"] = "True" cls.customer_data = cls._create_customer_data() cls.user_data = cls._create_user_data() cls.request_context = cls._create_request_context(cls.customer_data, cls.user_data) diff --git a/tests/management/access/test_view.py b/tests/management/access/test_view.py index be07ded5c..06e3d76c7 100644 --- a/tests/management/access/test_view.py +++ b/tests/management/access/test_view.py @@ -26,7 +26,7 @@ from api.models import Tenant, User from datetime import timedelta from management.cache import TenantCache -from management.models import Group, Permission, Principal, Policy, Role, Access +from management.models import Group, Permission, Principal, Policy, Role, Access, Workspace from tests.identity_request import IdentityRequest @@ -46,7 +46,7 @@ def setUp(self): self.access_data = { "permission": "app:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "key1", "operation": "equal", "value": "value1"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "key1.id", "operation": "equal", "value": "value1"}}], } test_tenant_org_id = "100001" @@ 
-85,6 +85,8 @@ def setUp(self): self.group.save() self.permission = Permission.objects.create(permission="app:*:*", tenant=self.tenant) Permission.objects.create(permission="app:foo:bar", tenant=self.tenant) + Workspace.objects.create(name="root", description="Root workspace", tenant=self.tenant) + Workspace.objects.create(name="root", description="Root workspace", tenant=self.test_tenant) def tearDown(self): """Tear down access view tests.""" @@ -92,6 +94,7 @@ def tearDown(self): Principal.objects.all().delete() Role.objects.all().delete() Policy.objects.all().delete() + Workspace.objects.all().delete() def create_role(self, role_name, headers, in_access_data=None): """Create a role.""" @@ -276,7 +279,7 @@ def test_get_access_no_app_supplied(self, mock_request): policy_name = "policyA" access_data = { "permission": "app:test_foo:test_bar", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}}], } response = self.create_role(role_name, self.test_headers, access_data) role_uuid = response.data.get("uuid") @@ -317,7 +320,7 @@ def test_get_access_multiple_apps_supplied(self, mock_request): policy_name = "policyA" access_data = { "permission": "app:test_foo:test_bar", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}}], } response = self.create_role(role_name, self.test_headers, access_data) role_uuid = response.data.get("uuid") @@ -356,7 +359,7 @@ def test_get_access_no_partial_match(self, mock_request): policy_name = "policyA" access_data = { "permission": "app:test_foo:test_bar", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}}], } response = self.create_role(role_name, self.test_headers, access_data) role_uuid = response.data.get("uuid") @@ -397,7 +400,7 @@ def test_get_access_no_subset_match(self, mock_request): policy_name = "policyA" access_data = { "permission": "app:test_foo:test_bar", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}}], } response = self.create_role(role_name, self.test_headers, access_data) role_uuid = response.data.get("uuid") @@ -568,7 +571,7 @@ def test_get_access_no_match(self, mock_request): policy_name = "policyA" access_data = { "permission": "app:test_foo:test_bar", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}}], } response = self.create_role(role_name, self.test_headers, access_data) role_uuid = response.data.get("uuid") diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index adb9dbe59..b210c365a 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -2707,7 +2707,7 @@ def setUp(self): self.headers = request.META self.access_data = { "permission": "app:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "key1", "operation": "equal", "value": "value1"}}], + 
"resourceDefinitions": [{"attributeFilter": {"key": "key1.id", "operation": "equal", "value": "value1"}}], } self.principal = Principal(username=self.user_data["username"], tenant=self.tenant) diff --git a/tests/management/policy/test_view.py b/tests/management/policy/test_view.py new file mode 100644 index 000000000..f0f525af7 --- /dev/null +++ b/tests/management/policy/test_view.py @@ -0,0 +1,379 @@ +# +# Copyright 2019 Red Hat, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +"""Test the policy viewset.""" + +from uuid import uuid4 + +from django.urls import reverse +from rest_framework import status +from rest_framework.test import APIClient + +from api.models import User +from management.models import Group, Principal, Policy, Role, Permission, Workspace + +from tests.identity_request import IdentityRequest + + +class PolicyViewsetTests(IdentityRequest): + """Test the policy viewset.""" + + def setUp(self): + """Set up the policy viewset tests.""" + super().setUp() + request = self.request_context["request"] + user = User() + user.username = self.user_data["username"] + user.account = self.customer_data["account_id"] + request.user = user + + self.principal = Principal(username=self.user_data["username"], tenant=self.tenant) + self.principal.save() + self.group = Group(name="groupA", tenant=self.tenant) + self.group.save() + self.group.principals.add(self.principal) + self.group.save() + Permission.objects.create(permission="app:*:*", tenant=self.tenant) + Workspace.objects.create(name="root", description="Root workspace", tenant=self.tenant) + + def tearDown(self): + """Tear down policy viewset tests.""" + Group.objects.all().delete() + Principal.objects.all().delete() + Role.objects.all().delete() + Policy.objects.all().delete() + Workspace.objects.all().delete() + + def create_role(self, role_name, in_access_data=None): + """Create a role.""" + access_data = { + "permission": "app:*:*", + "resourceDefinitions": [{"attributeFilter": {"key": "key1.id", "operation": "equal", "value": "value1"}}], + } + if in_access_data: + access_data = in_access_data + test_data = {"name": role_name, "access": [access_data]} + + # create a role + url = reverse("role-list") + client = APIClient() + response = client.post(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + return response + + def create_policy(self, policy_name, group, roles, status=status.HTTP_201_CREATED): + """Create a policy.""" + # create a policy + test_data = {"name": policy_name, "group": group, "roles": roles} + url = reverse("policy-list") + client = APIClient() + response = client.post(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status) + return response + + def test_create_policy_success(self): + """Test that we can create a policy.""" + role_name = "roleA" + response = self.create_role(role_name) + 
self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + + # test that we can retrieve the policy + url = reverse("policy-detail", kwargs={"uuid": response.data.get("uuid")}) + client = APIClient() + response = client.get(url, **self.headers) + uuid = response.data.get("uuid") + policy = Policy.objects.get(uuid=uuid) + + self.assertIsNotNone(uuid) + self.assertIsNotNone(response.data.get("name")) + self.assertEqual(policy_name, response.data.get("name")) + self.assertEqual(policy.tenant, self.tenant) + self.assertEqual(str(self.group.uuid), response.data.get("group").get("uuid")) + + def test_delete_policy_success(self): + """Test that we can delete a policy.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + policy_uuid = response.data.get("uuid") + + client = APIClient() + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + response = client.delete(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_create_policy_invalid_group(self): + """Test that we cannot create a policy with invalid group.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_uuid = response.data.get("uuid") + policy_name = "policyA" + self.create_policy(policy_name, uuid4(), [role_uuid], status.HTTP_400_BAD_REQUEST) + + def test_create_policy_invalid_role(self): + """Test that we cannot create a policy with an invalid role.""" + policy_name = "policyA" + self.create_policy(policy_name, self.group.uuid, [uuid4()], status.HTTP_400_BAD_REQUEST) + + def test_create_policy_no_role(self): + """Test that we cannot create a policy without roles.""" + policy_name = "policyA" + self.create_policy(policy_name, self.group.uuid, [], status.HTTP_400_BAD_REQUEST) + + def test_create_policy_invalid(self): + """Test that creating an invalid policy returns an error.""" + test_data = {} + url = reverse("policy-list") + client = APIClient() + response = client.post(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + def test_create_policy_no_name(self): + """Test that creating a policy with no name returns a 400.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_uuid = response.data.get("uuid") + policy_name = None + self.create_policy(policy_name, self.group.uuid, [role_uuid], status.HTTP_400_BAD_REQUEST) + + def test_read_policy_invalid(self): + """Test that reading an invalid policy returns an error.""" + url = reverse("policy-detail", kwargs={"uuid": uuid4()}) + client = APIClient() + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_read_policy_list_success(self): + """Test that we can read a list of policies.""" + role_name = "roleA" + response = self.create_role(role_name) + 
self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + + # list a policies + url = reverse("policy-list") + client = APIClient() + response = client.get(url, **self.headers) + + self.assertEqual(response.status_code, status.HTTP_200_OK) + for keyname in ["meta", "links", "data"]: + self.assertIn(keyname, response.data) + self.assertIsInstance(response.data.get("data"), list) + self.assertEqual(len(response.data.get("data")), 1) + + policy = response.data.get("data")[0] + self.assertIsNotNone(policy.get("name")) + self.assertEqual(policy.get("name"), policy_name) + + def test_update_policy_success(self): + """Test that we can update an existing policy.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + updated_name = policy_name + "_update" + policy_uuid = response.data.get("uuid") + test_data = response.data + test_data["name"] = updated_name + test_data["group"] = self.group.uuid + test_data["roles"] = [role_uuid] + del test_data["uuid"] + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.put(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + self.assertIsNotNone(response.data.get("uuid")) + self.assertEqual(updated_name, response.data.get("name")) + + def test_update_policy_bad_group(self): + """Test that we cannot update an existing policy with a bad group.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + updated_name = policy_name + "_update" + policy_uuid = response.data.get("uuid") + test_data = response.data + test_data["name"] = updated_name + test_data["group"] = uuid4() + test_data["roles"] = [role_uuid] + del test_data["uuid"] + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.put(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + def test_update_policy_bad_role(self): + """Test that we cannot update an existing policy with a bad role.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + updated_name = policy_name + "_update" + policy_uuid = response.data.get("uuid") + test_data = response.data + test_data["name"] = updated_name + test_data["group"] = self.group.uuid + test_data["roles"] = [uuid4()] + del test_data["uuid"] + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.put(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + def test_update_policy_no_role(self): + """Test that we can update an existing policy to have no roles.""" + role_name = "roleA" + response = self.create_role(role_name) + 
self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + updated_name = policy_name + "_update" + policy_uuid = response.data.get("uuid") + test_data = response.data + test_data["name"] = updated_name + test_data["group"] = self.group.uuid + test_data["roles"] = [] + del test_data["uuid"] + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.put(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + + def test_update_policy_invalid(self): + """Test that updating an invalid policy returns an error.""" + url = reverse("policy-detail", kwargs={"uuid": uuid4()}) + client = APIClient() + response = client.put(url, {}, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_delete_role_success(self): + """Test that we can delete an existing role.""" + role_name = "roleA" + response = self.create_role(role_name) + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + policy_uuid = response.data.get("uuid") + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.delete(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + # verify the policy no longer exists + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_delete_policy_invalid(self): + """Test that deleting an invalid policy returns an error.""" + url = reverse("policy-detail", kwargs={"uuid": uuid4()}) + client = APIClient() + response = client.delete(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_policy_removed_on_group_deletion(self): + """Test that we can an existing policy is cleaned up when the group is deleted.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + policy_uuid = response.data.get("uuid") + + url = reverse("group-detail", kwargs={"uuid": self.group.uuid}) + client = APIClient() + response = client.delete(url, **self.headers) + + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + + def test_policy_removed_on_all_role_deletion(self): + """Test that we can an existing policy is cleaned up when the all roles are deleted.""" + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + role_uuid = response.data.get("uuid") + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, [role_uuid]) + policy_uuid = response.data.get("uuid") + + url = reverse("role-detail", kwargs={"uuid": role_uuid}) + client = APIClient() + response = client.delete(url, **self.headers) + + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, 
status.HTTP_404_NOT_FOUND) + + def test_policy_removed_on_one_role_deletion(self): + """Test that we can an existing policy remains when not all roles are deleted.""" + roles = [] + role_name = "roleA" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + roles.append(response.data.get("uuid")) + + role_name = "roleB" + response = self.create_role(role_name) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + roles.append(response.data.get("uuid")) + + policy_name = "policyA" + response = self.create_policy(policy_name, self.group.uuid, roles) + policy_uuid = response.data.get("uuid") + + url = reverse("role-detail", kwargs={"uuid": roles[0]}) + client = APIClient() + response = client.delete(url, **self.headers) + + url = reverse("policy-detail", kwargs={"uuid": policy_uuid}) + client = APIClient() + response = client.get(url, **self.headers) + self.assertEqual(response.status_code, status.HTTP_200_OK) diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index cffc87e18..fe94830cc 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -35,21 +35,74 @@ ResourceDefinition, ExtRoleRelation, ExtTenant, + RoleMapping, + Workspace, + BindingMapping, ) + from tests.core.test_kafka import copy_call_args from tests.identity_request import IdentityRequest -from unittest.mock import ANY, patch +from unittest.mock import ANY, patch, call URL = reverse("role-list") +def replication_event_for_v1_role(v1_role_uuid, root_workspace_uuid): + """Create a replication event for a v1 role.""" + return { + "relations_to_add": relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid), + "relations_to_remove": [], + } + + +def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): + """Create a relation API tuple for a v1 role.""" + role_id = Role.objects.get(uuid=v1_role_uuid).id + role_binding = BindingMapping.objects.filter(v1_role=role_id) + + relations = [] + for binding in role_binding: + relation_tuple = relation_api_tuple( + "role_binding", str(binding.id), "granted", "role", str(binding.v2_role.id) + ) + relations.append(relation_tuple) + + for permission in binding.permissions: + relation_tuple = relation_api_tuple("role", str(binding.v2_role.id), permission, "user", "*") + relations.append(relation_tuple) + if "app_all_read" in binding.permissions: + relation_tuple = relation_api_tuple( + "workspace", root_workspace_uuid, "user_grant", "role_binding", str(binding.id) + ) + relations.append(relation_tuple) + else: + relation_tuple = relation_api_tuple("keya/id", "valueA", "workspace", "workspace", root_workspace_uuid) + relations.append(relation_tuple) + + relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", str(binding.id)) + relations.append(relation_tuple) + return relations + + +def relation_api_tuple(resource_type, resource_id, relation, subject_type, subject_id): + return { + "resource": relation_api_resource(resource_type, resource_id), + "relation": relation, + "subject": relation_api_resource(subject_type, subject_id), + } + + +def relation_api_resource(type_resource, id_resource): + """Helper function for creating a relation resource in json.""" + return {"type": type_resource, "id": id_resource} + + class RoleViewsetTests(IdentityRequest): """Test the role viewset.""" def setUp(self): """Set up the role viewset tests.""" super().setUp() - sys_role_config = {"name": "system_role", "display_name": 
"system_display", "system": True} def_role_config = {"name": "default_role", "display_name": "default_display", "platform_default": True} @@ -133,6 +186,7 @@ def setUp(self): self.access3 = Access.objects.create(permission=self.permission2, role=self.sysRole, tenant=self.tenant) Permission.objects.create(permission="cost-management:*:*", tenant=self.tenant) + self.root_workspace = Workspace.objects.create(name="root", description="Root workspace", tenant=self.tenant) def tearDown(self): """Tear down role viewset tests.""" @@ -144,7 +198,8 @@ def tearDown(self): Access.objects.all().delete() ExtTenant.objects.all().delete() ExtRoleRelation.objects.all().delete() - + RoleMapping.objects.all().delete() + Workspace.objects.all().delete() # we need to delete old test_tenant's that may exist in cache test_tenant_org_id = "100001" cached_tenants = TenantCache() @@ -155,7 +210,9 @@ def create_role(self, role_name, role_display="", in_access_data=None): access_data = [ { "permission": "app:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "key1", "operation": "equal", "value": "value1"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "key1.id", "operation": "equal", "value": "value1"}} + ], }, {"permission": "app:*:read", "resourceDefinitions": []}, ] @@ -202,7 +259,7 @@ def test_create_role_success(self, send_kafka_message): { "permission": "app:*:*", "resourceDefinitions": [ - {"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}} + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} ], }, {"permission": "app:*:read", "resourceDefinitions": []}, @@ -271,20 +328,34 @@ def test_create_role_success(self, send_kafka_message): ANY, ) - def test_create_role_with_display_success(self): + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + def test_create_role_with_display_success(self, mock_method): """Test that we can create a role.""" role_name = "roleD" role_display = "display name for roleD" access_data = [ { "permission": "app:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], }, {"permission": "app:*:read", "resourceDefinitions": []}, ] response = self.create_role(role_name, role_display=role_display, in_access_data=access_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_id = Role.objects.get(uuid=response.data.get("uuid")).id + role_mapping = RoleMapping.objects.filter(v1_role=role_id) + role_binding = BindingMapping.objects.filter(v1_role=role_id) + + self.assertEqual(len(role_binding), 2) + self.assertEqual(len(role_mapping), 2) + + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) + + mock_method.assert_called_once() + mock_method.assert_called_with(replication_event) # test that we can retrieve the role url = reverse("role-detail", kwargs={"uuid": response.data.get("uuid")}) client = APIClient() @@ -304,7 +375,9 @@ def test_create_role_without_required_permission(self): access_data = [ { "permission": self.permission.permission, - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, 
in_access_data=access_data) @@ -341,7 +414,9 @@ def test_create_role_allow_list(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -364,7 +439,9 @@ def test_create_role_allow_list_fail(self): access_data = [ { "permission": "someApp:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -376,7 +453,7 @@ def test_create_role_appfilter_structure_fail(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": {"attributeFilter": {"key": "keyA", "operation": "in", "foo": "valueA"}}, + "resourceDefinitions": {"attributeFilter": {"key": "keyA.id", "operation": "in", "foo": "valueA"}}, } ] response = self.create_role(role_name, in_access_data=access_data) @@ -389,7 +466,7 @@ def test_create_role_appfilter_fields_fail(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "in", "foo": "valueA"}}], + "resourceDefinitions": [{"attributeFilter": {"key": "keyA.id", "operation": "in", "foo": "valueA"}}], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -401,7 +478,9 @@ def test_create_role_appfilter_operation_fail(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "boop", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "boop", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -414,7 +493,9 @@ def test_create_role_permission_does_not_exist_fail(self): access_data = [ { "permission": permission, - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -1128,7 +1209,6 @@ def test_patch_role_success(self): al_response = al_client.get(al_url, **self.headers) retrieve_data = al_response.data.get("data") al_list = retrieve_data - print(al_list) al_dict = al_list[1] al_dict_principal_username = al_dict["principal_username"] @@ -1258,7 +1338,9 @@ def test_update_role_invalid_permission(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -1274,6 +1356,45 @@ def test_update_role_invalid_permission(self): response = client.put(url, test_data, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + def test_update_role(self, mock_method): + """Test that updating a role 
with an invalid permission returns an error.""" + # Set up + role_name = "test_update_role" + access_data = [ + { + "permission": "app:*:*", + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], + }, + {"permission": "app:*:read", "resourceDefinitions": []}, + ] + + new_access_data = [ + { + "permission": "app:*:*", + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], + }, + {"permission": "app:*:read", "resourceDefinitions": []}, + ] + response = self.create_role(role_name, in_access_data=access_data) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + role_uuid = response.data.get("uuid") + test_data = response.data + test_data["access"] = new_access_data + url = reverse("role-detail", kwargs={"uuid": role_uuid}) + client = APIClient() + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.root_workspace.uuid)) + + response = client.put(url, test_data, format="json", **self.headers) + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) + replication_event["relations_to_remove"] = current_relations + mock_method.assert_called_with(replication_event) + self.assertEqual(response.status_code, status.HTTP_200_OK) + def test_update_role_invalid_resource_defs_structure(self): """Test that updating a role with an invalid resource definitions returns an error.""" # Set up @@ -1281,7 +1402,9 @@ def test_update_role_invalid_resource_defs_structure(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -1289,7 +1412,7 @@ def test_update_role_invalid_resource_defs_structure(self): role_uuid = response.data.get("uuid") test_data = response.data test_data.get("access")[0]["resourceDefinitions"] = { - "attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"} + "attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"} } # Test update failure @@ -1305,7 +1428,9 @@ def test_update_role_appfilter_operation_fail(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -1331,7 +1456,9 @@ def test_update_role_permission_does_not_exist_fail(self): access_data = [ { "permission": "cost-management:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "keyA", "operation": "equal", "value": "valueA"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], } ] response = self.create_role(role_name, in_access_data=access_data) @@ -1348,6 +1475,31 @@ def test_update_role_permission_does_not_exist_fail(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(response.data.get("errors")[0].get("detail"), f"Permission does not exist: {permission}") + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + def 
test_delete_role(self, mock_method): + """Test that we can delete an existing role.""" + role_name = "roleA" + access_data = [ + { + "permission": "app:*:*", + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], + }, + {"permission": "app:*:read", "resourceDefinitions": []}, + ] + response = self.create_role(role_name, in_access_data=access_data) + + role_uuid = response.data.get("uuid") + url = reverse("role-detail", kwargs={"uuid": role_uuid}) + client = APIClient() + replication_event = {"relations_to_add": [], "relations_to_remove": []} + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.root_workspace.uuid)) + replication_event["relations_to_remove"] = current_relations + response = client.delete(url, **self.headers) + mock_method.assert_called_with(replication_event) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + @patch("core.kafka.RBACProducer.send_kafka_message") def test_delete_role_success(self, send_kafka_message): """Test that we can delete an existing role.""" @@ -1420,13 +1572,16 @@ def test_delete_system_role(self): def test_update_admin_default_role(self): """Test that admin default roles are protected from deletion""" + url = reverse("role-detail", kwargs={"uuid": self.adminRole.uuid}) client = APIClient() access_data = [ { "admin_default": True, "permission": "app:*:*", - "resourceDefinitions": [{"attributeFilter": {"key": "key1", "operation": "equal", "value": "value1"}}], + "resourceDefinitions": [ + {"attributeFilter": {"key": "key1.id", "operation": "equal", "value": "value1"}} + ], }, {"permission": "app:*:read", "resourceDefinitions": []}, ] From 0cf70b6f2ea2db26cb9ddca360a89a795ae4637e Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 11:54:54 +0200 Subject: [PATCH 02/55] Remove bindings and its dependencies --- ...ping_permissions_bindingmapping_v2_role.py | 28 ----------- ...049_remove_rolemapping_v1_role_and_more.py | 34 +++++++++++++ rbac/management/models.py | 4 +- rbac/management/role/model.py | 24 --------- .../role/relation_api_dual_write_handler.py | 6 +-- rbac/migration_tool/migrate.py | 22 +++++---- ...sharedSystemRolesReplicatedRoleBindings.py | 49 ++++++++++--------- tests/management/role/test_view.py | 8 +-- tests/migration_tool/tests_migrate.py | 4 +- 9 files changed, 81 insertions(+), 98 deletions(-) delete mode 100644 rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py create mode 100644 rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py diff --git a/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py b/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py deleted file mode 100644 index 1e0b8d377..000000000 --- a/rbac/management/migrations/0048_bindingmapping_permissions_bindingmapping_v2_role.py +++ /dev/null @@ -1,28 +0,0 @@ -# Generated by Django 4.2.15 on 2024-08-21 15:26 - -import django.contrib.postgres.fields -from django.db import migrations, models -import django.db.models.deletion - - -class Migration(migrations.Migration): - - dependencies = [ - ("management", "0047_rolemapping_rolev2_rolemapping_role_v2_rolebinding"), - ] - - operations = [ - migrations.AddField( - model_name="bindingmapping", - name="permissions", - field=django.contrib.postgres.fields.ArrayField( - base_field=models.CharField(max_length=200), blank=True, null=True, size=None - ), - ), - migrations.AddField( - 
model_name="bindingmapping", - name="v2_role", - field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to="management.v2role"), - preserve_default=False, - ), - ] diff --git a/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py b/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py new file mode 100644 index 000000000..347224bb2 --- /dev/null +++ b/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py @@ -0,0 +1,34 @@ +# Generated by Django 4.2.15 on 2024-08-28 09:54 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('management', '0048_outbox'), + ] + + operations = [ + migrations.RemoveField( + model_name='rolemapping', + name='v1_role', + ), + migrations.RemoveField( + model_name='rolemapping', + name='v2_role', + ), + migrations.RemoveField( + model_name='v2role', + name='v1_roles', + ), + migrations.DeleteModel( + name='BindingMapping', + ), + migrations.DeleteModel( + name='RoleMapping', + ), + migrations.DeleteModel( + name='V2Role', + ), + ] diff --git a/rbac/management/models.py b/rbac/management/models.py index f620cc837..3ee03014c 100644 --- a/rbac/management/models.py +++ b/rbac/management/models.py @@ -26,9 +26,7 @@ ExtTenant, ResourceDefinition, Role, - V2Role, - RoleMapping, - BindingMapping, + #BindingMapping, ) from management.policy.model import Policy from management.audit_log.model import AuditLog diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index 62a97abdc..77794eea0 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -119,30 +119,6 @@ class Meta: ] -class V2Role(models.Model): - """V2 role definition.""" - - id = models.UUIDField(default=uuid4, primary_key=True) - is_system = models.BooleanField(default=False) - v1_roles = models.ManyToManyField(Role, through="RoleMapping") - - -class RoleMapping(models.Model): - """V2 role mapping definition.""" - - v1_role = models.ForeignKey(Role, on_delete=models.CASCADE) - v2_role = models.ForeignKey(V2Role, on_delete=models.CASCADE) - - -class BindingMapping(models.Model): - """V2 role binding definition.""" - - id = models.UUIDField(default=uuid4, primary_key=True) - v1_role = models.ForeignKey(Role, on_delete=models.CASCADE) - v2_role = models.ForeignKey(V2Role, on_delete=models.CASCADE) - permissions = ArrayField(models.CharField(max_length=200), blank=True, null=True) - - def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): """Signal handler for invalidating Principal cache on Role object change.""" logger.info( diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index ef20d2d42..f02b7e961 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -18,7 +18,7 @@ """Class to handle Dual Write API related operations.""" import logging -from management.models import BindingMapping, V2Role, Workspace +from management.models import Workspace from migration_tool.migrate import migrate_role from migration_tool.utils import relationship_to_json @@ -98,9 +98,9 @@ def delete_mappings(self): return try: logger.info("[Dual Write] Delete mappings for role(%s): '%s'", self.role.uuid, self.role.name) - v2_roles_ids = BindingMapping.objects.filter(v1_role=self.role.id).values("v2_role_id") + #v2_roles_ids = 
BindingMapping.objects.filter(v1_role=self.role.id).values("v2_role_id") # this deletes also records in BindingMapping table and role binding - V2Role.objects.filter(id__in=v2_roles_ids).delete() + # V2Role.objects.filter(id__in=v2_roles_ids).delete() except Exception as e: raise DualWriteException(e) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index c1934c258..054e55f10 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -21,6 +21,7 @@ from django.conf import settings from management.role.model import BindingMapping, Role, V2Role +from management.workspace.model import Workspace from migration_tool.models import V1group, V2rolebinding from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_mapping from migration_tool.utils import create_relationship, output_relationships @@ -41,16 +42,17 @@ def spicedb_relationships( relationships.append( create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") ) - if create_binding_to_db: - v2_role_data = v2_role_binding.role - v2_role, _ = V2Role.objects.get_or_create(id=v2_role_data.id, is_system=v2_role_data.is_system) - v2_role.v1_roles.add(v1_role) - BindingMapping.objects.create( - id=v2_role_binding.id, - v1_role=v1_role, - v2_role=v2_role, - permissions=list(v2_role_binding.role.permissions), - ) + + #if create_binding_to_db: + # v2_role_data = v2_role_binding.role + # v2_role, _ = V2Role.objects.get_or_create(id=v2_role_data.id, is_system=v2_role_data.is_system) + # v2_role.v1_roles.add(v1_role) + # BindingMapping.objects.create( + # id=v2_role_binding.id, + # v1_role=v1_role, + # v2_role=v2_role, + # permissions=list(v2_role_binding.role.permissions), + # ) for perm in v2_role_binding.role.permissions: relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 41d2b1894..8fc9d365b 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -20,7 +20,7 @@ import uuid from typing import Callable, FrozenSet, Type -from management.models import BindingMapping +#from management.models import BindingMapping from management.role.model import Role from management.workspace.model import Workspace from migration_tool.ingest import add_element @@ -141,27 +141,28 @@ def v1_role_to_v2_mapping( for resource in resources: if v2_groups: for v2_group in v2_groups: - if use_binding_from_db: - binding_mapping = BindingMapping.objects.filter( - v1_role=v1_role_db_id, v2_role_id=role.id - ).first() - if binding_mapping is None: - raise Exception("V2 role bindings not found in db") - role_binding_id = str(binding_mapping.id) - else: - role_binding_id = str(uuid.uuid4()) + #if use_binding_from_db: + # binding_mapping = BindingMapping.objects.filter( + # v1_role=v1_role_db_id, v2_role_id=role.id + # ).first() + # if binding_mapping is None: + # raise Exception("V2 role bindings not found in db") + # role_binding_id = str(binding_mapping.id) + #else: + # + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding( role_binding_id, v1_role, role, frozenset({resource}), frozenset({v2_group}) ) v2_role_bindings.append(v2_role_binding) else: - if use_binding_from_db: - binding_mapping = BindingMapping.objects.filter(v1_role=v1_role_db_id, v2_role_id=role.id).first() - 
if binding_mapping is None: - raise Exception("V2 role bindings not found in db") - role_binding_id = str(binding_mapping.id) - else: - role_binding_id = str(uuid.uuid4()) + #if use_binding_from_db: + # binding_mapping = BindingMapping.objects.filter(v1_role=v1_role_db_id, v2_role_id=role.id).first() + # if binding_mapping is None: + # raise Exception("V2 role bindings not found in db") + # role_binding_id = str(binding_mapping.id) + #else: + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding(role_binding_id, v1_role, role, frozenset({resource}), v2_groups) v2_role_bindings.append(v2_role_binding) return frozenset(v2_role_bindings) @@ -225,13 +226,13 @@ def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_d else: candidate_system_roles[candidate] = {v1_role.id} # Add a custom role - if use_mapping_from_db: - binding_mapping = BindingMapping.objects.filter( - v1_role=db_role_id, permissions__contains=permissions - ).first() - v2_uuid = str(binding_mapping.v2_role_id) - else: - v2_uuid = uuid.uuid4() + #if use_mapping_from_db: + #binding_mapping = BindingMapping.objects.filter( + # v1_role=db_role_id, permissions__contains=permissions + #).first() + #v2_uuid = str(binding_mapping.v2_role_id) + #else: + v2_uuid = uuid.uuid4() add_element(resource_roles, V2role(str(v2_uuid), False, frozenset(permissions)), resource) global custom_roles_created diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index fe94830cc..fd5474059 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -37,7 +37,7 @@ ExtTenant, RoleMapping, Workspace, - BindingMapping, + #BindingMapping, ) from tests.core.test_kafka import copy_call_args @@ -58,8 +58,8 @@ def replication_event_for_v1_role(v1_role_uuid, root_workspace_uuid): def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): """Create a relation API tuple for a v1 role.""" role_id = Role.objects.get(uuid=v1_role_uuid).id - role_binding = BindingMapping.objects.filter(v1_role=role_id) - + #role_binding = BindingMapping.objects.filter(v1_role=role_id) + role_binding = [] relations = [] for binding in role_binding: relation_tuple = relation_api_tuple( @@ -347,7 +347,7 @@ def test_create_role_with_display_success(self, mock_method): role_id = Role.objects.get(uuid=response.data.get("uuid")).id role_mapping = RoleMapping.objects.filter(v1_role=role_id) - role_binding = BindingMapping.objects.filter(v1_role=role_id) + role_binding = [] #BindingMapping.objects.filter(v1_role=role_id) self.assertEqual(len(role_binding), 2) self.assertEqual(len(role_mapping), 2) diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index fae745078..74c598960 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -93,8 +93,8 @@ def test_migration_of_data(self, logger_mock): """Test that we get the correct access for a principal.""" kwargs = {"exclude_apps": ["app1"], "orgs": ["1234567"]} migrate_data(**kwargs) - self.assertEqual(V2Role.objects.count(), 3) - self.assertEqual(BindingMapping.objects.count(), 3) + #self.assertEqual(V2Role.objects.count(), 3) + #self.assertEqual(BindingMapping.objects.count(), 3) org_id = self.tenant.org_id root_workspace_id = f"root-workspace-{self.tenant.org_id}" From 7ede630da3e744957aebf66b167536a707dbb93a Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 12:32:31 +0200 Subject: [PATCH 03/55] Add BindingMapping model with JSON field to 
capture mappings --- .../migrations/0050_bindingmapping.py | 22 +++++++++++++++++++ rbac/management/models.py | 2 +- rbac/management/role/model.py | 10 +++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 rbac/management/migrations/0050_bindingmapping.py diff --git a/rbac/management/migrations/0050_bindingmapping.py b/rbac/management/migrations/0050_bindingmapping.py new file mode 100644 index 000000000..40b0bcb2d --- /dev/null +++ b/rbac/management/migrations/0050_bindingmapping.py @@ -0,0 +1,22 @@ +# Generated by Django 4.2.15 on 2024-08-28 10:27 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('management', '0049_remove_rolemapping_v1_role_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='BindingMapping', + fields=[ + ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('mappings', models.JSONField(default=dict)), + ('role', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='binding_mapping', to='management.role')), + ], + ), + ] diff --git a/rbac/management/models.py b/rbac/management/models.py index 3ee03014c..d1f579474 100644 --- a/rbac/management/models.py +++ b/rbac/management/models.py @@ -26,7 +26,7 @@ ExtTenant, ResourceDefinition, Role, - #BindingMapping, + BindingMapping, ) from management.policy.model import Policy from management.audit_log.model import AuditLog diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index 77794eea0..ccc223193 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -118,6 +118,16 @@ class Meta: models.UniqueConstraint(fields=["ext_tenant", "ext_id"], name="unique external id per external tenant") ] +class BindingMapping(models.Model): + """V2 role binding definition.""" + mappings = models.JSONField(default=dict) + # One-to-one relationship with Role + role = models.OneToOneField( + Role, + on_delete=models.CASCADE, + related_name='binding_mapping' + ) + def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): """Signal handler for invalidating Principal cache on Role object change.""" From f188deb4b76b30d3b7ca49a9b5019b4ad5b02577 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 12:33:05 +0200 Subject: [PATCH 04/55] Create v2 mappings during migration --- rbac/migration_tool/migrate.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 054e55f10..e24e769cd 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -43,16 +43,19 @@ def spicedb_relationships( create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") ) - #if create_binding_to_db: - # v2_role_data = v2_role_binding.role - # v2_role, _ = V2Role.objects.get_or_create(id=v2_role_data.id, is_system=v2_role_data.is_system) - # v2_role.v1_roles.add(v1_role) - # BindingMapping.objects.create( - # id=v2_role_binding.id, - # v1_role=v1_role, - # v2_role=v2_role, - # permissions=list(v2_role_binding.role.permissions), - # ) + if create_binding_to_db: + v2_role_data = v2_role_binding.role + + binding_mapping, _ = BindingMapping.objects.get_or_create(role=v1_role) + if not binding_mapping.mappings: + binding_mapping.mappings = {} + + binding_mapping.mappings[v2_role_binding.id] = { + "v2_role_uuid": 
str(v2_role_data.id), + "permissions": list(v2_role_binding.role.permissions), + } + + binding_mapping.save() for perm in v2_role_binding.role.permissions: relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) From cb171496981167573034d8a4189071693cdee785 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 14:30:17 +0200 Subject: [PATCH 05/55] Lock mapping records during updaets in role actions --- rbac/management/role/model.py | 7 +- rbac/migration_tool/migrate.py | 2 +- ...sharedSystemRolesReplicatedRoleBindings.py | 65 ++++++++++++------- tests/management/role/test_view.py | 58 ++++++++++------- tests/migration_tool/tests_migrate.py | 55 +++++++++------- 5 files changed, 113 insertions(+), 74 deletions(-) diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index ccc223193..dbe6ffc4a 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -20,7 +20,6 @@ from uuid import uuid4 from django.conf import settings -from django.contrib.postgres.fields import ArrayField from django.db import models from django.db.models import signals from django.utils import timezone @@ -118,14 +117,16 @@ class Meta: models.UniqueConstraint(fields=["ext_tenant", "ext_id"], name="unique external id per external tenant") ] + class BindingMapping(models.Model): - """V2 role binding definition.""" + """V2 binding Mapping definition.""" + mappings = models.JSONField(default=dict) # One-to-one relationship with Role role = models.OneToOneField( Role, on_delete=models.CASCADE, - related_name='binding_mapping' + related_name="binding_mapping" ) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index e24e769cd..3fbb18086 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -46,7 +46,7 @@ def spicedb_relationships( if create_binding_to_db: v2_role_data = v2_role_binding.role - binding_mapping, _ = BindingMapping.objects.get_or_create(role=v1_role) + binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role=v1_role) if not binding_mapping.mappings: binding_mapping.mappings = {} diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 8fc9d365b..6d2a0fb88 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -20,7 +20,7 @@ import uuid from typing import Callable, FrozenSet, Type -#from management.models import BindingMapping +from management.models import BindingMapping from management.role.model import Role from management.workspace.model import Workspace from migration_tool.ingest import add_element @@ -141,28 +141,36 @@ def v1_role_to_v2_mapping( for resource in resources: if v2_groups: for v2_group in v2_groups: - #if use_binding_from_db: - # binding_mapping = BindingMapping.objects.filter( - # v1_role=v1_role_db_id, v2_role_id=role.id - # ).first() - # if binding_mapping is None: - # raise Exception("V2 role bindings not found in db") - # role_binding_id = str(binding_mapping.id) - #else: - # - role_binding_id = str(uuid.uuid4()) + if use_binding_from_db: + binding_mapping = BindingMapping.objects.filter(role_id=v1_role_db_id).first() + if binding_mapping is None: + raise Exception("V2 role bindings not found in db") + + role_binding_id = None + for role_binding_uuid, data in binding_mapping.mappings.items(): + if data["v2_role_uuid"] == role.id: + 
role_binding_id = str(role_binding_uuid) + + if role_binding_id is None: + raise Exception("role_binding_id not found in mappings") + else: + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding( role_binding_id, v1_role, role, frozenset({resource}), frozenset({v2_group}) ) v2_role_bindings.append(v2_role_binding) else: - #if use_binding_from_db: - # binding_mapping = BindingMapping.objects.filter(v1_role=v1_role_db_id, v2_role_id=role.id).first() - # if binding_mapping is None: - # raise Exception("V2 role bindings not found in db") - # role_binding_id = str(binding_mapping.id) - #else: - role_binding_id = str(uuid.uuid4()) + if use_binding_from_db: + binding_mapping = BindingMapping.objects.filter(role_id=v1_role_db_id).first() + role_binding_id = None + for role_binding_uuid, data in binding_mapping.mappings.items(): + if data["v2_role_uuid"] == role.id: + role_binding_id = str(role_binding_uuid) + + if role_binding_id is None: + raise Exception("role_binding_id not found in mappings") + else: + role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding(role_binding_id, v1_role, role, frozenset({resource}), v2_groups) v2_role_bindings.append(v2_role_binding) return frozenset(v2_role_bindings) @@ -226,13 +234,20 @@ def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_d else: candidate_system_roles[candidate] = {v1_role.id} # Add a custom role - #if use_mapping_from_db: - #binding_mapping = BindingMapping.objects.filter( - # v1_role=db_role_id, permissions__contains=permissions - #).first() - #v2_uuid = str(binding_mapping.v2_role_id) - #else: - v2_uuid = uuid.uuid4() + if use_mapping_from_db: + binding_mapping = BindingMapping.objects.filter(role_id=db_role_id).first() + if binding_mapping is None: + raise Exception("V2 role bindings not found in db") + + v2_uuid = None + for v1_role_uuid, data in binding_mapping.mappings.items(): + if set(data["permissions"]) == set(permissions): + v2_uuid = data["v2_role_uuid"] + + if v2_uuid is None: + raise Exception("v2_uuid not found in mappings") + else: + v2_uuid = uuid.uuid4() add_element(resource_roles, V2role(str(v2_uuid), False, frozenset(permissions)), resource) global custom_roles_created diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index fd5474059..36d49506d 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -16,9 +16,11 @@ # """Test the role viewset.""" +import json from uuid import uuid4 from django.conf import settings +from django.core.serializers.json import DjangoJSONEncoder from django.test.utils import override_settings from django.urls import reverse, resolve from rest_framework import status @@ -35,9 +37,8 @@ ResourceDefinition, ExtRoleRelation, ExtTenant, - RoleMapping, Workspace, - #BindingMapping, + BindingMapping, ) from tests.core.test_kafka import copy_call_args @@ -47,6 +48,17 @@ URL = reverse("role-list") +def normalize_and_sort(json_obj): + for key, value in json_obj.items(): + if isinstance(value, list): + sorted_list = sorted( + [json.dumps(item, sort_keys=True, cls=DjangoJSONEncoder) for item in value] + ) + + json_obj[key] = [json.loads(item) for item in sorted_list] + return json_obj + + def replication_event_for_v1_role(v1_role_uuid, root_workspace_uuid): """Create a replication event for a v1 role.""" return { @@ -58,28 +70,27 @@ def replication_event_for_v1_role(v1_role_uuid, root_workspace_uuid): def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): """Create a 
relation API tuple for a v1 role.""" role_id = Role.objects.get(uuid=v1_role_uuid).id - #role_binding = BindingMapping.objects.filter(v1_role=role_id) - role_binding = [] + role_binding = BindingMapping.objects.filter(role=role_id).first() relations = [] - for binding in role_binding: + for role_binding_uuid, data in role_binding.mappings.items(): relation_tuple = relation_api_tuple( - "role_binding", str(binding.id), "granted", "role", str(binding.v2_role.id) + "role_binding", str(role_binding_uuid), "granted", "role", str(data["v2_role_uuid"]) ) relations.append(relation_tuple) - for permission in binding.permissions: - relation_tuple = relation_api_tuple("role", str(binding.v2_role.id), permission, "user", "*") + for permission in data["permissions"]: + relation_tuple = relation_api_tuple("role", str(data["v2_role_uuid"]), permission, "user", "*") relations.append(relation_tuple) - if "app_all_read" in binding.permissions: + if "app_all_read" in data["permissions"]: relation_tuple = relation_api_tuple( - "workspace", root_workspace_uuid, "user_grant", "role_binding", str(binding.id) + "workspace", root_workspace_uuid, "user_grant", "role_binding", str(role_binding_uuid) ) relations.append(relation_tuple) else: relation_tuple = relation_api_tuple("keya/id", "valueA", "workspace", "workspace", root_workspace_uuid) relations.append(relation_tuple) - relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", str(binding.id)) + relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", str(role_binding_uuid)) relations.append(relation_tuple) return relations @@ -198,7 +209,6 @@ def tearDown(self): Access.objects.all().delete() ExtTenant.objects.all().delete() ExtRoleRelation.objects.all().delete() - RoleMapping.objects.all().delete() Workspace.objects.all().delete() # we need to delete old test_tenant's that may exist in cache test_tenant_org_id = "100001" @@ -345,17 +355,14 @@ def test_create_role_with_display_success(self, mock_method): response = self.create_role(role_name, role_display=role_display, in_access_data=access_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - role_id = Role.objects.get(uuid=response.data.get("uuid")).id - role_mapping = RoleMapping.objects.filter(v1_role=role_id) - role_binding = [] #BindingMapping.objects.filter(v1_role=role_id) - - self.assertEqual(len(role_binding), 2) - self.assertEqual(len(role_mapping), 2) - replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) mock_method.assert_called_once() - mock_method.assert_called_with(replication_event) + actual_call_arg = mock_method.call_args[0][0] + expected_sorted = normalize_and_sort(replication_event) + actual_sorted = normalize_and_sort(actual_call_arg) + self.assertEqual(set(expected_sorted), set(actual_sorted)) + # test that we can retrieve the role url = reverse("role-detail", kwargs={"uuid": response.data.get("uuid")}) client = APIClient() @@ -1392,7 +1399,11 @@ def test_update_role(self, mock_method): response = client.put(url, test_data, format="json", **self.headers) replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) replication_event["relations_to_remove"] = current_relations - mock_method.assert_called_with(replication_event) + actual_call_arg = mock_method.call_args[0][0] + expected_sorted = normalize_and_sort(replication_event) + actual_sorted = normalize_and_sort(actual_call_arg) + 
self.assertEqual(set(expected_sorted), set(actual_sorted)) + self.assertEqual(response.status_code, status.HTTP_200_OK) def test_update_role_invalid_resource_defs_structure(self): @@ -1497,7 +1508,10 @@ def test_delete_role(self, mock_method): current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.root_workspace.uuid)) replication_event["relations_to_remove"] = current_relations response = client.delete(url, **self.headers) - mock_method.assert_called_with(replication_event) + actual_call_arg = mock_method.call_args[0][0] + expected_sorted = normalize_and_sort(replication_event) + actual_sorted = normalize_and_sort(actual_call_arg) + self.assertEqual(set(expected_sorted), set(actual_sorted)) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) @patch("core.kafka.RBACProducer.send_kafka_message") diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index 74c598960..e6bd953ce 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -93,29 +93,38 @@ def test_migration_of_data(self, logger_mock): """Test that we get the correct access for a principal.""" kwargs = {"exclude_apps": ["app1"], "orgs": ["1234567"]} migrate_data(**kwargs) - #self.assertEqual(V2Role.objects.count(), 3) - #self.assertEqual(BindingMapping.objects.count(), 3) org_id = self.tenant.org_id root_workspace_id = f"root-workspace-{self.tenant.org_id}" - v2_role_a2 = self.role_a2.v2role_set.first() - rolebinding_a2 = self.role_a2.bindingmapping_set.first() - v2_role_a31 = self.role_a3.v2role_set.first() - v2_role_a32 = self.role_a3.v2role_set.last() - rolebinding_a31 = self.role_a3.bindingmapping_set.first() - rolebinding_a32 = self.role_a3.bindingmapping_set.last() + role_binding = BindingMapping.objects.filter(role=self.role_a2).first() + + mappings_a2 = role_binding.mappings + first_key = list(mappings_a2.keys())[0] + + v2_role_a2 = mappings_a2[first_key]["v2_role_uuid"] # self.role_a2.v2role_set.first() + rolebinding_a2 = first_key + + role_binding_a3 = BindingMapping.objects.filter(role=self.role_a3).first() + mappings_a3 = role_binding_a3.mappings + first_key = list(mappings_a3.keys())[0] + v2_role_a31_value = mappings_a3[first_key]["v2_role_uuid"] + v2_role_a31 = v2_role_a31_value + + last_key = list(mappings_a3.keys())[-1] + v2_role_a32 = mappings_a3[last_key]["v2_role_uuid"] + + rolebinding_a31 = first_key + rolebinding_a32 = last_key + workspace_1 = "123456" workspace_2 = "654321" # Switch these two if rolebinding order is not the same as v2 roles - if ( - call(f"role_binding:{rolebinding_a31.id}#granted@role:{v2_role_a31.id}") - not in logger_mock.info.call_args_list - ): + if call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}") not in logger_mock.info.call_args_list: rolebinding_a31, rolebinding_a32 = rolebinding_a32, rolebinding_a31 # Switch these two if binding is not in correct order if ( - call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a31.id}") + call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a31}") not in logger_mock.info.call_args_list ): workspace_1, workspace_2 = workspace_2, workspace_1 @@ -135,19 +144,19 @@ def test_migration_of_data(self, logger_mock): call(f"group:{self.group_a2.uuid}#member@user:{self.principal1.uuid}"), call(f"group:{self.group_a2.uuid}#member@user:{self.principal2.uuid}"), ## Role binding to role_a2 - call(f"role_binding:{rolebinding_a2.id}#granted@role:{v2_role_a2.id}"), - 
call(f"role:{v2_role_a2.id}#inventory_hosts_write@user:*"), - call(f"role_binding:{rolebinding_a2.id}#subject@group:{self.group_a2.uuid}"), + call(f"role_binding:{rolebinding_a2}#granted@role:{v2_role_a2}"), + call(f"role:{v2_role_a2}#inventory_hosts_write@user:*"), + call(f"role_binding:{rolebinding_a2}#subject@group:{self.group_a2.uuid}"), call(f"workspace:{self.aws_account_id_1}#parent@workspace:{root_workspace_id}"), - call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a2.id}"), + call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a2}"), ## Role binding to role_a3 - call(f"role_binding:{rolebinding_a31.id}#granted@role:{v2_role_a31.id}"), - call(f"role:{v2_role_a31.id}#inventory_hosts_write@user:*"), + call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}"), + call(f"role:{v2_role_a31}#inventory_hosts_write@user:*"), call(f"workspace:{workspace_1}#parent@workspace:{root_workspace_id}"), - call(f"workspace:{workspace_1}#user_grant@role_binding:{rolebinding_a31.id}"), - call(f"role_binding:{rolebinding_a32.id}#granted@role:{v2_role_a32.id}"), - call(f"role:{v2_role_a32.id}#inventory_hosts_write@user:*"), + call(f"workspace:{workspace_1}#user_grant@role_binding:{rolebinding_a31}"), + call(f"role_binding:{rolebinding_a32}#granted@role:{v2_role_a32}"), + call(f"role:{v2_role_a32}#inventory_hosts_write@user:*"), call(f"workspace:{workspace_2}#parent@workspace:{root_workspace_id}"), - call(f"workspace:{workspace_2}#user_grant@role_binding:{rolebinding_a32.id}"), + call(f"workspace:{workspace_2}#user_grant@role_binding:{rolebinding_a32}"), ] logger_mock.info.assert_has_calls(tuples, any_order=True) From dae2ad8863e7b19be86306d7e98d1f5747ef8ef0 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 14:42:21 +0200 Subject: [PATCH 06/55] Lint fixes in mappings migrations and dual write for role --- ...049_remove_rolemapping_v1_role_and_more.py | 20 +++++++++---------- .../migrations/0050_bindingmapping.py | 17 +++++++++++----- rbac/management/role/model.py | 6 +----- tests/management/role/test_view.py | 8 ++++---- tests/migration_tool/tests_migrate.py | 2 +- 5 files changed, 28 insertions(+), 25 deletions(-) diff --git a/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py b/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py index 347224bb2..b91ef39ba 100644 --- a/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py +++ b/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py @@ -6,29 +6,29 @@ class Migration(migrations.Migration): dependencies = [ - ('management', '0048_outbox'), + ("management", "0048_outbox"), ] operations = [ migrations.RemoveField( - model_name='rolemapping', - name='v1_role', + model_name="rolemapping", + name="v1_role", ), migrations.RemoveField( - model_name='rolemapping', - name='v2_role', + model_name="rolemapping", + name="v2_role", ), migrations.RemoveField( - model_name='v2role', - name='v1_roles', + model_name="v2role", + name="v1_roles", ), migrations.DeleteModel( - name='BindingMapping', + name="BindingMapping", ), migrations.DeleteModel( - name='RoleMapping', + name="RoleMapping", ), migrations.DeleteModel( - name='V2Role', + name="V2Role", ), ] diff --git a/rbac/management/migrations/0050_bindingmapping.py b/rbac/management/migrations/0050_bindingmapping.py index 40b0bcb2d..331bf1f6e 100644 --- a/rbac/management/migrations/0050_bindingmapping.py +++ b/rbac/management/migrations/0050_bindingmapping.py @@ -7,16 +7,23 
@@ class Migration(migrations.Migration): dependencies = [ - ('management', '0049_remove_rolemapping_v1_role_and_more'), + ("management", "0049_remove_rolemapping_v1_role_and_more"), ] operations = [ migrations.CreateModel( - name='BindingMapping', + name="BindingMapping", fields=[ - ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), - ('mappings', models.JSONField(default=dict)), - ('role', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='binding_mapping', to='management.role')), + ("id", models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")), + ("mappings", models.JSONField(default=dict)), + ( + "role", + models.OneToOneField( + on_delete=django.db.models.deletion.CASCADE, + related_name="binding_mapping", + to="management.role", + ), + ), ], ), ] diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index dbe6ffc4a..e2f86619a 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -123,11 +123,7 @@ class BindingMapping(models.Model): mappings = models.JSONField(default=dict) # One-to-one relationship with Role - role = models.OneToOneField( - Role, - on_delete=models.CASCADE, - related_name="binding_mapping" - ) + role = models.OneToOneField(Role, on_delete=models.CASCADE, related_name="binding_mapping") def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index 36d49506d..c8141e518 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -51,9 +51,7 @@ def normalize_and_sort(json_obj): for key, value in json_obj.items(): if isinstance(value, list): - sorted_list = sorted( - [json.dumps(item, sort_keys=True, cls=DjangoJSONEncoder) for item in value] - ) + sorted_list = sorted([json.dumps(item, sort_keys=True, cls=DjangoJSONEncoder) for item in value]) json_obj[key] = [json.loads(item) for item in sorted_list] return json_obj @@ -90,7 +88,9 @@ def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): relation_tuple = relation_api_tuple("keya/id", "valueA", "workspace", "workspace", root_workspace_uuid) relations.append(relation_tuple) - relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", str(role_binding_uuid)) + relation_tuple = relation_api_tuple( + "keya/id", "valueA", "user_grant", "role_binding", str(role_binding_uuid) + ) relations.append(relation_tuple) return relations diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index e6bd953ce..8074741aa 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -102,7 +102,7 @@ def test_migration_of_data(self, logger_mock): mappings_a2 = role_binding.mappings first_key = list(mappings_a2.keys())[0] - v2_role_a2 = mappings_a2[first_key]["v2_role_uuid"] # self.role_a2.v2role_set.first() + v2_role_a2 = mappings_a2[first_key]["v2_role_uuid"] rolebinding_a2 = first_key role_binding_a3 = BindingMapping.objects.filter(role=self.role_a3).first() From 3bf54cf37404f9d2aa5853f35a483b066996a0cb Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 14:43:11 +0200 Subject: [PATCH 07/55] Remove delete mappings for replication event --- .../role/relation_api_dual_write_handler.py | 12 ------------ rbac/management/role/view.py | 1 - 2 files changed, 13 deletions(-) diff --git 
a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index f02b7e961..5f3e3053d 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -92,18 +92,6 @@ def get_current_role_relations(self): """Get current roles relations.""" return self.current_role_relations - def delete_mappings(self): - """Delete mappings for a role.""" - if not self.replication_enabled(): - return - try: - logger.info("[Dual Write] Delete mappings for role(%s): '%s'", self.role.uuid, self.role.name) - #v2_roles_ids = BindingMapping.objects.filter(v1_role=self.role.id).values("v2_role_id") - # this deletes also records in BindingMapping table and role binding - # V2Role.objects.filter(id__in=v2_roles_ids).delete() - except Exception as e: - raise DualWriteException(e) - def set_role(self, role): """Set a role.""" self.role = role diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index d80c651ef..1e9e11317 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -348,7 +348,6 @@ def destroy(self, request, *args, **kwargs): dual_write_handler = RelationApiDualWriteHandler(role) dual_write_handler.generate_relations_from_current_state_of_role() response = super().destroy(request=request, args=args, kwargs=kwargs) - dual_write_handler.delete_mappings() dual_write_handler.save_replication_event_to_outbox() except DualWriteException as e: return self.dual_write_exception_response(e) From ab3d6ca1deca115b46d88c7dfdb481ab538f8644 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 15:00:34 +0200 Subject: [PATCH 08/55] Store replication event into outbox table in relation_api_dual_write_handler --- .../role/relation_api_dual_write_handler.py | 12 ++++++++---- rbac/management/role/view.py | 6 +++--- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 5f3e3053d..2715ae06c 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -18,7 +18,7 @@ """Class to handle Dual Write API related operations.""" import logging -from management.models import Workspace +from management.models import Outbox, Workspace from migration_tool.migrate import migrate_role from migration_tool.utils import relationship_to_json @@ -36,7 +36,7 @@ class DualWriteException(Exception): class RelationApiDualWriteHandler: """Class to handle Dual Write API related operations.""" - def __init__(self, role): + def __init__(self, role, event_type): """Initialize RelationApiDualWriteHandler.""" if not self.replication_enabled(): return @@ -49,6 +49,7 @@ def __init__(self, role): self.root_workspace = Workspace.objects.get( name="root", description="Root workspace", tenant_id=self.tenant_id ) + self.event_type = event_type except Exception as e: raise DualWriteException(e) @@ -73,7 +74,6 @@ def regenerate_relations_and_mappings_for_role(self): """Delete and generated relations with mapping for a role.""" if not self.replication_enabled(): return [] - self.delete_mappings() return self.generate_relations_and_mappings_for_role() def generate_relations_and_mappings_for_role(self): @@ -141,4 +141,8 @@ def save_replication_event(self, replication_event): logger.info( "[Dual Write] Replication event: %s for role(%s): '%s'", replication_event, self.role.uuid, 
self.role.name ) - # TODO: serialize and store event in to outbox table + # https://debezium.io/documentation/reference/stable/transformations/outbox-event-router.html#basic-outbox-table + outbox_record = Outbox.objects.create( + aggregatetype="Role", aggregateid=self.role.uuid, event_type=self.event_type, payload=replication_event + ) + outbox_record.delete() diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 1e9e11317..82351c083 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -225,7 +225,7 @@ def create(self, request, *args, **kwargs): auditlog.log_create(request, AuditLog.ROLE) role = get_object_or_404(Role, uuid=create_role.data["uuid"]) - dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler = RelationApiDualWriteHandler(role, "CREATE") dual_write_handler.generate_relations_and_mappings_for_role() dual_write_handler.save_replication_event_to_outbox() except DualWriteException as e: @@ -345,7 +345,7 @@ def destroy(self, request, *args, **kwargs): try: with transaction.atomic(): self.delete_policies_if_no_role_attached(role) - dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler = RelationApiDualWriteHandler(role, "DELETE") dual_write_handler.generate_relations_from_current_state_of_role() response = super().destroy(request=request, args=args, kwargs=kwargs) dual_write_handler.save_replication_event_to_outbox() @@ -453,7 +453,7 @@ def update_with_relation_api_replication(self, request, *args, **kwargs): """Update a role with replicating data into Relation API.""" try: role = self.get_object() - dual_write_handler = RelationApiDualWriteHandler(role) + dual_write_handler = RelationApiDualWriteHandler(role, "UPDATE") dual_write_handler.generate_relations_from_current_state_of_role() with transaction.atomic(): response = super().update(request=request, args=args, kwargs=kwargs) From f9474df30871de9544a81de1ca28a8372377cdd6 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 16:12:20 +0200 Subject: [PATCH 09/55] Remove set_role from relation_api_dual_write_handler --- rbac/management/role/relation_api_dual_write_handler.py | 6 +----- rbac/management/role/view.py | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 2715ae06c..aa18b7dbb 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -92,10 +92,6 @@ def get_current_role_relations(self): """Get current roles relations.""" return self.current_role_relations - def set_role(self, role): - """Set a role.""" - self.role = role - def build_replication_event(self): """Build replication event.""" if not self.replication_enabled(): @@ -116,7 +112,7 @@ def generate_replication_event_to_outbox(self, role): """Generate replication event to outbox table.""" if not self.replication_enabled(): return - self.set_role(role) + self.role = role self.regenerate_relations_and_mappings_for_role() return self.save_replication_event_to_outbox() diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 82351c083..21642e841 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -393,7 +393,7 @@ def update(self, request, *args, **kwargs): @apiParam (Path) {String} id Role unique identifier @apiParam (Request Body) {String} name Role name - @apiParam (Request Body) {ArRray} access Access 
definition + @apiParam (Request Body) {Array} access Access definition @apiParamExample {json} Request Body: { "name": "RoleA", From de674c69f1a907998365cadc351e151dec8f9c4f Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 28 Aug 2024 17:12:06 +0200 Subject: [PATCH 10/55] Replace whole json field in bindingMapinng for update/create --- .../role/relation_api_dual_write_handler.py | 6 ++-- rbac/migration_tool/migrate.py | 30 ++++++++++++++----- 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index aa18b7dbb..29a553470 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -65,7 +65,9 @@ def generate_relations_from_current_state_of_role(self): logger.info( "[Dual Write] Generate relations from current state of role(%s): '%s'", self.role.uuid, self.role.name ) - relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id, True, True, False) + relations = migrate_role( + self.role, False, str(self.root_workspace.uuid), self.org_id, True, True, True, False + ) self.current_role_relations = relations except Exception as e: raise DualWriteException(e) @@ -82,7 +84,7 @@ def generate_relations_and_mappings_for_role(self): return [] try: logger.info("[Dual Write] Generate new relations from role(%s): '%s'", self.role.uuid, self.role.name) - relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id) + relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id, True) self.role_relations = relations return relations except Exception as e: diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 3fbb18086..be239052d 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -34,10 +34,16 @@ def spicedb_relationships( - v2_role_bindings: FrozenSet[V2rolebinding], root_workspace: str, v1_role, create_binding_to_db=True + v2_role_bindings: FrozenSet[V2rolebinding], + root_workspace: str, + v1_role, + in_transaction=False, + create_binding_to_db=True, ): """Generate a set of relationships for the given set of v2 role bindings.""" relationships = list() + binding_mappings = {} + for v2_role_binding in v2_role_bindings: relationships.append( create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") @@ -46,17 +52,14 @@ def spicedb_relationships( if create_binding_to_db: v2_role_data = v2_role_binding.role - binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role=v1_role) - if not binding_mapping.mappings: - binding_mapping.mappings = {} + if binding_mappings.get(v2_role_binding.id) is None: + binding_mappings[v2_role_binding.id] = {} - binding_mapping.mappings[v2_role_binding.id] = { + binding_mappings[v2_role_binding.id] = { "v2_role_uuid": str(v2_role_data.id), "permissions": list(v2_role_binding.role.permissions), } - binding_mapping.save() - for perm in v2_role_binding.role.permissions: relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) for group in v2_role_binding.groups: @@ -86,6 +89,14 @@ def spicedb_relationships( ) ) + if create_binding_to_db: + if in_transaction: + binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role_id=v1_role) + else: + binding_mapping, _ = BindingMapping.objects.get_or_create(role=v1_role) + 
binding_mapping.mappings = binding_mappings + binding_mapping.save() + return relationships @@ -94,6 +105,7 @@ def migrate_role( write_db: bool, root_workspace: str, default_workspace: str, + in_transaction=False, use_binding_from_db=False, use_mapping_from_db=False, create_binding_to_db=True, @@ -115,7 +127,9 @@ def migrate_role( v1_role, role.id, root_workspace, default_workspace, use_binding_from_db, use_mapping_from_db ) ] - relationships = spicedb_relationships(frozenset(v2_roles), root_workspace, role, create_binding_to_db) + relationships = spicedb_relationships( + frozenset(v2_roles), root_workspace, role, in_transaction, create_binding_to_db + ) output_relationships(relationships, write_db) return relationships From e9b9959502eb4dec1d963e4682d8d5b2722dabfe Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Thu, 29 Aug 2024 11:34:15 +0200 Subject: [PATCH 11/55] Extract methods to get data from mappings in BindingMapping model --- rbac/management/role/model.py | 22 ++++++++++++ rbac/migration_tool/migrate.py | 2 +- ...sharedSystemRolesReplicatedRoleBindings.py | 35 +++++-------------- 3 files changed, 32 insertions(+), 27 deletions(-) diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index e2f86619a..af4b363bf 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -125,6 +125,28 @@ class BindingMapping(models.Model): # One-to-one relationship with Role role = models.OneToOneField(Role, on_delete=models.CASCADE, related_name="binding_mapping") + def find_role_binding_by_v2_role(self, v2_role_id): + """Find role binding by v2 role id.""" + role_binding_id = None + for role_binding_uuid, data in self.mappings.items(): + if data["v2_role_uuid"] == v2_role_id: + role_binding_id = str(role_binding_uuid) + + if role_binding_id is None: + raise Exception(f"role_binding_id not found in mappings for v2 role {v2_role_id} ") + return role_binding_id + + def find_v2_role_by_permission(self, permissions): + """Find v2 role by permissions.""" + v2_uuid = None + for v1_role_uuid, data in self.mappings.items(): + if set(data["permissions"]) == set(permissions): + v2_uuid = data["v2_role_uuid"] + + if v2_uuid is None: + raise Exception(f"v2_uuid not found in mappings for v1 role {self.role.uuid}") + return v2_uuid + def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): """Signal handler for invalidating Principal cache on Role object change.""" diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index be239052d..551b9f25c 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -91,7 +91,7 @@ def spicedb_relationships( if create_binding_to_db: if in_transaction: - binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role_id=v1_role) + binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role=v1_role) else: binding_mapping, _ = BindingMapping.objects.get_or_create(role=v1_role) binding_mapping.mappings = binding_mappings diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 6d2a0fb88..86401a849 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -142,17 +142,10 @@ def v1_role_to_v2_mapping( if v2_groups: for v2_group in v2_groups: if use_binding_from_db: - binding_mapping = 
BindingMapping.objects.filter(role_id=v1_role_db_id).first() + binding_mapping = BindingMapping.objects.get(role_id=v1_role_db_id) if binding_mapping is None: - raise Exception("V2 role bindings not found in db") - - role_binding_id = None - for role_binding_uuid, data in binding_mapping.mappings.items(): - if data["v2_role_uuid"] == role.id: - role_binding_id = str(role_binding_uuid) - - if role_binding_id is None: - raise Exception("role_binding_id not found in mappings") + raise Exception(f"binding_mapping not found in db for role {v1_role_db_id}") + role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) else: role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding( @@ -161,14 +154,10 @@ def v1_role_to_v2_mapping( v2_role_bindings.append(v2_role_binding) else: if use_binding_from_db: - binding_mapping = BindingMapping.objects.filter(role_id=v1_role_db_id).first() - role_binding_id = None - for role_binding_uuid, data in binding_mapping.mappings.items(): - if data["v2_role_uuid"] == role.id: - role_binding_id = str(role_binding_uuid) - - if role_binding_id is None: - raise Exception("role_binding_id not found in mappings") + binding_mapping = BindingMapping.objects.get(role_id=v1_role_db_id) + if binding_mapping is None: + raise Exception(f"binding_mapping not found in db for role {v1_role_db_id}") + role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) else: role_binding_id = str(uuid.uuid4()) v2_role_binding = V2rolebinding(role_binding_id, v1_role, role, frozenset({resource}), v2_groups) @@ -235,17 +224,11 @@ def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_d candidate_system_roles[candidate] = {v1_role.id} # Add a custom role if use_mapping_from_db: - binding_mapping = BindingMapping.objects.filter(role_id=db_role_id).first() + binding_mapping = BindingMapping.objects.get(role_id=db_role_id) if binding_mapping is None: raise Exception("V2 role bindings not found in db") - v2_uuid = None - for v1_role_uuid, data in binding_mapping.mappings.items(): - if set(data["permissions"]) == set(permissions): - v2_uuid = data["v2_role_uuid"] - - if v2_uuid is None: - raise Exception("v2_uuid not found in mappings") + v2_uuid = binding_mapping.find_v2_role_by_permission(permissions) else: v2_uuid = uuid.uuid4() From 121133082ef6c5f2700f4b281d2031bffa279a64 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Thu, 5 Sep 2024 20:57:14 -0400 Subject: [PATCH 12/55] RHCLOUD-35015: Bring mapping reads within locked transaction to prevent out of order replication (#1180) * Bring mapping reads within locked transaction to prevent out of order replication * Bring mapping and initial role read within locked transaction to prevent out of order replication It turns out that the initial role state was being queried outside of the atomic block and without a row lock and thus subject to a concurrent update causing policy, access, or mappings to not be the current state by the time the outbox event is written, which would create inconsistency. This commit refactors how django rest will retrieve the initial object so that it's just simply locked from the beginning and we don't have to be so careful. * Omit partial_update from update queryset partial_update doesn't need the lock or the mapping because it does not update access or policy tables. * Remove commented parameters * Remove more commented parameters * Grammar * Add note about repeatable read option * Create mapping if none loaded prior (i.e. 
for new Roles) * Create mapping in one SQL statement rather than two * Consolidate audit in perform_; fix lint and programming errors * Add back permission check to queryset logic * Fix whitespace * Fix typo in assert method * Fix permissions check in get_queryset * Add missing fine grained permissions case * Fix tests that reference renamed method * Handle when role is updated but binding does not exist * Go back to original policy delete method (what was I thinking!?) * Don't bother getting relations if binding does not exist --- rbac/internal/views.py | 4 +- .../management/commands/migrate_relations.py | 2 +- .../role/relation_api_dual_write_handler.py | 95 ++++++---- rbac/management/role/serializer.py | 28 +-- rbac/management/role/view.py | 174 ++++++++++++------ rbac/migration_tool/migrate.py | 106 +++++------ ...sharedSystemRolesReplicatedRoleBindings.py | 50 ++--- tests/internal/test_views.py | 8 +- tests/management/role/test_view.py | 6 +- 9 files changed, 268 insertions(+), 205 deletions(-) diff --git a/rbac/internal/views.py b/rbac/internal/views.py index b086f6bd3..5ab4b9091 100644 --- a/rbac/internal/views.py +++ b/rbac/internal/views.py @@ -480,7 +480,7 @@ def get_param_list(request, param_name): def data_migration(request): """View method for running migrations from V1 to V2 spiceDB schema. - POST /_private/api/utils/data_migration/?exclude_apps=cost_management,rbac&orgs=id_1,id_2&write_db=True + POST /_private/api/utils/data_migration/?exclude_apps=cost_management,rbac&orgs=id_1,id_2&write_relationships=True """ if request.method != "POST": return HttpResponse('Invalid method, only "POST" is allowed.', status=405) @@ -489,7 +489,7 @@ def data_migration(request): args = { "exclude_apps": get_param_list(request, "exclude_apps"), "orgs": get_param_list(request, "orgs"), - "write_db": request.GET.get("write_db", "False") == "True", + "write_relationships": request.GET.get("write_relationships", "False") == "True", } migrate_data_in_worker.delay(args) return HttpResponse("Data migration from V1 to V2 are running in a background worker.", status=202) diff --git a/rbac/management/management/commands/migrate_relations.py b/rbac/management/management/commands/migrate_relations.py index ad88aa9d5..151f3f45b 100644 --- a/rbac/management/management/commands/migrate_relations.py +++ b/rbac/management/management/commands/migrate_relations.py @@ -25,7 +25,7 @@ def handle(self, *args, **options): kwargs = { "exclude_apps": options["exclude_apps"], "orgs": options["org_list"], - "write_db": options["write_to_db"], + "write_relationships": options["write_relationships"], } migrate_data(**kwargs) logger.info("*** Migration completed. 
***\n") diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 29a553470..e0876247f 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -19,6 +19,7 @@ import logging from management.models import Outbox, Workspace +from management.role.model import BindingMapping from migration_tool.migrate import migrate_role from migration_tool.utils import relationship_to_json @@ -44,6 +45,7 @@ def __init__(self, role, event_type): self.role_relations = [] self.current_role_relations = [] self.role = role + self.binding_mapping = None self.tenant_id = role.tenant_id self.org_id = role.tenant.org_id self.root_workspace = Workspace.objects.get( @@ -57,7 +59,11 @@ def replication_enabled(self): """Check whether replication enabled.""" return ENVIRONMENT.get_value("REPLICATION_TO_RELATION_ENABLED", default=False, cast=bool) - def generate_relations_from_current_state_of_role(self): + def get_current_role_relations(self): + """Get current roles relations.""" + return self.current_role_relations + + def load_relations_from_current_state_of_role(self): """Generate relations from current state of role and UUIDs for v2 role and role binding from database.""" if not self.replication_enabled(): return @@ -65,36 +71,76 @@ def generate_relations_from_current_state_of_role(self): logger.info( "[Dual Write] Generate relations from current state of role(%s): '%s'", self.role.uuid, self.role.name ) - relations = migrate_role( - self.role, False, str(self.root_workspace.uuid), self.org_id, True, True, True, False + + self.binding_mapping = self.role.binding_mapping + + relations, _ = migrate_role( + self.role, + write_relationships=False, + root_workspace=str(self.root_workspace.uuid), + default_workspace=self.org_id, + current_bindings=self.binding_mapping, ) + self.current_role_relations = relations + except BindingMapping.DoesNotExist: + logger.warning( + "[Dual Write] Binding mapping not found for role(%s): '%s'. " + "Assuming no current relations exist. 
" + "If this is NOT the case, relations are inconsistent!", + self.role.uuid, + self.role.name, + ) except Exception as e: raise DualWriteException(e) - def regenerate_relations_and_mappings_for_role(self): - """Delete and generated relations with mapping for a role.""" + def generate_replication_event_to_outbox(self, role): + """Generate replication event to outbox table.""" if not self.replication_enabled(): - return [] - return self.generate_relations_and_mappings_for_role() + return + self.role = role + self._generate_relations_and_mappings_for_role() + return self.save_replication_event_to_outbox() + + def save_replication_event_to_outbox(self): + """Generate and store replication event to outbox table.""" + if not self.replication_enabled(): + return {} + try: + replication_event = self._build_replication_event() + self._save_replication_event(replication_event) + except Exception as e: + raise DualWriteException(e) + return replication_event - def generate_relations_and_mappings_for_role(self): + def _generate_relations_and_mappings_for_role(self): """Generate relations and mappings for a role with new UUIDs for v2 role and role bindings.""" if not self.replication_enabled(): return [] try: logger.info("[Dual Write] Generate new relations from role(%s): '%s'", self.role.uuid, self.role.name) - relations = migrate_role(self.role, False, str(self.root_workspace.uuid), self.org_id, True) + + relations, mappings = migrate_role( + self.role, + write_relationships=False, + root_workspace=str(self.root_workspace.uuid), + default_workspace=self.org_id, + current_bindings=self.binding_mapping, + ) + self.role_relations = relations + + if self.binding_mapping is None: + self.binding_mapping = BindingMapping.objects.create(role=self.role, mappings=mappings) + else: + self.binding_mapping.mappings = mappings + self.binding_mapping.save(force_update=True) + return relations except Exception as e: raise DualWriteException(e) - def get_current_role_relations(self): - """Get current roles relations.""" - return self.current_role_relations - - def build_replication_event(self): + def _build_replication_event(self): """Build replication event.""" if not self.replication_enabled(): return {} @@ -110,26 +156,7 @@ def build_replication_event(self): replication_event = {"relations_to_add": relations_to_add, "relations_to_remove": relations_to_remove} return replication_event - def generate_replication_event_to_outbox(self, role): - """Generate replication event to outbox table.""" - if not self.replication_enabled(): - return - self.role = role - self.regenerate_relations_and_mappings_for_role() - return self.save_replication_event_to_outbox() - - def save_replication_event_to_outbox(self): - """Generate and store replication event to outbox table.""" - if not self.replication_enabled(): - return {} - try: - replication_event = self.build_replication_event() - self.save_replication_event(replication_event) - except Exception as e: - raise DualWriteException(e) - return replication_event - - def save_replication_event(self, replication_event): + def _save_replication_event(self, replication_event): """Save replication event.""" if not self.replication_enabled(): return diff --git a/rbac/management/role/serializer.py b/rbac/management/role/serializer.py index 3caff09ba..d3bb7d714 100644 --- a/rbac/management/role/serializer.py +++ b/rbac/management/role/serializer.py @@ -18,7 +18,6 @@ """Serializer for role management.""" from django.utils.translation import gettext as _ from management.group.model import 
Group -from management.notifications.notification_handlers import role_obj_change_notification_handler from management.serializer_override_mixin import SerializerCreateOverrideMixin from management.utils import filter_queryset_by_tenant, get_principal, validate_and_get_key from rest_framework import serializers @@ -139,7 +138,6 @@ def create(self, validated_data): role = Role.objects.create(name=name, description=description, display_name=display_name, tenant=tenant) create_access_for_role(role, access_list, tenant) - role_obj_change_notification_handler(role, "created", self.context["request"].user) return role def update(self, instance, validated_data): @@ -153,7 +151,6 @@ def update(self, instance, validated_data): create_access_for_role(instance, access_list, tenant) - role_obj_change_notification_handler(instance, "updated", self.context["request"].user) return instance def get_external_role_id(self, obj): @@ -395,15 +392,22 @@ def validate_role_update(instance, validated_data): def update_role(role_name, update_data, tenant, clear_access=True): """Update role attribute.""" - role, created = Role.objects.update_or_create( - name=role_name, - tenant=tenant, - defaults={ - "name": update_data.get("updated_name"), - "display_name": update_data.get("updated_display_name"), - "description": update_data.get("updated_description"), - }, - ) + role = Role.objects.get(name=role_name, tenant=tenant) + + update_fields = [] + + if "updated_name" in update_data: + role.name = update_data["updated_name"] + update_fields.append("name") + if "updated_display_name" in update_data: + role.display_name = update_data["updated_display_name"] + update_fields.append("display_name") + if "updated_description" in update_data: + role.description = update_data["updated_description"] + update_fields.append("description") + + role.save(update_fields=update_fields) + if clear_access: role.access.all().delete() diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 21642e841..2f48fd54b 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -28,14 +28,13 @@ from django.db.models import Q from django.db.models.aggregates import Count from django.http import Http404 -from django.shortcuts import get_object_or_404 from django.utils.translation import gettext as _ from django_filters import rest_framework as filters from management.filters import CommonFilters from management.models import AuditLog, Permission from management.notifications.notification_handlers import role_obj_change_notification_handler from management.permissions import RoleAccessPermission -from management.querysets import get_role_queryset +from management.querysets import get_role_queryset, user_has_perm from management.role.relation_api_dual_write_handler import DualWriteException, RelationApiDualWriteHandler from management.role.serializer import AccessSerializer, RoleDynamicSerializer, RolePatchSerializer from management.utils import validate_uuid @@ -44,6 +43,7 @@ from rest_framework.filters import OrderingFilter from rest_framework.response import Response +from rbac.env import ENVIRONMENT from .model import Role from .serializer import RoleSerializer @@ -138,8 +138,55 @@ class RoleViewSet( ordering = ("name",) def get_queryset(self): - """Obtain queryset for requesting user based on access.""" - return get_role_queryset(self.request) + """Obtain queryset for requesting user based on access and action.""" + # NOTE: partial_update intentionally omitted because it does not update access or policy. 
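For update and destroy, the queryset built just below locks the Role row with select_for_update. As a rough sketch of the overall pattern this commit's message describes (read the current state, apply the change, and write the outbox event all under one row lock), assuming Django's ORM and invented callables rather than this patch's actual functions:

    from django.db import transaction
    from management.models import Role

    def replicate_role_update(role_uuid, apply_changes, compute_relations, write_outbox_event):
        """Apply a change to a Role and emit a replication event under one row lock.

        All three callables are illustrative stand-ins, not functions from this patch.
        """
        with transaction.atomic():
            # select_for_update() blocks concurrent writers on this row until commit, so the
            # "before" relations, the change itself, and the outbox write all observe one
            # consistent version of the role's access, policy, and binding mappings.
            role = Role.objects.select_for_update().get(uuid=role_uuid)
            relations_before = compute_relations(role)
            apply_changes(role)
            write_outbox_event(relations_before, compute_relations(role))
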
+ if self.action not in ["update", "destroy"]: + return get_role_queryset(self.request) + else: + # Update queryset differs from normal role queryset in a few ways: + # - Remove counts; those are not returned in updates + # and they prevent us from being able to lock the result + # (postgres does not allow select for update with 'group by') + # - No scope checks since these are not relevant to updates + # - We also lock the role + # - We don't bother including system roles because they are not updated this way + + # This lock is necessary to ensure the mapping is always based on the current role + # state which requires we prevent concurrent modifications to + # policy, access, and the mappings themselves. + # Because this does not lock binding_mapping, policy, and access, + # the role must always be locked for those edits also. + + # It is important that the lock is here. + # Because we reuse this Role object when reading and + # determining current relations to remove, + # this lock prevents any accidental and non-obvious race conditions from occuring. + # (such as if this was innocently changed to select related access or policy rows) + + # NOTE: If we want to try REPEATABLE READ isolation instead of READ COMMITTED, + # this should work with that as well. + # You would be able to remove `select_for_update` here, + # and instead rely on REPEATABLE READ's lost update detection to abort the tx. + # Nothing else should need to change. + + base_query = Role.objects.filter(tenant=self.request.tenant).select_for_update() + + # TODO: May be redundant with RolePermissions check but copied from querysets.py for safety + if ENVIRONMENT.get_value("ALLOW_ANY", default=False, cast=bool): + return base_query + + if self.request.user.admin: + return base_query + + access = user_has_perm(self.request, "role") + + if access == "All": + return base_query + + if access == "None": + return Role.objects.none() + + return base_query.filter(uuid__in=access) def get_serializer_class(self): """Get serializer class based on route.""" @@ -218,21 +265,10 @@ def create(self, request, *args, **kwargs): self.validate_role(request) try: with transaction.atomic(): - create_role = super().create(request=request, args=args, kwargs=kwargs) - - if status.is_success(create_role.status_code): - auditlog = AuditLog() - auditlog.log_create(request, AuditLog.ROLE) - - role = get_object_or_404(Role, uuid=create_role.data["uuid"]) - dual_write_handler = RelationApiDualWriteHandler(role, "CREATE") - dual_write_handler.generate_relations_and_mappings_for_role() - dual_write_handler.save_replication_event_to_outbox() + return super().create(request=request, args=args, kwargs=kwargs) except DualWriteException as e: return self.dual_write_exception_response(e) - return create_role - def list(self, request, *args, **kwargs): """Obtain the list of roles for the tenant. @@ -336,29 +372,13 @@ def destroy(self, request, *args, **kwargs): HTTP/1.1 204 NO CONTENT """ validate_uuid(kwargs.get("uuid"), "role uuid validation") - role = self.get_object() - if role.system or role.platform_default: - key = "role" - message = "System roles cannot be deleted." 
- error = {key: [_(message)]} - raise serializers.ValidationError(error) + try: with transaction.atomic(): - self.delete_policies_if_no_role_attached(role) - dual_write_handler = RelationApiDualWriteHandler(role, "DELETE") - dual_write_handler.generate_relations_from_current_state_of_role() - response = super().destroy(request=request, args=args, kwargs=kwargs) - dual_write_handler.save_replication_event_to_outbox() + return super().destroy(request=request, args=args, kwargs=kwargs) except DualWriteException as e: return self.dual_write_exception_response(e) - if response.status_code == status.HTTP_204_NO_CONTENT: - role_obj_change_notification_handler(role, "deleted", request.user) - - auditlog = AuditLog() - auditlog.log_delete(request, AuditLog.ROLE, role) - return response - def partial_update(self, request, *args, **kwargs): """Patch a role.""" validate_uuid(kwargs.get("uuid"), "role uuid validation") @@ -370,14 +390,7 @@ def partial_update(self, request, *args, **kwargs): error = {key: [_(message)]} raise serializers.ValidationError(error) - role = self.get_object() - partial_update_role = super().update(request=request, args=args, kwargs=kwargs) - - if status.is_success(partial_update_role.status_code): - auditlog = AuditLog() - auditlog.log_edit(request, AuditLog.ROLE, role) - - return partial_update_role + return super().update(request=request, args=args, kwargs=kwargs) def update(self, request, *args, **kwargs): """Update a role. @@ -440,28 +453,71 @@ def update(self, request, *args, **kwargs): validate_uuid(kwargs.get("uuid"), "role uuid validation") self.validate_role(request) - update_role = self.update_with_relation_api_replication(request=request, args=args, kwargs=kwargs) - - if status.is_success(update_role.status_code): - auditlog = AuditLog() - role = self.get_object() - auditlog.log_edit(request, AuditLog.ROLE, role) - - return update_role - - def update_with_relation_api_replication(self, request, *args, **kwargs): - """Update a role with replicating data into Relation API.""" try: - role = self.get_object() - dual_write_handler = RelationApiDualWriteHandler(role, "UPDATE") - dual_write_handler.generate_relations_from_current_state_of_role() with transaction.atomic(): - response = super().update(request=request, args=args, kwargs=kwargs) - dual_write_handler.generate_replication_event_to_outbox(self.get_object()) + return super().update(request=request, args=args, kwargs=kwargs) except DualWriteException as e: return self.dual_write_exception_response(e) - return response + def perform_create(self, serializer): + """ + Create the role and publish outbox, notification, and audit events. + + Assumes concurrent updates are prevented (e.g. with atomic block and locks). + """ + role = serializer.save() + + dual_write_handler = RelationApiDualWriteHandler(role, "CREATE") + dual_write_handler.generate_replication_event_to_outbox(role) + + role_obj_change_notification_handler(role, "created", self.request.user) + + auditlog = AuditLog() + auditlog.log_create(self.request, AuditLog.ROLE) + + def perform_update(self, serializer): + """ + Update the role and publish outbox, notification, and audit events. + + Assumes concurrent updates are prevented (e.g. with atomic block and locks). 
+ """ + if self.action != "partial_update": + dual_write_handler = RelationApiDualWriteHandler(serializer.instance, "UPDATE") + dual_write_handler.load_relations_from_current_state_of_role() + + role = serializer.save() + + if self.action != "partial_update": + dual_write_handler.generate_replication_event_to_outbox(role) + role_obj_change_notification_handler(role, "updated", self.request.user) + + auditlog = AuditLog() + auditlog.log_edit(self.request, AuditLog.ROLE, role) + + def perform_destroy(self, instance: Role): + """ + Delete the role and publish outbox, notification, and audit events. + + Assumes concurrent updates are prevented (e.g. with atomic block and locks). + """ + if instance.system or instance.platform_default: + key = "role" + message = "System roles cannot be deleted." + error = {key: [_(message)]} + raise serializers.ValidationError(error) + + dual_write_handler = RelationApiDualWriteHandler(instance, "DELETE") + dual_write_handler.load_relations_from_current_state_of_role() + + self.delete_policies_if_no_role_attached(instance) + instance.delete() + + dual_write_handler.save_replication_event_to_outbox() + role_obj_change_notification_handler(instance, "deleted", self.request.user) + + # Audit in perform_destroy because it needs access to deleted instance + auditlog = AuditLog() + auditlog.log_delete(self.request, AuditLog.ROLE, instance) def dual_write_exception_response(self, e): """Dual write exception response.""" diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 551b9f25c..db0247804 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -17,10 +17,11 @@ import dataclasses import logging -from typing import FrozenSet +from typing import Any, FrozenSet, Optional from django.conf import settings -from management.role.model import BindingMapping, Role, V2Role +from kessel.relations.v1beta1 import common_pb2 +from management.role.model import BindingMapping, Role from management.workspace.model import Workspace from migration_tool.models import V1group, V2rolebinding from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_mapping @@ -32,33 +33,34 @@ logger = logging.getLogger(__name__) # pylint: disable=invalid-name +BindingMappings = dict[str, dict[str, Any]] -def spicedb_relationships( + +def get_kessel_relation_tuples( v2_role_bindings: FrozenSet[V2rolebinding], root_workspace: str, - v1_role, - in_transaction=False, - create_binding_to_db=True, -): - """Generate a set of relationships for the given set of v2 role bindings.""" - relationships = list() - binding_mappings = {} +) -> tuple[list[common_pb2.Relationship], BindingMappings]: + """Generate a set of relationships and BindingMappings for the given set of v2 role bindings.""" + relationships: list[common_pb2.Relationship] = list() + + # Dictionary of v2 role binding ID to v2 role UUID and its permissions + # for the given v1 role. 
+ binding_mappings: BindingMappings = {} for v2_role_binding in v2_role_bindings: relationships.append( create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") ) - if create_binding_to_db: - v2_role_data = v2_role_binding.role + v2_role_data = v2_role_binding.role - if binding_mappings.get(v2_role_binding.id) is None: - binding_mappings[v2_role_binding.id] = {} + if binding_mappings.get(v2_role_binding.id) is None: + binding_mappings[v2_role_binding.id] = {} - binding_mappings[v2_role_binding.id] = { - "v2_role_uuid": str(v2_role_data.id), - "permissions": list(v2_role_binding.role.permissions), - } + binding_mappings[v2_role_binding.id] = { + "v2_role_uuid": str(v2_role_data.id), + "permissions": list(v2_role_binding.role.permissions), + } for perm in v2_role_binding.role.permissions: relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) @@ -89,27 +91,16 @@ def spicedb_relationships( ) ) - if create_binding_to_db: - if in_transaction: - binding_mapping, _ = BindingMapping.objects.select_for_update().get_or_create(role=v1_role) - else: - binding_mapping, _ = BindingMapping.objects.get_or_create(role=v1_role) - binding_mapping.mappings = binding_mappings - binding_mapping.save() - - return relationships + return relationships, binding_mappings def migrate_role( role: Role, - write_db: bool, + write_relationships: bool, root_workspace: str, default_workspace: str, - in_transaction=False, - use_binding_from_db=False, - use_mapping_from_db=False, - create_binding_to_db=True, -): + current_bindings: Optional[BindingMapping] = None, +) -> tuple[list[common_pb2.Relationship], BindingMappings]: """Migrate a role from v1 to v2.""" v1_role = extract_info_into_v1_role(role) # With the replicated role bindings algorithm, role bindings are scoped by group, so we need to add groups @@ -122,19 +113,14 @@ def migrate_role( # This is where we wire in the implementation we're using into the Migrator v2_roles = [ - v2_role - for v2_role in v1_role_to_v2_mapping( - v1_role, role.id, root_workspace, default_workspace, use_binding_from_db, use_mapping_from_db - ) + v2_role for v2_role in v1_role_to_v2_mapping(v1_role, root_workspace, default_workspace, current_bindings) ] - relationships = spicedb_relationships( - frozenset(v2_roles), root_workspace, role, in_transaction, create_binding_to_db - ) - output_relationships(relationships, write_db) - return relationships + relationships, mappings = get_kessel_relation_tuples(frozenset(v2_roles), root_workspace) + output_relationships(relationships, write_relationships) + return relationships, mappings -def migrate_workspace(tenant: Tenant, write_db: bool): +def migrate_workspace(tenant: Tenant, write_relationships: bool): """Migrate a workspace from v1 to v2.""" root_workspace = f"root-workspace-{tenant.org_id}" # Org id represents the default workspace for now @@ -144,20 +130,20 @@ def migrate_workspace(tenant: Tenant, write_db: bool): ] # Include realm for tenant relationships.append(create_relationship("tenant", str(tenant.org_id), "realm", settings.ENV_NAME, "realm")) - output_relationships(relationships, write_db) + output_relationships(relationships, write_relationships) return root_workspace, tenant.org_id -def migrate_users(tenant: Tenant, write_db: bool): +def migrate_users(tenant: Tenant, write_relationships: bool): """Write users relationship to tenant.""" relationships = [ create_relationship("tenant", str(tenant.org_id), "user", str(principal.uuid), "member") for 
principal in tenant.principal_set.all() ] - output_relationships(relationships, write_db) + output_relationships(relationships, write_relationships) -def migrate_users_for_groups(tenant: Tenant, write_db: bool): +def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): """Write users relationship to groups.""" relationships = [] for group in tenant.group_set.all(): @@ -167,21 +153,21 @@ def migrate_users_for_groups(tenant: Tenant, write_db: bool): ) for user in user_set: relationships.append(create_relationship("group", str(group.uuid), "user", str(user.uuid), "member")) - output_relationships(relationships, write_db) + output_relationships(relationships, write_relationships) -def migrate_data_for_tenant(tenant: Tenant, app_list: list, write_db: bool): +def migrate_data_for_tenant(tenant: Tenant, app_list: list, write_relationships: bool): """Migrate all data for a given tenant.""" logger.info("Creating workspace.") - root_workspace, default_workspace = migrate_workspace(tenant, write_db) + root_workspace, default_workspace = migrate_workspace(tenant, write_relationships) logger.info("Workspace migrated.") logger.info("Relating users to tenant.") - migrate_users(tenant, write_db) + migrate_users(tenant, write_relationships) logger.info("Finished relationship between users and tenant.") logger.info("Migrating relations of group and user.") - migrate_users_for_groups(tenant, write_db) + migrate_users_for_groups(tenant, write_relationships) logger.info("Finished migrating relations of group and user.") roles = tenant.role_set.all() @@ -190,12 +176,22 @@ def migrate_data_for_tenant(tenant: Tenant, app_list: list, write_db: bool): for role in roles: logger.info(f"Migrating role: {role.name} with UUID {role.uuid}.") - migrate_role(role, write_db, root_workspace, default_workspace) + + _, mappings = migrate_role(role, write_relationships, root_workspace, default_workspace) + + # Insert is forced with `create` in order to prevent this from + # accidentally running concurrently with dual-writes. + # If migration should be rerun, then the bindings table should be dropped. + # If changing this to update_or_create, + # always ensure writes are paused before running. + # Thus must always be the case, but `create` will at least start failing you if you forget. + BindingMapping.objects.create(role=role, mappings=mappings) + logger.info(f"Migration completed for role: {role.name} with UUID {role.uuid}.") logger.info(f"Migrated {roles.count()} roles for tenant: {tenant.org_id}") -def migrate_data(exclude_apps: list = [], orgs: list = [], write_db: bool = False): +def migrate_data(exclude_apps: list = [], orgs: list = [], write_relationships: bool = False): """Migrate all data for all tenants.""" count = 0 tenants = Tenant.objects.exclude(tenant_name="public") @@ -205,7 +201,7 @@ def migrate_data(exclude_apps: list = [], orgs: list = [], write_db: bool = Fals for tenant in tenants.iterator(): logger.info(f"Migrating data for tenant: {tenant.org_id}") try: - migrate_data_for_tenant(tenant, exclude_apps, write_db) + migrate_data_for_tenant(tenant, exclude_apps, write_relationships) except Exception as e: logger.error(f"Failed to migrate data for tenant: {tenant.org_id}. 
Error: {e}") raise e diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 86401a849..00cbcf6bf 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -18,11 +18,10 @@ import json import logging import uuid -from typing import Callable, FrozenSet, Type +from typing import Callable, FrozenSet, Optional, Type from management.models import BindingMapping from management.role.model import Role -from management.workspace.model import Workspace from migration_tool.ingest import add_element from migration_tool.models import ( V1group, @@ -68,7 +67,7 @@ def inventory_to_workspace(v2_perm): class SystemRole: """A system role.""" - SYSTEM_ROLES = {} + SYSTEM_ROLES: dict[frozenset[str], V2role] = {} @classmethod def get_system_roles(cls): @@ -97,11 +96,9 @@ def set_system_roles(cls): def v1_role_to_v2_mapping( v1_role: V1role, - v1_role_db_id, root_workspace: str, default_workspace: str, - use_binding_from_db=False, - use_mapping_from_db=False, + binding_mapping: Optional[BindingMapping], ) -> FrozenSet[V2rolebinding]: """Convert a V1 role to a set of V2 role bindings.""" perm_groupings: Permissiongroupings = {} @@ -133,7 +130,7 @@ def v1_role_to_v2_mapping( v2_perm, ) # Project permission sets to system roles - resource_roles = extract_system_roles(perm_groupings, v1_role, v1_role_db_id, use_mapping_from_db) + resource_roles = extract_system_roles(perm_groupings, v1_role, binding_mapping) # Construct rolebindings v2_role_bindings = [] v2_groups = v1groups_to_v2groups(v1_role.groups) @@ -141,10 +138,7 @@ def v1_role_to_v2_mapping( for resource in resources: if v2_groups: for v2_group in v2_groups: - if use_binding_from_db: - binding_mapping = BindingMapping.objects.get(role_id=v1_role_db_id) - if binding_mapping is None: - raise Exception(f"binding_mapping not found in db for role {v1_role_db_id}") + if binding_mapping: role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) else: role_binding_id = str(uuid.uuid4()) @@ -153,10 +147,7 @@ def v1_role_to_v2_mapping( ) v2_role_bindings.append(v2_role_binding) else: - if use_binding_from_db: - binding_mapping = BindingMapping.objects.get(role_id=v1_role_db_id) - if binding_mapping is None: - raise Exception(f"binding_mapping not found in db for role {v1_role_db_id}") + if binding_mapping: role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) else: role_binding_id = str(uuid.uuid4()) @@ -165,14 +156,17 @@ def v1_role_to_v2_mapping( return frozenset(v2_role_bindings) -candidate_system_roles = {} custom_roles_created = 0 -def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_db=False): +def extract_system_roles( + perm_groupings: dict[V1resourcedef, list[str]], v1_role: V1role, binding_mapping: Optional[BindingMapping] +): """Extract system roles from a set of permissions.""" - resource_roles = {} + candidate_system_roles = {} + resource_roles: dict[V2role, list[V1resourcedef]] = {} system_roles = SystemRole.get_system_roles() + for resource, permissions in perm_groupings.items(): system_role = system_roles.get(frozenset(permissions)) if system_role is not None: @@ -223,11 +217,7 @@ def extract_system_roles(perm_groupings, v1_role, db_role_id, use_mapping_from_d else: candidate_system_roles[candidate] = {v1_role.id} # Add a custom role - if use_mapping_from_db: - binding_mapping = 
BindingMapping.objects.get(role_id=db_role_id) - if binding_mapping is None: - raise Exception("V2 role bindings not found in db") - + if binding_mapping: v2_uuid = binding_mapping.find_v2_role_by_permission(permissions) else: v2_uuid = uuid.uuid4() @@ -256,20 +246,6 @@ def split_resourcedef_literal(resourceDef: V1resourcedef): return [json.loads(resourceDef.resource_id)] -def shared_system_role_replicated_role_bindings_v1_to_v2_mapping( - v1_role: V1role, - v1_role_db_id, - root_workspace: Workspace, - default_workspace: Workspace, - use_binding_from_db=False, - use_mapping_from_db=False, -) -> FrozenSet[V2rolebinding]: - """Convert a V1 role to a set of V2 role bindings.""" - return v1_role_to_v2_mapping( - v1_role, v1_role_db_id, root_workspace, default_workspace, use_binding_from_db, use_mapping_from_db - ) - - def v1groups_to_v2groups(v1groups: FrozenSet[V1group]): """Convert a set of V1 groups to a set of V2 groups.""" return frozenset([V2group(v1group.id, v1group.users) for v1group in v1groups]) diff --git a/tests/internal/test_views.py b/tests/internal/test_views.py index 01d6c8a4b..8bacace09 100644 --- a/tests/internal/test_views.py +++ b/tests/internal/test_views.py @@ -464,7 +464,11 @@ def test_run_migrations_of_data(self, migration_mock): **self.request.META, ) migration_mock.assert_called_once_with( - {"exclude_apps": ["rbac", "costmanagement"], "orgs": ["acct00001", "acct00002"], "write_db": False} + { + "exclude_apps": ["rbac", "costmanagement"], + "orgs": ["acct00001", "acct00002"], + "write_relationships": False, + } ) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.assertEqual( @@ -478,7 +482,7 @@ def test_run_migrations_of_data(self, migration_mock): f"/_private/api/utils/data_migration/", **self.request.META, ) - migration_mock.assert_called_once_with({"exclude_apps": [], "orgs": [], "write_db": False}) + migration_mock.assert_called_once_with({"exclude_apps": [], "orgs": [], "write_relationships": False}) self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) self.assertEqual( response.content.decode(), diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index c8141e518..e36e3470d 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -338,7 +338,7 @@ def test_create_role_success(self, send_kafka_message): ANY, ) - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") def test_create_role_with_display_success(self, mock_method): """Test that we can create a role.""" role_name = "roleD" @@ -1363,7 +1363,7 @@ def test_update_role_invalid_permission(self): response = client.put(url, test_data, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") def test_update_role(self, mock_method): """Test that updating a role with an invalid permission returns an error.""" # Set up @@ -1486,7 +1486,7 @@ def test_update_role_permission_does_not_exist_fail(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(response.data.get("errors")[0].get("detail"), f"Permission does not exist: {permission}") - 
@patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler.save_replication_event") + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") def test_delete_role(self, mock_method): """Test that we can delete an existing role.""" role_name = "roleA" From bb75b16a19b63cae9c91286ca68744f72ee8745f Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Tue, 10 Sep 2024 13:22:12 -0400 Subject: [PATCH 13/55] RHCLOUD-35016: Resource to workspace relations from RBAC may be out of date; do not migrate (#1184) * Move dual write deny list (skipped apps) to settings * Make it a little easier to know how to log SQL in tests * Rename/reorganize a few methods and correct type hints * Do not manage resource-workspace relations, only workspace-workspace * Fix relations comparisons in tests * Remove unused imports * More lint fixing * Remove outdated comment * Migrate new environment variable defaults to clowdapp yaml * Add defaults for tox environment * Use same default as dual write in migrate command * Clarify argument name * Make lists actually... well... lists * Exclude resource bindings for excluded resource apps * Use environment for internal migration endpoint also * Fix typo * Make method name more consistent --- deploy/rbac-clowdapp.yml | 10 ++++ rbac/internal/views.py | 10 ++-- .../management/commands/migrate_relations.py | 10 +++- .../role/relation_api_dual_write_handler.py | 4 +- rbac/migration_tool/ingest.py | 23 ++++++-- rbac/migration_tool/migrate.py | 52 +++++++++--------- ...sharedSystemRolesReplicatedRoleBindings.py | 50 +++++++++++------ rbac/rbac/settings.py | 8 ++- tests/internal/test_views.py | 41 +++++++++----- tests/management/role/test_view.py | 54 ++++++++++++++++--- tox.ini | 4 ++ 11 files changed, 195 insertions(+), 71 deletions(-) diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index c5d84f044..fa043afab 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -462,6 +462,10 @@ objects: value: ${REPLICATION_TO_RELATION_ENABLED} - name: ROLE_CREATE_ALLOW_LIST value: ${ROLE_CREATE_ALLOW_LIST} + - name: V2_MIGRATION_APP_EXCLUDE_LIST + value: ${V2_MIGRATION_APP_EXCLUDE_LIST} + - name: V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST + value: ${V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST} - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL value: ${RBAC_DESTRUCTIVE_API_ENABLED_UNTIL} - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL @@ -735,6 +739,12 @@ parameters: - description: Application allow list for role creation in RBAC name: ROLE_CREATE_ALLOW_LIST value: cost-management,remediations,inventory,drift,policies,advisor,vulnerability,compliance,automation-analytics,notifications,patch,integrations,ros,staleness,config-manager,idmsvc +- description: Application exclude list for v2 migration (all permissions) + name: V2_MIGRATION_APP_EXCLUDE_LIST + value: approval +- description: Application exclude list for v2 migration (resource definitions only) + name: V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST + value: cost-management,playbook-dispatcher - description: Timestamp expiration allowance on destructive actions through the internal RBAC API name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL value: '' diff --git a/rbac/internal/views.py b/rbac/internal/views.py index 5ab4b9091..58ed3cd9e 100644 --- a/rbac/internal/views.py +++ b/rbac/internal/views.py @@ -19,6 +19,7 @@ import json import logging +from django.conf import settings import requests from core.utils import destructive_ok from django.db import transaction 
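The next hunk lets the internal migration endpoint fall back to the settings-driven exclude list when no exclude_apps query parameter is supplied. A minimal, self-contained illustration of how the comma-separated environment variable becomes that list (mirroring the settings.py change later in this patch; the value is only an example, and os.environ stands in for the django-environ reader settings.py actually uses):

    import os

    # The ClowdApp template and tox.ini set this as a comma-separated string.
    os.environ.setdefault("V2_MIGRATION_APP_EXCLUDE_LIST", "approval")

    # settings.py splits it once at startup into the list used as the default.
    V2_MIGRATION_APP_EXCLUDE_LIST = os.environ.get("V2_MIGRATION_APP_EXCLUDE_LIST", "").split(",")
    print(V2_MIGRATION_APP_EXCLUDE_LIST)  # ['approval']

    # Callers that omit exclude_apps (the endpoint below and the management command)
    # then receive this list as their default.
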
@@ -469,12 +470,13 @@ def ocm_performance(request): return HttpResponse('Invalid method, only "POST" is allowed.', status=405) -def get_param_list(request, param_name): +def get_param_list(request, param_name, default: list = []): """Get a list of params from a request.""" params = request.GET.get(param_name, []) if params: - params = params.split(",") - return params + return params.split(",") + else: + return default def data_migration(request): @@ -487,7 +489,7 @@ def data_migration(request): logger.info("Running V1 data migration.") args = { - "exclude_apps": get_param_list(request, "exclude_apps"), + "exclude_apps": get_param_list(request, "exclude_apps", default=settings.V2_MIGRATION_APP_EXCLUDE_LIST), "orgs": get_param_list(request, "orgs"), "write_relationships": request.GET.get("write_relationships", "False") == "True", } diff --git a/rbac/management/management/commands/migrate_relations.py b/rbac/management/management/commands/migrate_relations.py index 151f3f45b..cd7194b78 100644 --- a/rbac/management/management/commands/migrate_relations.py +++ b/rbac/management/management/commands/migrate_relations.py @@ -2,6 +2,7 @@ import logging +from django.conf import settings from django.core.management.base import BaseCommand from migration_tool.migrate import migrate_data @@ -16,8 +17,13 @@ class Command(BaseCommand): def add_arguments(self, parser): """Add arguments to command.""" parser.add_argument("--org-list", nargs="+", default=[]) - parser.add_argument("--exclude-apps", nargs="+", default=[]) - parser.add_argument("--write-to-db", default=False, action="store_true") + parser.add_argument( + "--exclude-apps", + nargs="+", + default=settings.V2_MIGRATION_APP_EXCLUDE_LIST, + help="List of apps to exclude. Default comes from environment.", + ) + parser.add_argument("--write-relationships", default=False, action="store_true") def handle(self, *args, **options): """Handle method for command.""" diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index e0876247f..1f44862be 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -79,7 +79,7 @@ def load_relations_from_current_state_of_role(self): write_relationships=False, root_workspace=str(self.root_workspace.uuid), default_workspace=self.org_id, - current_bindings=self.binding_mapping, + current_mapping=self.binding_mapping, ) self.current_role_relations = relations @@ -125,7 +125,7 @@ def _generate_relations_and_mappings_for_role(self): write_relationships=False, root_workspace=str(self.root_workspace.uuid), default_workspace=self.org_id, - current_bindings=self.binding_mapping, + current_mapping=self.binding_mapping, ) self.role_relations = relations diff --git a/rbac/migration_tool/ingest.py b/rbac/migration_tool/ingest.py index e7e6a28af..0dca33c38 100644 --- a/rbac/migration_tool/ingest.py +++ b/rbac/migration_tool/ingest.py @@ -19,14 +19,20 @@ from typing import Tuple from management.role.model import Role -from migration_tool.models import V1permission, V1resourcedef, V1role +from migration_tool.models import V1group, V1permission, V1resourcedef, V1role -def extract_info_into_v1_role(role: Role): - """Extract the information from the role and returns a V1role object.""" +def aggregate_v1_role(role: Role) -> V1role: + """ + Aggregate the role's access and policy as a consolidated V1role object. 
+ + This maps the RBAC model to preloaded, navigable objects with the key data broken down. + """ perm_res_defs: dict[Tuple[str, str], list[V1resourcedef]] = {} perm_list: list[str] = [] role_id = str(role.uuid) + + # Determine v1 permissions for access in role.access.all(): for resource_def in access.resourceDefinitions.all(): attri_filter = resource_def.attributeFilter @@ -47,7 +53,16 @@ def extract_info_into_v1_role(role: Role): res_defs = [res_def for res_def in perm_res_defs.get((role_id, perm), [])] v1_perm = V1permission(perm_parts[0], perm_parts[1], perm_parts[2], frozenset(res_defs)) v1_perms.append(v1_perm) - return V1role(role_id, frozenset(v1_perms), frozenset()) # we don't get groups from the sheet + + # With the replicated role bindings algorithm, role bindings are scoped by group, so we need to add groups + # TODO: We don't need to care about principals here – see RHCLOUD-35039 + # Maybe not even groups? See RHCLOUD-34786 + groups = set() + for policy in role.policies.all(): + principals = [str(principal) for principal in policy.group.principals.values_list("uuid", flat=True)] + groups.add(V1group(str(policy.group.uuid), frozenset(principals))) + + return V1role(role_id, frozenset(v1_perms), frozenset(groups)) def add_element(dict, key, value): diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index db0247804..c4d5eba09 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -15,7 +15,6 @@ along with this program. If not, see . """ -import dataclasses import logging from typing import Any, FrozenSet, Optional @@ -23,12 +22,12 @@ from kessel.relations.v1beta1 import common_pb2 from management.role.model import BindingMapping, Role from management.workspace.model import Workspace -from migration_tool.models import V1group, V2rolebinding -from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_mapping +from migration_tool.models import V2rolebinding +from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_bindings from migration_tool.utils import create_relationship, output_relationships from api.models import Tenant -from .ingest import extract_info_into_v1_role +from .ingest import aggregate_v1_role logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -69,18 +68,26 @@ def get_kessel_relation_tuples( relationships.append(create_relationship("role_binding", v2_role_binding.id, "group", group.id, "subject")) for bound_resource in v2_role_binding.resources: - parent_relation = "parent" if bound_resource.resource_type == "workspace" else "workspace" - - if not (bound_resource.resource_type == "workspace" and bound_resource.resourceId == root_workspace): + # Is this a workspace binding, but not to the root workspace? + # If so, ensure this workspace is a child of the root workspace. + # All other resource-resource or resource-workspace relations + # which may be implied or necessary are intentionally ignored. + # These should come from the apps that own the resource. + if bound_resource.resource_type == "workspace" and not bound_resource.resourceId == root_workspace: + # This is not strictly necessary here and the relation may be a duplicate. + # Once we have more Workspace API / Inventory Group migration progress, + # this block can and probably should be removed. + # One of those APIs will add it themselves. 
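For a role binding attached to a non-root workspace, the two appends just below therefore produce a pair of tuples shaped roughly like this (IDs are invented; create_relationship is the helper from migration_tool.utils, called with the same argument order used elsewhere in this file):

    from migration_tool.utils import create_relationship

    bound_ws, root_ws, rb = "ws-A", "root-ws", "rb-1"  # invented IDs for illustration

    tuples = [
        # Keep the bound workspace under the tenant-level workspace...
        create_relationship("workspace", bound_ws, "workspace", root_ws, "parent"),
        # ...and grant the role binding on it.
        create_relationship("workspace", bound_ws, "role_binding", rb, "user_grant"),
    ]
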
relationships.append( create_relationship( bound_resource.resource_type, bound_resource.resourceId, "workspace", root_workspace, - parent_relation, + "parent", ) ) + relationships.append( create_relationship( bound_resource.resource_type, @@ -99,23 +106,20 @@ def migrate_role( write_relationships: bool, root_workspace: str, default_workspace: str, - current_bindings: Optional[BindingMapping] = None, + current_mapping: Optional[BindingMapping] = None, ) -> tuple[list[common_pb2.Relationship], BindingMappings]: - """Migrate a role from v1 to v2.""" - v1_role = extract_info_into_v1_role(role) - # With the replicated role bindings algorithm, role bindings are scoped by group, so we need to add groups - policies = role.policies.all() - groups = set() - for policy in policies: - principals = [str(principal) for principal in policy.group.principals.values_list("uuid", flat=True)] - groups.add(V1group(str(policy.group.uuid), frozenset(principals))) - v1_role = dataclasses.replace(v1_role, groups=frozenset(groups)) + """ + Migrate a role from v1 to v2, returning the tuples and mappings. + The mappings are returned so that we can reconstitute the corresponding tuples for a given role. + This is needed so we can remove those tuples when the role changes if needed. + """ + v1_role = aggregate_v1_role(role) # This is where we wire in the implementation we're using into the Migrator - v2_roles = [ - v2_role for v2_role in v1_role_to_v2_mapping(v1_role, root_workspace, default_workspace, current_bindings) + v2_role_bindings = [ + binding for binding in v1_role_to_v2_bindings(v1_role, root_workspace, default_workspace, current_mapping) ] - relationships, mappings = get_kessel_relation_tuples(frozenset(v2_roles), root_workspace) + relationships, mappings = get_kessel_relation_tuples(frozenset(v2_role_bindings), root_workspace) output_relationships(relationships, write_relationships) return relationships, mappings @@ -156,7 +160,7 @@ def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): output_relationships(relationships, write_relationships) -def migrate_data_for_tenant(tenant: Tenant, app_list: list, write_relationships: bool): +def migrate_data_for_tenant(tenant: Tenant, exclude_apps: list, write_relationships: bool): """Migrate all data for a given tenant.""" logger.info("Creating workspace.") root_workspace, default_workspace = migrate_workspace(tenant, write_relationships) @@ -171,8 +175,8 @@ def migrate_data_for_tenant(tenant: Tenant, app_list: list, write_relationships: logger.info("Finished migrating relations of group and user.") roles = tenant.role_set.all() - if app_list: - roles = roles.exclude(access__permission__application__in=app_list) + if exclude_apps: + roles = roles.exclude(access__permission__application__in=exclude_apps) for role in roles: logger.info(f"Migrating role: {role.name} with UUID {role.uuid}.") diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 00cbcf6bf..345ca9d9b 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -20,6 +20,7 @@ import uuid from typing import Callable, FrozenSet, Optional, Type +from django.conf import settings from management.models import BindingMapping from management.role.model import Role from migration_tool.ingest import add_element @@ -37,7 +38,7 @@ logger = logging.getLogger(__name__) -Permissiongroupings = dict[V1resourcedef, list[str]] 
+Permissiongroupings = dict[V2boundresource, list[str]] Perm_bound_resources = dict[str, list[V2boundresource]] group_perms_for_rolebinding_fn = Type[ @@ -91,10 +92,7 @@ def set_system_roles(cls): add_system_role(cls.SYSTEM_ROLES, V2role(str(role.uuid), True, frozenset(permission_list))) -skipped_apps = {"cost-management", "playbook-dispatcher", "approval"} - - -def v1_role_to_v2_mapping( +def v1_role_to_v2_bindings( v1_role: V1role, root_workspace: str, default_workspace: str, @@ -102,12 +100,14 @@ def v1_role_to_v2_mapping( ) -> FrozenSet[V2rolebinding]: """Convert a V1 role to a set of V2 role bindings.""" perm_groupings: Permissiongroupings = {} - # Group V2 permissions by target + # Group V2 permissions by target resource for v1_perm in v1_role.permissions: if not is_for_enabled_app(v1_perm): continue v2_perm = v1_perm_to_v2_perm(v1_perm) if v1_perm.resourceDefs: + if not is_for_enabled_resource(v1_perm): + continue for resource_def in v1_perm.resourceDefs: resource_type = ( "workspace" @@ -129,9 +129,9 @@ def v1_role_to_v2_mapping( V2boundresource("workspace", root_workspace), v2_perm, ) - # Project permission sets to system roles - resource_roles = extract_system_roles(perm_groupings, v1_role, binding_mapping) - # Construct rolebindings + # Project permission sets to roles per set of resources + resource_roles = permission_groupings_to_v2_role_and_resource(perm_groupings, v1_role, binding_mapping) + # Construct rolebindings for each resource v2_role_bindings = [] v2_groups = v1groups_to_v2groups(v1_role.groups) for role, resources in resource_roles.items(): @@ -159,12 +159,16 @@ def v1_role_to_v2_mapping( custom_roles_created = 0 -def extract_system_roles( - perm_groupings: dict[V1resourcedef, list[str]], v1_role: V1role, binding_mapping: Optional[BindingMapping] -): - """Extract system roles from a set of permissions.""" +def permission_groupings_to_v2_role_and_resource( + perm_groupings: Permissiongroupings, v1_role: V1role, binding_mapping: Optional[BindingMapping] +) -> dict[V2role, list[V2boundresource]]: + """ + Determine V2 roles and resources they apply to from a set of V1 resources and permissions. + + Prefers to reuse system roles where possible. + """ candidate_system_roles = {} - resource_roles: dict[V2role, list[V1resourcedef]] = {} + resource_roles: dict[V2role, list[V2boundresource]] = {} system_roles = SystemRole.get_system_roles() for resource, permissions in perm_groupings.items(): @@ -229,8 +233,22 @@ def extract_system_roles( def is_for_enabled_app(perm: V1permission): - """Return true if the permission is for an app that is no longer in use.""" - return perm.app not in skipped_apps + """Return true if the permission is for an app that should migrate.""" + return perm.app not in settings.V2_MIGRATION_APP_EXCLUDE_LIST + + +def is_for_enabled_resource(perm: V1permission): + """ + Return true if the resource is for an app that should migrate. + + This setting is used when the permission is valid for V2 but the resource model is not yet finalized. + It excludes role bindings for those specific resources, and only migrates those which are bound + at the workspace level. + + Once the resource model is finalized, we should no longer exclude that app, and should instead update + the migration code to account for migrating those resources in whatever form they should migrate. 
+ """ + return perm.app not in settings.V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST def split_resourcedef_literal(resourceDef: V1resourcedef): diff --git a/rbac/rbac/settings.py b/rbac/rbac/settings.py index 352bbbb1f..4d6dea141 100644 --- a/rbac/rbac/settings.py +++ b/rbac/rbac/settings.py @@ -346,6 +346,12 @@ ROLE_CREATE_ALLOW_LIST = ENVIRONMENT.get_value("ROLE_CREATE_ALLOW_LIST", default="").split(",") +# Dual write migration configuration +V2_MIGRATION_APP_EXCLUDE_LIST = ENVIRONMENT.get_value("V2_MIGRATION_APP_EXCLUDE_LIST", default="").split(",") +V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST = ENVIRONMENT.get_value( + "V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST", default="" +).split(",") + # Migration Setup TENANT_PARALLEL_MIGRATION_MAX_PROCESSES = ENVIRONMENT.int("TENANT_PARALLEL_MIGRATION_MAX_PROCESSES", default=2) TENANT_PARALLEL_MIGRATION_CHUNKS = ENVIRONMENT.int("TENANT_PARALLEL_MIGRATION_CHUNKS", default=2) @@ -366,7 +372,7 @@ DESTRUCTIVE_SEEDING_OK_UNTIL = datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC) # disable log messages less than CRITICAL when running unit tests. -if len(sys.argv) > 1 and sys.argv[1] == "test": +if len(sys.argv) > 1 and sys.argv[1] == "test" and not ENVIRONMENT.bool("LOG_TEST_OUTPUT", default=False): logging.disable(logging.CRITICAL) # Optionally log all DB queries diff --git a/tests/internal/test_views.py b/tests/internal/test_views.py index 8bacace09..337c5d2b1 100644 --- a/tests/internal/test_views.py +++ b/tests/internal/test_views.py @@ -476,15 +476,32 @@ def test_run_migrations_of_data(self, migration_mock): "Data migration from V1 to V2 are running in a background worker.", ) - # Without params - migration_mock.reset_mock() - response = self.client.post( - f"/_private/api/utils/data_migration/", - **self.request.META, - ) - migration_mock.assert_called_once_with({"exclude_apps": [], "orgs": [], "write_relationships": False}) - self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) - self.assertEqual( - response.content.decode(), - "Data migration from V1 to V2 are running in a background worker.", - ) + # Without params uses global default + with self.settings(V2_MIGRATION_APP_EXCLUDE_LIST=["fooapp"]): + migration_mock.reset_mock() + response = self.client.post( + f"/_private/api/utils/data_migration/", + **self.request.META, + ) + migration_mock.assert_called_once_with( + {"exclude_apps": ["fooapp"], "orgs": [], "write_relationships": False} + ) + self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) + self.assertEqual( + response.content.decode(), + "Data migration from V1 to V2 are running in a background worker.", + ) + + # Without params uses none if no global default + with self.settings(V2_MIGRATION_APP_EXCLUDE_LIST=[]): + migration_mock.reset_mock() + response = self.client.post( + f"/_private/api/utils/data_migration/", + **self.request.META, + ) + migration_mock.assert_called_once_with({"exclude_apps": [], "orgs": [], "write_relationships": False}) + self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED) + self.assertEqual( + response.content.decode(), + "Data migration from V1 to V2 are running in a background worker.", + ) diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index e36e3470d..35dd0a7b8 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -85,9 +85,6 @@ def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): ) relations.append(relation_tuple) else: - relation_tuple = relation_api_tuple("keya/id", "valueA", "workspace", 
"workspace", root_workspace_uuid) - relations.append(relation_tuple) - relation_tuple = relation_api_tuple( "keya/id", "valueA", "user_grant", "role_binding", str(role_binding_uuid) ) @@ -108,6 +105,14 @@ def relation_api_resource(type_resource, id_resource): return {"type": type_resource, "id": id_resource} +def find_in_list(list, predicate): + """Find an item in a list.""" + for item in list: + if predicate(item): + return item + return None + + class RoleViewsetTests(IdentityRequest): """Test the role viewset.""" @@ -338,6 +343,43 @@ def test_create_role_success(self, send_kafka_message): ANY, ) + @override_settings(V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST=["app"]) + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") + def test_role_replication_exluded_resource(self, mock_method): + """Test that excluded resources do not replicate via dual write.""" + # Set up + role_name = "test_update_role" + access_data = [ + { + "permission": "app:*:*", + "resourceDefinitions": [ + {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + ], + }, + {"permission": "app:*:read", "resourceDefinitions": []}, + ] + + response = self.create_role(role_name, in_access_data=access_data) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + actual_call_arg = mock_method.call_args[0][0] + actual_sorted = normalize_and_sort(actual_call_arg) + to_add = actual_sorted["relations_to_add"] + + self.assertEqual([], actual_sorted["relations_to_remove"]) + self.assertEqual(3, len(to_add), "too many relations (should not add relations for excluded resource)") + + role_binding = find_in_list(to_add, lambda r: r["resource"]["type"] == "role_binding")["resource"]["id"] + workspace = find_in_list(to_add, lambda r: r["resource"]["type"] == "workspace") + + self.assertEquals( + role_binding, workspace["subject"]["id"], "expected binding to workspace (not to excluded resource)" + ) + + role = find_in_list(to_add, lambda r: r["resource"]["type"] == "role") + + self.assertEquals(role["relation"], "app_all_read", "expected workspace permission") + @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") def test_create_role_with_display_success(self, mock_method): """Test that we can create a role.""" @@ -361,7 +403,7 @@ def test_create_role_with_display_success(self, mock_method): actual_call_arg = mock_method.call_args[0][0] expected_sorted = normalize_and_sort(replication_event) actual_sorted = normalize_and_sort(actual_call_arg) - self.assertEqual(set(expected_sorted), set(actual_sorted)) + self.assertEqual(expected_sorted, actual_sorted) # test that we can retrieve the role url = reverse("role-detail", kwargs={"uuid": response.data.get("uuid")}) @@ -1402,7 +1444,7 @@ def test_update_role(self, mock_method): actual_call_arg = mock_method.call_args[0][0] expected_sorted = normalize_and_sort(replication_event) actual_sorted = normalize_and_sort(actual_call_arg) - self.assertEqual(set(expected_sorted), set(actual_sorted)) + self.assertEqual(expected_sorted, actual_sorted) self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -1511,7 +1553,7 @@ def test_delete_role(self, mock_method): actual_call_arg = mock_method.call_args[0][0] expected_sorted = normalize_and_sort(replication_event) actual_sorted = normalize_and_sort(actual_call_arg) - self.assertEqual(set(expected_sorted), set(actual_sorted)) + self.assertEqual(expected_sorted, actual_sorted) 
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) @patch("core.kafka.RBACProducer.send_kafka_message") diff --git a/tox.ini b/tox.ini index accf2093e..796ba3bb6 100644 --- a/tox.ini +++ b/tox.ini @@ -20,6 +20,8 @@ application-import-names = rbac, api [testenv] ;passenv = CI TRAVIS TRAVIS_* setenv = + LOG_TEST_OUTPUT={env:LOG_TEST_OUTPUT:False} + LOG_DATABASE_QUERIES={env:LOG_DATABASE_QUERIES:False} DATABASE_NAME={env:DATABASE_NAME:postgres} DATABASE_HOST={env:DATABASE_HOST:localhost} DATABASE_PORT={env:DATABASE_PORT:15432} @@ -28,6 +30,8 @@ setenv = PGPASSWORD={env:PGPASSWORD:postgres} TESTING_APPLICATION=app ROLE_CREATE_ALLOW_LIST=cost-management + V2_MIGRATION_APP_EXCLUDE_LIST=approval + V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST=cost-management,playbook-dispatcher NOTIFICATIONS_TOPIC=platform.notifications.ingress EXTERNAL_SYNC_TOPIC=platform.rbac.sync EXTERNAL_CHROME_TOPIC=platform.chrome From ce91b516619b8a0926ff9a76216f295c90e002cb Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Tue, 10 Sep 2024 13:44:34 -0400 Subject: [PATCH 14/55] Relate resources and workspaces to default (not root) (#1186) Everything should go to/under the "default" workspace in order to maintain equivalent inheritance behavior, while at the same time allow for escaping inheritance with new workspaces under the root without having to move resources around. --- .../role/relation_api_dual_write_handler.py | 7 +------ rbac/migration_tool/migrate.py | 17 +++++++---------- .../sharedSystemRolesReplicatedRoleBindings.py | 5 ++--- tests/management/role/test_view.py | 16 ++++++++-------- tests/migration_tool/tests_migrate.py | 6 +++--- 5 files changed, 21 insertions(+), 30 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 1f44862be..720493319 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -18,7 +18,7 @@ """Class to handle Dual Write API related operations.""" import logging -from management.models import Outbox, Workspace +from management.models import Outbox from management.role.model import BindingMapping from migration_tool.migrate import migrate_role from migration_tool.utils import relationship_to_json @@ -48,9 +48,6 @@ def __init__(self, role, event_type): self.binding_mapping = None self.tenant_id = role.tenant_id self.org_id = role.tenant.org_id - self.root_workspace = Workspace.objects.get( - name="root", description="Root workspace", tenant_id=self.tenant_id - ) self.event_type = event_type except Exception as e: raise DualWriteException(e) @@ -77,7 +74,6 @@ def load_relations_from_current_state_of_role(self): relations, _ = migrate_role( self.role, write_relationships=False, - root_workspace=str(self.root_workspace.uuid), default_workspace=self.org_id, current_mapping=self.binding_mapping, ) @@ -123,7 +119,6 @@ def _generate_relations_and_mappings_for_role(self): relations, mappings = migrate_role( self.role, write_relationships=False, - root_workspace=str(self.root_workspace.uuid), default_workspace=self.org_id, current_mapping=self.binding_mapping, ) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index c4d5eba09..bdc10d034 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -37,7 +37,7 @@ def get_kessel_relation_tuples( v2_role_bindings: FrozenSet[V2rolebinding], - root_workspace: str, + default_workspace: str, ) -> 
tuple[list[common_pb2.Relationship], BindingMappings]: """Generate a set of relationships and BindingMappings for the given set of v2 role bindings.""" relationships: list[common_pb2.Relationship] = list() @@ -73,7 +73,7 @@ def get_kessel_relation_tuples( # All other resource-resource or resource-workspace relations # which may be implied or necessary are intentionally ignored. # These should come from the apps that own the resource. - if bound_resource.resource_type == "workspace" and not bound_resource.resourceId == root_workspace: + if bound_resource.resource_type == "workspace" and not bound_resource.resourceId == default_workspace: # This is not strictly necessary here and the relation may be a duplicate. # Once we have more Workspace API / Inventory Group migration progress, # this block can and probably should be removed. @@ -83,7 +83,7 @@ def get_kessel_relation_tuples( bound_resource.resource_type, bound_resource.resourceId, "workspace", - root_workspace, + default_workspace, "parent", ) ) @@ -104,7 +104,6 @@ def get_kessel_relation_tuples( def migrate_role( role: Role, write_relationships: bool, - root_workspace: str, default_workspace: str, current_mapping: Optional[BindingMapping] = None, ) -> tuple[list[common_pb2.Relationship], BindingMappings]: @@ -116,10 +115,8 @@ def migrate_role( """ v1_role = aggregate_v1_role(role) # This is where we wire in the implementation we're using into the Migrator - v2_role_bindings = [ - binding for binding in v1_role_to_v2_bindings(v1_role, root_workspace, default_workspace, current_mapping) - ] - relationships, mappings = get_kessel_relation_tuples(frozenset(v2_role_bindings), root_workspace) + v2_role_bindings = [binding for binding in v1_role_to_v2_bindings(v1_role, default_workspace, current_mapping)] + relationships, mappings = get_kessel_relation_tuples(frozenset(v2_role_bindings), default_workspace) output_relationships(relationships, write_relationships) return relationships, mappings @@ -163,7 +160,7 @@ def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): def migrate_data_for_tenant(tenant: Tenant, exclude_apps: list, write_relationships: bool): """Migrate all data for a given tenant.""" logger.info("Creating workspace.") - root_workspace, default_workspace = migrate_workspace(tenant, write_relationships) + _, default_workspace = migrate_workspace(tenant, write_relationships) logger.info("Workspace migrated.") logger.info("Relating users to tenant.") @@ -181,7 +178,7 @@ def migrate_data_for_tenant(tenant: Tenant, exclude_apps: list, write_relationsh for role in roles: logger.info(f"Migrating role: {role.name} with UUID {role.uuid}.") - _, mappings = migrate_role(role, write_relationships, root_workspace, default_workspace) + _, mappings = migrate_role(role, write_relationships, default_workspace) # Insert is forced with `create` in order to prevent this from # accidentally running concurrently with dual-writes. 
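For illustration, a minimal sketch (not taken from the patch) of the relationship this change produces: once migrate_role receives only the default workspace, a workspace referenced by a role's attribute filter is parented to the tenant's default workspace, identified by org_id, rather than to the root workspace UUID. The sketch uses the create_relationship helper as it exists at this point in the series (plain string type names); the concrete IDs below are made-up example values.

    from migration_tool.utils import create_relationship

    default_workspace = "12345678"  # tenant org_id now stands in for the default workspace id
    bound_workspace = "ws_2"        # example workspace id taken from a role's attribute filter

    # The bound workspace hangs off the default workspace, so role bindings keep inheriting
    # through the default workspace while new workspaces under the root can still escape it.
    parent_tuple = create_relationship(
        "workspace", bound_workspace, "workspace", default_workspace, "parent"
    )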
diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 345ca9d9b..a49c97b72 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -94,7 +94,6 @@ def set_system_roles(cls): def v1_role_to_v2_bindings( v1_role: V1role, - root_workspace: str, default_workspace: str, binding_mapping: Optional[BindingMapping], ) -> FrozenSet[V2rolebinding]: @@ -126,13 +125,13 @@ def v1_role_to_v2_bindings( else: add_element( perm_groupings, - V2boundresource("workspace", root_workspace), + V2boundresource("workspace", default_workspace), v2_perm, ) # Project permission sets to roles per set of resources resource_roles = permission_groupings_to_v2_role_and_resource(perm_groupings, v1_role, binding_mapping) # Construct rolebindings for each resource - v2_role_bindings = [] + v2_role_bindings: list[V2rolebinding] = [] v2_groups = v1groups_to_v2groups(v1_role.groups) for role, resources in resource_roles.items(): for resource in resources: diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index 35dd0a7b8..b7ac764c2 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -57,15 +57,15 @@ def normalize_and_sort(json_obj): return json_obj -def replication_event_for_v1_role(v1_role_uuid, root_workspace_uuid): +def replication_event_for_v1_role(v1_role_uuid, default_workspace_uuid): """Create a replication event for a v1 role.""" return { - "relations_to_add": relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid), + "relations_to_add": relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid), "relations_to_remove": [], } -def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): +def relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid): """Create a relation API tuple for a v1 role.""" role_id = Role.objects.get(uuid=v1_role_uuid).id role_binding = BindingMapping.objects.filter(role=role_id).first() @@ -81,7 +81,7 @@ def relation_api_tuples_for_v1_role(v1_role_uuid, root_workspace_uuid): relations.append(relation_tuple) if "app_all_read" in data["permissions"]: relation_tuple = relation_api_tuple( - "workspace", root_workspace_uuid, "user_grant", "role_binding", str(role_binding_uuid) + "workspace", default_workspace_uuid, "user_grant", "role_binding", str(role_binding_uuid) ) relations.append(relation_tuple) else: @@ -397,7 +397,7 @@ def test_create_role_with_display_success(self, mock_method): response = self.create_role(role_name, role_display=role_display, in_access_data=access_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.tenant.org_id)) mock_method.assert_called_once() actual_call_arg = mock_method.call_args[0][0] @@ -1436,10 +1436,10 @@ def test_update_role(self, mock_method): test_data["access"] = new_access_data url = reverse("role-detail", kwargs={"uuid": role_uuid}) client = APIClient() - current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.root_workspace.uuid)) + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.tenant.org_id)) response = client.put(url, test_data, format="json", **self.headers) - replication_event = 
replication_event_for_v1_role(response.data.get("uuid"), str(self.root_workspace.uuid)) + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.tenant.org_id)) replication_event["relations_to_remove"] = current_relations actual_call_arg = mock_method.call_args[0][0] expected_sorted = normalize_and_sort(replication_event) @@ -1547,7 +1547,7 @@ def test_delete_role(self, mock_method): url = reverse("role-detail", kwargs={"uuid": role_uuid}) client = APIClient() replication_event = {"relations_to_add": [], "relations_to_remove": []} - current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.root_workspace.uuid)) + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.tenant.org_id)) replication_event["relations_to_remove"] = current_relations response = client.delete(url, **self.headers) actual_call_arg = mock_method.call_args[0][0] diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index 8074741aa..f17c19c88 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -147,16 +147,16 @@ def test_migration_of_data(self, logger_mock): call(f"role_binding:{rolebinding_a2}#granted@role:{v2_role_a2}"), call(f"role:{v2_role_a2}#inventory_hosts_write@user:*"), call(f"role_binding:{rolebinding_a2}#subject@group:{self.group_a2.uuid}"), - call(f"workspace:{self.aws_account_id_1}#parent@workspace:{root_workspace_id}"), + call(f"workspace:{self.aws_account_id_1}#parent@workspace:{org_id}"), call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a2}"), ## Role binding to role_a3 call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}"), call(f"role:{v2_role_a31}#inventory_hosts_write@user:*"), - call(f"workspace:{workspace_1}#parent@workspace:{root_workspace_id}"), + call(f"workspace:{workspace_1}#parent@workspace:{org_id}"), call(f"workspace:{workspace_1}#user_grant@role_binding:{rolebinding_a31}"), call(f"role_binding:{rolebinding_a32}#granted@role:{v2_role_a32}"), call(f"role:{v2_role_a32}#inventory_hosts_write@user:*"), - call(f"workspace:{workspace_2}#parent@workspace:{root_workspace_id}"), + call(f"workspace:{workspace_2}#parent@workspace:{org_id}"), call(f"workspace:{workspace_2}#user_grant@role_binding:{rolebinding_a32}"), ] logger_mock.info.assert_has_calls(tuples, any_order=True) From 690ae27c6761a712125be26b6a3aa94d5118c5bb Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Tue, 10 Sep 2024 17:08:32 -0400 Subject: [PATCH 15/55] Rebase migrations on master --- ..._and_more.py => 0050_remove_rolemapping_v1_role_and_more.py} | 2 +- .../{0050_bindingmapping.py => 0051_bindingmapping.py} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename rbac/management/migrations/{0049_remove_rolemapping_v1_role_and_more.py => 0050_remove_rolemapping_v1_role_and_more.py} (92%) rename rbac/management/migrations/{0050_bindingmapping.py => 0051_bindingmapping.py} (92%) diff --git a/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py b/rbac/management/migrations/0050_remove_rolemapping_v1_role_and_more.py similarity index 92% rename from rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py rename to rbac/management/migrations/0050_remove_rolemapping_v1_role_and_more.py index b91ef39ba..9f8c9799e 100644 --- a/rbac/management/migrations/0049_remove_rolemapping_v1_role_and_more.py +++ b/rbac/management/migrations/0050_remove_rolemapping_v1_role_and_more.py @@ -6,7 +6,7 @@ class 
Migration(migrations.Migration): dependencies = [ - ("management", "0048_outbox"), + ("management", "0049_alter_workspace_parent"), ] operations = [ diff --git a/rbac/management/migrations/0050_bindingmapping.py b/rbac/management/migrations/0051_bindingmapping.py similarity index 92% rename from rbac/management/migrations/0050_bindingmapping.py rename to rbac/management/migrations/0051_bindingmapping.py index 331bf1f6e..b13ccb139 100644 --- a/rbac/management/migrations/0050_bindingmapping.py +++ b/rbac/management/migrations/0051_bindingmapping.py @@ -7,7 +7,7 @@ class Migration(migrations.Migration): dependencies = [ - ("management", "0049_remove_rolemapping_v1_role_and_more"), + ("management", "0050_remove_rolemapping_v1_role_and_more"), ] operations = [ From c6ae3137dc947ffb383fffdf5ac08aa1f1e000de Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Wed, 11 Sep 2024 13:19:53 +0200 Subject: [PATCH 16/55] Fix linter issues in internal/views.py and migrate.py --- rbac/internal/views.py | 3 ++- rbac/migration_tool/migrate.py | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/rbac/internal/views.py b/rbac/internal/views.py index 58ed3cd9e..bc4b3d952 100644 --- a/rbac/internal/views.py +++ b/rbac/internal/views.py @@ -16,12 +16,13 @@ # """View for internal tenant management.""" + import json import logging -from django.conf import settings import requests from core.utils import destructive_ok +from django.conf import settings from django.db import transaction from django.db.migrations.recorder import MigrationRecorder from django.http import HttpResponse diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index bdc10d034..3ea31f079 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -21,7 +21,6 @@ from django.conf import settings from kessel.relations.v1beta1 import common_pb2 from management.role.model import BindingMapping, Role -from management.workspace.model import Workspace from migration_tool.models import V2rolebinding from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_bindings from migration_tool.utils import create_relationship, output_relationships From cb93ae071ae0ad236871823d1e18fba8b16e3859 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Thu, 19 Sep 2024 09:23:28 -0400 Subject: [PATCH 17/55] RHCLOUD-35235: Create test harness for simpler testing of expected relations for given edits (#1200) * Factor out an "RelationReplicator" for testing replication * Add InMemoryTuples for testing replication * Add enum for event types to clarify the different replication event types * Add test fixture for testing tuple replication patterns * Predefine attribute-resource map with namespaces; improve test fixtures * Include unmatched groups for better failure messages * Implement expect_1_role_binding_to_workspace; fix roles w/ default & resource def perms * Improve predicate string representation for debugging * Add events for system roles (e.g. for seeding) * Add another test * Catch when no resource is define for v1 role (e.g. 
system role seeding) * Fix tenant when creating system role in fixture * Outline some more tests and expectations * Clarify param name * Remove unused method * Add test reproducing issue * Update replicate method naming * Fix typo in OutboxReplicator name * Update typos and add namespace to outbox via proto schema --- rbac/management/role/model.py | 7 +- .../role/relation_api_dual_write_handler.py | 180 ++++++--- rbac/management/role/view.py | 20 +- rbac/migration_tool/in_memory_tuples.py | 274 ++++++++++++++ rbac/migration_tool/ingest.py | 16 +- rbac/migration_tool/migrate.py | 37 +- rbac/migration_tool/models.py | 3 +- ...sharedSystemRolesReplicatedRoleBindings.py | 32 +- rbac/migration_tool/utils.py | 9 +- rbac/rbac/settings.py | 1 + tests/identity_request.py | 4 +- tests/management/role/test_dual_write.py | 356 ++++++++++++++++++ tests/management/role/test_view.py | 26 +- 13 files changed, 861 insertions(+), 104 deletions(-) create mode 100644 rbac/migration_tool/in_memory_tuples.py create mode 100644 tests/management/role/test_dual_write.py diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index af4b363bf..c8e0c6358 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -121,9 +121,14 @@ class Meta: class BindingMapping(models.Model): """V2 binding Mapping definition.""" + # The ID of the role binding + # id = models.UUIDField(primary_key=True, default=uuid4, editable=False) + # The relations mappings = models.JSONField(default=dict) - # One-to-one relationship with Role role = models.OneToOneField(Role, on_delete=models.CASCADE, related_name="binding_mapping") + # resource_type_namespace = models.CharField(max_length=20, null=False) + # resource_type_name = models.CharField(max_length=20, null=False) + # resource_id = models.CharField(max_length=50, null=False) def find_role_binding_by_v2_role(self, v2_role_id): """Find role binding by v2 role id.""" diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 720493319..801d21b6a 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -17,13 +17,17 @@ """Class to handle Dual Write API related operations.""" import logging +from abc import ABC, abstractmethod +from enum import Enum +from typing import Optional +from django.conf import settings +from google.protobuf import json_format +from kessel.relations.v1beta1 import common_pb2 from management.models import Outbox from management.role.model import BindingMapping from migration_tool.migrate import migrate_role -from migration_tool.utils import relationship_to_json -from rbac.env import ENVIRONMENT logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -34,16 +38,116 @@ class DualWriteException(Exception): pass +class ReplicationEventType(str, Enum): + """Replication event type.""" + + CREATE_SYSTEM_ROLE = "create_system_role" + UPDATE_SYSTEM_ROLE = "update_system_role" + DELETE_SYSTEM_ROLE = "delete_system_role" + CREATE_CUSTOM_ROLE = "create_custom_role" + UPDATE_CUSTOM_ROLE = "update_custom_role" + DELETE_CUSTOM_ROLE = "delete_custom_role" + ASSIGN_ROLE = "assign_role" + UNASSIGN_ROLE = "unassign_role" + CREATE_GROUP = "create_group" + UPDATE_GROUP = "update_group" + DELETE_GROUP = "delete_group" + + +class ReplicationEvent: + """What tuples changes to replicate.""" + + event_type: ReplicationEventType + partition_key: str + add: list[common_pb2.Relationship] + 
remove: list[common_pb2.Relationship] + + def __init__( + self, + type: ReplicationEventType, + partition_key: str, + add: list[common_pb2.Relationship] = [], + remove: list[common_pb2.Relationship] = [], + ): + """Initialize ReplicationEvent.""" + self.partition_key = partition_key + self.event_type = type + self.add = add + self.remove = remove + + +class RelationReplicator(ABC): + """Type responsible for replicating relations to Kessel Relations.""" + + @abstractmethod + def replicate(self, event: ReplicationEvent): + """Replicate the given event to Kessel Relations.""" + pass + + +class OutboxReplicator(RelationReplicator): + """Replicates relations via the outbox table.""" + + def __init__(self, role): + """Initialize OutboxReplicator.""" + self.role = role + + def replicate(self, event: ReplicationEvent): + """Replicate the given event to Kessel Relations via the Outbox.""" + payload = self._build_replication_event(event.add, event.remove) + self._save_replication_event(payload, event.event_type, event.partition_key) + + def _build_replication_event(self, relations_to_add, relations_to_remove): + """Build replication event.""" + logger.info("[Dual Write] Build Replication event for role(%s): '%s'", self.role.uuid, self.role.name) + add_json = [] + for relation in relations_to_add: + add_json.append(json_format.MessageToDict(relation)) + + remove_json = [] + for relation in relations_to_remove: + remove_json.append(json_format.MessageToDict(relation)) + + replication_event = {"relations_to_add": add_json, "relations_to_remove": remove_json} + return replication_event + + def _save_replication_event(self, payload, event_type, aggregateid): + """Save replication event.""" + logger.info( + "[Dual Write] Save replication event into outbox table for role(%s): '%s'", self.role.uuid, self.role.name + ) + logger.info("[Dual Write] Replication event: %s for role(%s): '%s'", payload, self.role.uuid, self.role.name) + # https://debezium.io/documentation/reference/stable/transformations/outbox-event-router.html#basic-outbox-table + outbox_record = Outbox.objects.create( + aggregatetype="RelationReplicationEvent", + aggregateid=aggregateid, + event_type=event_type, + payload=payload, + ) + outbox_record.delete() + + +class NoopReplicator(RelationReplicator): + """Noop replicator.""" + + def replicate(self, event: ReplicationEvent): + """Noop.""" + pass + + class RelationApiDualWriteHandler: """Class to handle Dual Write API related operations.""" - def __init__(self, role, event_type): + # TODO: add resource as parameter + def __init__(self, role, event_type: ReplicationEventType, replicator: Optional[RelationReplicator] = None): """Initialize RelationApiDualWriteHandler.""" if not self.replication_enabled(): + self._replicator = NoopReplicator() return try: - self.role_relations = [] - self.current_role_relations = [] + self._replicator = replicator if replicator else OutboxReplicator(role) + self.role_relations: list[common_pb2.Relationship] = [] + self.current_role_relations: list[common_pb2.Relationship] = [] self.role = role self.binding_mapping = None self.tenant_id = role.tenant_id @@ -54,11 +158,7 @@ def __init__(self, role, event_type): def replication_enabled(self): """Check whether replication enabled.""" - return ENVIRONMENT.get_value("REPLICATION_TO_RELATION_ENABLED", default=False, cast=bool) - - def get_current_role_relations(self): - """Get current roles relations.""" - return self.current_role_relations + return settings.REPLICATION_TO_RELATION_ENABLED is True def 
load_relations_from_current_state_of_role(self): """Generate relations from current state of role and UUIDs for v2 role and role binding from database.""" @@ -90,24 +190,36 @@ def load_relations_from_current_state_of_role(self): except Exception as e: raise DualWriteException(e) - def generate_replication_event_to_outbox(self, role): + def replicate_new_or_updated_role(self, role): """Generate replication event to outbox table.""" if not self.replication_enabled(): return self.role = role self._generate_relations_and_mappings_for_role() - return self.save_replication_event_to_outbox() + self._replicate() - def save_replication_event_to_outbox(self): - """Generate and store replication event to outbox table.""" + def replicate_deleted_role(self): + """Replicate removal of current role state.""" if not self.replication_enabled(): - return {} + return + self._replicate() + + def _replicate(self): + if not self.replication_enabled(): + return try: - replication_event = self._build_replication_event() - self._save_replication_event(replication_event) + self._replicator.replicate( + ReplicationEvent( + type=self.event_type, + # TODO: need to think about partitioning + # Maybe resource id + partition_key="rbactodo", + remove=self.current_role_relations, + add=self.role_relations, + ), + ) except Exception as e: raise DualWriteException(e) - return replication_event def _generate_relations_and_mappings_for_role(self): """Generate relations and mappings for a role with new UUIDs for v2 role and role bindings.""" @@ -134,35 +246,3 @@ def _generate_relations_and_mappings_for_role(self): return relations except Exception as e: raise DualWriteException(e) - - def _build_replication_event(self): - """Build replication event.""" - if not self.replication_enabled(): - return {} - logger.info("[Dual Write] Build Replication event for role(%s): '%s'", self.role.uuid, self.role.name) - relations_to_add = [] - for relation in self.role_relations: - relations_to_add.append(relationship_to_json(relation)) - - relations_to_remove = [] - for relation in self.current_role_relations: - relations_to_remove.append(relationship_to_json(relation)) - - replication_event = {"relations_to_add": relations_to_add, "relations_to_remove": relations_to_remove} - return replication_event - - def _save_replication_event(self, replication_event): - """Save replication event.""" - if not self.replication_enabled(): - return - logger.info( - "[Dual Write] Save replication event into outbox table for role(%s): '%s'", self.role.uuid, self.role.name - ) - logger.info( - "[Dual Write] Replication event: %s for role(%s): '%s'", replication_event, self.role.uuid, self.role.name - ) - # https://debezium.io/documentation/reference/stable/transformations/outbox-event-router.html#basic-outbox-table - outbox_record = Outbox.objects.create( - aggregatetype="Role", aggregateid=self.role.uuid, event_type=self.event_type, payload=replication_event - ) - outbox_record.delete() diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 2f48fd54b..0f9049f63 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -35,7 +35,11 @@ from management.notifications.notification_handlers import role_obj_change_notification_handler from management.permissions import RoleAccessPermission from management.querysets import get_role_queryset, user_has_perm -from management.role.relation_api_dual_write_handler import DualWriteException, RelationApiDualWriteHandler +from management.role.relation_api_dual_write_handler 
import ( + DualWriteException, + RelationApiDualWriteHandler, + ReplicationEventType, +) from management.role.serializer import AccessSerializer, RoleDynamicSerializer, RolePatchSerializer from management.utils import validate_uuid from rest_framework import mixins, serializers, status, viewsets @@ -467,8 +471,8 @@ def perform_create(self, serializer): """ role = serializer.save() - dual_write_handler = RelationApiDualWriteHandler(role, "CREATE") - dual_write_handler.generate_replication_event_to_outbox(role) + dual_write_handler = RelationApiDualWriteHandler(role, ReplicationEventType.CREATE_CUSTOM_ROLE) + dual_write_handler.replicate_new_or_updated_role(role) role_obj_change_notification_handler(role, "created", self.request.user) @@ -482,13 +486,15 @@ def perform_update(self, serializer): Assumes concurrent updates are prevented (e.g. with atomic block and locks). """ if self.action != "partial_update": - dual_write_handler = RelationApiDualWriteHandler(serializer.instance, "UPDATE") + dual_write_handler = RelationApiDualWriteHandler( + serializer.instance, ReplicationEventType.UPDATE_CUSTOM_ROLE + ) dual_write_handler.load_relations_from_current_state_of_role() role = serializer.save() if self.action != "partial_update": - dual_write_handler.generate_replication_event_to_outbox(role) + dual_write_handler.replicate_new_or_updated_role(role) role_obj_change_notification_handler(role, "updated", self.request.user) auditlog = AuditLog() @@ -506,13 +512,13 @@ def perform_destroy(self, instance: Role): error = {key: [_(message)]} raise serializers.ValidationError(error) - dual_write_handler = RelationApiDualWriteHandler(instance, "DELETE") + dual_write_handler = RelationApiDualWriteHandler(instance, ReplicationEventType.DELETE_CUSTOM_ROLE) dual_write_handler.load_relations_from_current_state_of_role() self.delete_policies_if_no_role_attached(instance) instance.delete() - dual_write_handler.save_replication_event_to_outbox() + dual_write_handler.replicate_deleted_role() role_obj_change_notification_handler(instance, "deleted", self.request.user) # Audit in perform_destroy because it needs access to deleted instance diff --git a/rbac/migration_tool/in_memory_tuples.py b/rbac/migration_tool/in_memory_tuples.py new file mode 100644 index 000000000..cd18e3163 --- /dev/null +++ b/rbac/migration_tool/in_memory_tuples.py @@ -0,0 +1,274 @@ +"""This module contains the in-memory representation of a tuple store.""" + +from typing import Callable, Hashable, Iterable, List, NamedTuple, Optional, Set, Tuple, TypeVar +from collections import namedtuple, defaultdict + +from kessel.relations.v1beta1.common_pb2 import Relationship +from management.role.relation_api_dual_write_handler import RelationReplicator + + +class RelationTuple(NamedTuple): + """Simple representation of a relation tuple.""" + + resource_type_namespace: str + resource_type_name: str + resource_id: str + relation: str + subject_type_namespace: str + subject_type_name: str + subject_id: str + subject_relation: str + + +T = TypeVar("T", bound=Hashable) + + +class InMemoryTuples: + """In-memory store for relation tuples.""" + + def __init__(self, tuples=None): + """Initialize the store.""" + self._tuples: Set[RelationTuple] = set(tuples) if tuples is not None else set() + + def _relationship_key(self, relationship: Relationship): + return RelationTuple( + resource_type_namespace=relationship.resource.type.namespace, + resource_type_name=relationship.resource.type.name, + resource_id=relationship.resource.id, + relation=relationship.relation, 
+ subject_type_namespace=relationship.subject.subject.type.namespace, + subject_type_name=relationship.subject.subject.type.name, + subject_id=relationship.subject.subject.id, + subject_relation=relationship.subject.relation, + ) + + def add(self, tuple: Relationship): + """Add a tuple to the store.""" + key = self._relationship_key(tuple) + self._tuples.add(key) + + def remove(self, tuple: Relationship): + """Remove a tuple from the store.""" + key = self._relationship_key(tuple) + self._tuples.discard(key) + + def write(self, add: Iterable[Relationship], remove: Iterable[Relationship]): + """Add / remove tuples.""" + for tuple in add: + self.add(tuple) + for tuple in remove: + self.remove(tuple) + + def find_tuples(self, predicate: Callable[[RelationTuple], bool]) -> List[RelationTuple]: + """Find tuples matching the given predicate.""" + return [rel for rel in self._tuples if predicate(rel)] + + def find_tuples_grouped( + self, predicate: Callable[[RelationTuple], bool], group_by: Callable[[RelationTuple], T] + ) -> dict[T, List[RelationTuple]]: + """Filter tuples and group them by a key.""" + grouped_tuples: dict[T, List[RelationTuple]] = defaultdict(list) + for rel in self._tuples: + if predicate(rel): + key = group_by(rel) + grouped_tuples[key].append(rel) + return grouped_tuples + + def find_group_with_tuples( + self, + predicates: List[Callable[[RelationTuple], bool]], + group_by: Callable[[RelationTuple], T], + group_filter: Callable[[T], bool] = lambda _: True, + require_full_match: bool = False, + match_once: bool = True, + ) -> Tuple[dict[T, List[RelationTuple]], dict[T, List[RelationTuple]]]: + """ + Find groups of tuples matching given predicates, grouped by a key. + + Groups the tuples using the provided `group_by` function and tests the + predicates against each group independently. + + For each group, this method attempts to find tuples that match all the + predicates, ensuring that no tuple is used for more than one predicate + within the group. + + If `require_full_match` is True, the method also ensures that all tuples + in the group are matched by the predicates (i.e., no unmatched tuples + remain in the group). If any group does not meet the criteria, it is + excluded from the results. + + Args: + predicates: A list of predicates (functions) that each accept a + RelationTuple and return a bool indicating a match. + group_by: A function that takes a RelationTuple and returns a key + to group by (e.g., a resource ID). + require_full_match: If True, only groups where all tuples are matched + by the predicates (i.e. there are no remaining unmatched tuples) + are included in the results. + group_filter: A predicate that filters the groups to include in the + results. Useful when you only want to test a subset of tuples e.g. + a specific resource type. + match_once: If True, each predicate is only used once in the matching process. + Otherwise, each tuple in the group will be tested by each predicate until + one predicate matches the tuple. + + Returns: + A tuple containing two dictionaries: + - The first dictionary contains the groups that matched all predicates. + - The second dictionary contains the groups that did not match all predicates. 
+ """ + # Group the tuples by the specified key + grouped_tuples: dict[T, List[RelationTuple]] = defaultdict(list) + for rel in self._tuples: + key = group_by(rel) + if group_filter(key): + grouped_tuples[key].append(rel) + + matching_groups: dict[T, List[RelationTuple]] = {} + unmatched_groups: dict[T, List[RelationTuple]] = {} + + # Iterate over each group + for key, group_tuples in grouped_tuples.items(): + remaining_tuples = set(group_tuples) + remaining_predicates = list(predicates) if match_once else predicates + i = 0 + matching_tuples = [] + success = True + + # Attempt to match all predicates within the group + # Using each predicate only once if requested + while remaining_predicates and i < len(remaining_predicates): + predicate = remaining_predicates[i] + found = False + for rel in remaining_tuples: + if predicate(rel): + matching_tuples.append(rel) + remaining_tuples.remove(rel) + found = True + break # Move to next predicate + if not found: + success = False + break # Predicate not satisfied in this group + if match_once: + remaining_predicates.pop(i) + else: + i += 1 + + if require_full_match and remaining_tuples or not success: + unmatched_groups[key] = group_tuples + continue + + matching_groups[key] = matching_tuples + + return matching_groups, unmatched_groups + + def __str__(self): + return str(self._tuples) + + def __repr__(self): + return f"InMemoryTuples({repr(self._tuples)})" + + +class TuplePredicate: + """A predicate that can be used to filter relation tuples.""" + + def __init__(self, func, repr): + self.func = func + self.repr = repr + + def __call__(self, *args, **kwargs): + return self.func(*args, **kwargs) + + def __repr__(self): + return self.repr + + +def all_of(*predicates: Callable[[RelationTuple], bool]) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if all of the given predicates are true.""" + + def predicate(rel: RelationTuple) -> bool: + return all(p(rel) for p in predicates) + + return TuplePredicate(predicate, f"all_of({', '.join([str(p) for p in predicates])})") + + +def one_of(*predicates: Callable[[RelationTuple], bool]) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if any of the given predicates are true.""" + + if len(predicates) == 1: + return predicates[0] + + def predicate(rel: RelationTuple) -> bool: + return any(p(rel) for p in predicates) + + return TuplePredicate(predicate, f"one_of({', '.join([str(p) for p in predicates])})") + + +def resource_type(namespace: str, name: str) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the resource type matches the given namespace and name.""" + + def predicate(rel: RelationTuple) -> bool: + return rel.resource_type_namespace == namespace and rel.resource_type_name == name + + return TuplePredicate(predicate, f'resource_type("{namespace}", "{name}")') + + +def resource_id(id: str) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the resource ID matches the given ID.""" + + def predicate(rel: RelationTuple) -> bool: + return rel.resource_id == id + + return TuplePredicate(predicate, f'resource_id("{id}")') + + +def resource(namespace: str, name: str, id: str) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the resource matches the given namespace and name.""" + return all_of(resource_type(namespace, name), resource_id(id)) + + +def relation(relation: str) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the resource relation matches the 
given relation.""" + + def predicate(rel: RelationTuple) -> bool: + return rel.relation == relation + + return TuplePredicate(predicate, f'relation("{relation}")') + + +def subject_type(namespace: str, name: str, relation: str = "") -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the subject type matches the given namespace and name.""" + + def predicate(rel: RelationTuple) -> bool: + return ( + rel.subject_type_namespace == namespace + and rel.subject_type_name == name + and rel.subject_relation == relation + ) + + return TuplePredicate(predicate, f'subject_type("{namespace}", "{name}")') + + +def subject_id(id: str) -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the subject ID matches the given ID.""" + + def predicate(rel: RelationTuple) -> bool: + return rel.subject_id == id + + return TuplePredicate(predicate, f'subject_id("{id}")') + + +def subject(namespace: str, name: str, id: str, relation: str = "") -> Callable[[RelationTuple], bool]: + """Return a predicate that is true if the subject matches the given namespace and name.""" + return all_of(subject_type(namespace, name, relation), subject_id(id)) + + +class InMemoryRelationReplicator(RelationReplicator): + """Replicates relations to an in-memory store.""" + + def __init__(self, store: InMemoryTuples = InMemoryTuples()): + """Initialize the replicator.""" + self.store = store + + def replicate(self, event): + self.store.write(event.add, event.remove) diff --git a/rbac/migration_tool/ingest.py b/rbac/migration_tool/ingest.py index 0dca33c38..3aed3a1f5 100644 --- a/rbac/migration_tool/ingest.py +++ b/rbac/migration_tool/ingest.py @@ -29,12 +29,14 @@ def aggregate_v1_role(role: Role) -> V1role: This maps the RBAC model to preloaded, navigable objects with the key data broken down. 
""" perm_res_defs: dict[Tuple[str, str], list[V1resourcedef]] = {} - perm_list: list[str] = [] + default_perm_list: list[str] = [] role_id = str(role.uuid) # Determine v1 permissions for access in role.access.all(): + default = True for resource_def in access.resourceDefinitions.all(): + default = False attri_filter = resource_def.attributeFilter # Some malformed data in db if attri_filter["operation"] == "in": @@ -44,13 +46,19 @@ def aggregate_v1_role(role: Role) -> V1role: continue res_def = V1resourcedef(attri_filter["key"], attri_filter["operation"], json.dumps(attri_filter["value"])) if res_def.resource_id != "": + # TODO: Need to bind against "ungrouped hosts" for inventory add_element(perm_res_defs, (role_id, access.permission.permission), res_def) - perm_list.append(access.permission.permission) + if default: + default_perm_list.append(access.permission.permission) v1_perms = [] - for perm in perm_list: + for perm in default_perm_list: + perm_parts = perm.split(":") + v1_perm = V1permission(perm_parts[0], perm_parts[1], perm_parts[2], frozenset()) + v1_perms.append(v1_perm) + + for (role_id, perm), res_defs in perm_res_defs.items(): perm_parts = perm.split(":") - res_defs = [res_def for res_def in perm_res_defs.get((role_id, perm), [])] v1_perm = V1permission(perm_parts[0], perm_parts[1], perm_parts[2], frozenset(res_defs)) v1_perms.append(v1_perm) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 3ea31f079..546da2086 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -47,7 +47,9 @@ def get_kessel_relation_tuples( for v2_role_binding in v2_role_bindings: relationships.append( - create_relationship("role_binding", v2_role_binding.id, "role", v2_role_binding.role.id, "granted") + create_relationship( + ("rbac", "role_binding"), v2_role_binding.id, ("rbac", "role"), v2_role_binding.role.id, "granted" + ) ) v2_role_data = v2_role_binding.role @@ -61,10 +63,16 @@ def get_kessel_relation_tuples( } for perm in v2_role_binding.role.permissions: - relationships.append(create_relationship("role", v2_role_binding.role.id, "user", "*", perm)) + relationships.append( + create_relationship(("rbac", "role"), v2_role_binding.role.id, ("rbac", "user"), "*", perm) + ) for group in v2_role_binding.groups: # These might be duplicate but it is OK, spiceDB will handle duplication through touch - relationships.append(create_relationship("role_binding", v2_role_binding.id, "group", group.id, "subject")) + relationships.append( + create_relationship( + ("rbac", "role_binding"), v2_role_binding.id, ("rbac", "group"), group.id, "subject" + ) + ) for bound_resource in v2_role_binding.resources: # Is this a workspace binding, but not to the root workspace? @@ -72,7 +80,10 @@ def get_kessel_relation_tuples( # All other resource-resource or resource-workspace relations # which may be implied or necessary are intentionally ignored. # These should come from the apps that own the resource. - if bound_resource.resource_type == "workspace" and not bound_resource.resourceId == default_workspace: + if ( + bound_resource.resource_type == ("rbac", "workspace") + and not bound_resource.resourceId == default_workspace + ): # This is not strictly necessary here and the relation may be a duplicate. # Once we have more Workspace API / Inventory Group migration progress, # this block can and probably should be removed. 
@@ -81,7 +92,7 @@ def get_kessel_relation_tuples( create_relationship( bound_resource.resource_type, bound_resource.resourceId, - "workspace", + ("rbac", "workspace"), default_workspace, "parent", ) @@ -91,7 +102,7 @@ def get_kessel_relation_tuples( create_relationship( bound_resource.resource_type, bound_resource.resourceId, - "role_binding", + ("rbac", "role_binding"), v2_role_binding.id, "user_grant", ) @@ -125,11 +136,13 @@ def migrate_workspace(tenant: Tenant, write_relationships: bool): root_workspace = f"root-workspace-{tenant.org_id}" # Org id represents the default workspace for now relationships = [ - create_relationship("workspace", tenant.org_id, "workspace", root_workspace, "parent"), - create_relationship("workspace", root_workspace, "tenant", tenant.org_id, "parent"), + create_relationship(("rbac", "workspace"), tenant.org_id, ("rbac", "workspace"), root_workspace, "parent"), + create_relationship(("rbac", "workspace"), root_workspace, ("rbac", "tenant"), tenant.org_id, "parent"), ] # Include realm for tenant - relationships.append(create_relationship("tenant", str(tenant.org_id), "realm", settings.ENV_NAME, "realm")) + relationships.append( + create_relationship(("rbac", "tenant"), str(tenant.org_id), ("rbac", "realm"), settings.ENV_NAME, "realm") + ) output_relationships(relationships, write_relationships) return root_workspace, tenant.org_id @@ -137,7 +150,7 @@ def migrate_workspace(tenant: Tenant, write_relationships: bool): def migrate_users(tenant: Tenant, write_relationships: bool): """Write users relationship to tenant.""" relationships = [ - create_relationship("tenant", str(tenant.org_id), "user", str(principal.uuid), "member") + create_relationship(("rbac", "tenant"), str(tenant.org_id), ("rbac", "user"), str(principal.uuid), "member") for principal in tenant.principal_set.all() ] output_relationships(relationships, write_relationships) @@ -152,7 +165,9 @@ def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): tenant.principal_set.filter(cross_account=False) if group.platform_default else group.principals.all() ) for user in user_set: - relationships.append(create_relationship("group", str(group.uuid), "user", str(user.uuid), "member")) + relationships.append( + create_relationship(("rbac", "group"), str(group.uuid), ("rbac", "user"), str(user.uuid), "member") + ) output_relationships(relationships, write_relationships) diff --git a/rbac/migration_tool/models.py b/rbac/migration_tool/models.py index f8931f046..94eb50b8a 100644 --- a/rbac/migration_tool/models.py +++ b/rbac/migration_tool/models.py @@ -16,6 +16,7 @@ """ from dataclasses import dataclass +from typing import Tuple @dataclass(frozen=True) @@ -89,7 +90,7 @@ class V2group: class V2boundresource: """V2 bound resource definition.""" - resource_type: str + resource_type: Tuple[str, str] resourceId: str diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index a49c97b72..ee5c72927 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -18,7 +18,7 @@ import json import logging import uuid -from typing import Callable, FrozenSet, Optional, Type +from typing import Callable, FrozenSet, Optional, Tuple, Type from django.conf import settings from management.models import BindingMapping @@ -108,24 +108,24 @@ def v1_role_to_v2_bindings( if not is_for_enabled_resource(v1_perm): continue for resource_def in 
v1_perm.resourceDefs: - resource_type = ( - "workspace" - if v1_perm.app == "inventory" - else v1_attributefilter_resource_type_to_v2_resource_type(resource_def.resource_type) - ) + resource_type = attribute_key_to_v2_related_resource_type(resource_def.resource_type) + if resource_type is None: + # Resource type not mapped to v2 + continue for resource_id in split_resourcedef_literal(resource_def): - if resource_type == "workspace": - if resource_id is None: - resource_id = default_workspace + if resource_id is None: + raise ValueError(f"Resource ID is None for {resource_def}") add_element( perm_groupings, V2boundresource(resource_type, resource_id), v2_perm, ) + elif default_workspace is None: + logger.info(f"Cannot create role binding for role; no resource to bind to: {v1_role.id}") else: add_element( perm_groupings, - V2boundresource("workspace", default_workspace), + V2boundresource(("rbac", "workspace"), default_workspace), v2_perm, ) # Project permission sets to roles per set of resources @@ -277,9 +277,11 @@ def v1_perm_to_v2_perm(v1_permission): ) -def v1_attributefilter_resource_type_to_v2_resource_type(resourceType: str): # Format is app.type +V2_RESOURCE_BY_ATTRIBUTE = {"group.id": ("rbac", "workspace")} + + +def attribute_key_to_v2_related_resource_type(resourceType: str) -> Optional[Tuple[str, str]]: """Convert a V1 resource type to a V2 resource type.""" - parts = resourceType.split(".", 1) - app = cleanNameForV2SchemaCompatibility(parts[0]) - resource = cleanNameForV2SchemaCompatibility(parts[1]) - return f"{app}/{resource}" + if resourceType in V2_RESOURCE_BY_ATTRIBUTE: + return V2_RESOURCE_BY_ATTRIBUTE[resourceType] + return None diff --git a/rbac/migration_tool/utils.py b/rbac/migration_tool/utils.py index 090c2fa5d..e09d24e8b 100644 --- a/rbac/migration_tool/utils.py +++ b/rbac/migration_tool/utils.py @@ -2,6 +2,7 @@ import json import logging +from typing import Tuple import grpc from django.conf import settings @@ -38,9 +39,9 @@ def __init__(self, error: grpc.RpcError): self.metadata = json.loads(str(info.metadata).replace("'", '"')) -def validate_and_create_obj_ref(obj_name, obj_id): +def validate_and_create_obj_ref(obj_name: Tuple[str, str], obj_id): """Validate and create a resource.""" - object_type = common_pb2.ObjectType(name=obj_name, namespace="rbac") + object_type = common_pb2.ObjectType(name=obj_name[1], namespace=obj_name[0]) try: validate_all(object_type) except ValidationFailed as err: @@ -53,7 +54,9 @@ def validate_and_create_obj_ref(obj_name, obj_id): return obj_ref -def create_relationship(resource_name, resource_id, subject_name, subject_id, relation): +def create_relationship( + resource_name: Tuple[str, str], resource_id, subject_name: Tuple[str, str], subject_id, relation +): """Create a relationship between a resource and a subject.""" return common_pb2.Relationship( resource=validate_and_create_obj_ref(resource_name, resource_id), diff --git a/rbac/rbac/settings.py b/rbac/rbac/settings.py index 4d6dea141..7741a2006 100644 --- a/rbac/rbac/settings.py +++ b/rbac/rbac/settings.py @@ -347,6 +347,7 @@ ROLE_CREATE_ALLOW_LIST = ENVIRONMENT.get_value("ROLE_CREATE_ALLOW_LIST", default="").split(",") # Dual write migration configuration +REPLICATION_TO_RELATION_ENABLED = ENVIRONMENT.bool("REPLICATION_TO_RELATION_ENABLED", default=False) V2_MIGRATION_APP_EXCLUDE_LIST = ENVIRONMENT.get_value("V2_MIGRATION_APP_EXCLUDE_LIST", default="").split(",") V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST = ENVIRONMENT.get_value( "V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST", 
default="" diff --git a/tests/identity_request.py b/tests/identity_request.py index 2b5d89a88..8dc50f44e 100644 --- a/tests/identity_request.py +++ b/tests/identity_request.py @@ -22,13 +22,14 @@ from json import dumps as json_dumps from unittest.mock import Mock -from django.test import TestCase +from django.test import TestCase, override_settings from faker import Faker from api.models import Tenant from api.common import RH_IDENTITY_HEADER +@override_settings(REPLICATION_TO_RELATION_ENABLED=True) class IdentityRequest(TestCase): """Parent Class for IAM test cases.""" @@ -38,7 +39,6 @@ class IdentityRequest(TestCase): def setUpClass(cls): """Set up each test class.""" super().setUpClass() - os.environ["REPLICATION_TO_RELATION_ENABLED"] = "True" cls.customer_data = cls._create_customer_data() cls.user_data = cls._create_user_data() cls.request_context = cls._create_request_context(cls.customer_data, cls.user_data) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py new file mode 100644 index 000000000..115b30a0d --- /dev/null +++ b/tests/management/role/test_dual_write.py @@ -0,0 +1,356 @@ +# +# Copyright 2019 Red Hat, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# +"""Test tuple changes for RBAC operations.""" + +from django.test import TestCase, override_settings +from management.group.model import Group +from management.permission.model import Permission +from management.policy.model import Policy +from management.principal.model import Principal +from management.role.model import Access, ResourceDefinition, Role +from management.role.relation_api_dual_write_handler import RelationApiDualWriteHandler, ReplicationEventType +from migration_tool.in_memory_tuples import ( + InMemoryRelationReplicator, + InMemoryTuples, + all_of, + one_of, + relation, + resource, + resource_id, + resource_type, + subject, + subject_type, +) + + +from api.models import Tenant + + +@override_settings(REPLICATION_TO_RELATION_ENABLED=True) +class DualWriteTestCase(TestCase): + """ + Base TestCase for testing dual write logic. + + Use "given" methods to set up state like users would. Use "expect" methods to assert the state of the system. + + "Given" methods are treated like distinct transactions, which each replicate tuples via dual write. 
+ """ + + def setUp(self): + """Set up the dual write tests.""" + super().setUp() + self.tuples = InMemoryTuples() + self.fixture = RbacFixture() + self.tenant = self.fixture.new_tenant(name="tenant", org_id="1234567") + + def default_workspace(self) -> str: + """Return the default workspace ID.""" + assert self.tenant.org_id is not None, "Tenant org_id should not be None" + return self.tenant.org_id + + def dual_write_handler(self, role: Role, event_type: ReplicationEventType) -> RelationApiDualWriteHandler: + """Create a RelationApiDualWriteHandler for the given role and event type.""" + return RelationApiDualWriteHandler(role, event_type, replicator=InMemoryRelationReplicator(self.tuples)) + + def given_v1_system_role(self, name: str, permissions: list[str]) -> Role: + """Create a new system role with the given ID and permissions.""" + role = self.fixture.new_system_role(name=name, permissions=permissions) + dual_write = self.dual_write_handler(role, ReplicationEventType.CREATE_SYSTEM_ROLE) + dual_write.replicate_new_or_updated_role(role) + return role + + def given_v1_role(self, name: str, default: list[str], **kwargs: list[str]) -> Role: + """Create a new custom role with the given ID and workspace permissions.""" + role = self.fixture.new_custom_role( + name=name, + tenant=self.tenant, + resource_access=self._workspace_access_to_resource_definition(default, **kwargs), + ) + dual_write = self.dual_write_handler(role, ReplicationEventType.CREATE_CUSTOM_ROLE) + dual_write.replicate_new_or_updated_role(role) + return role + + def given_update_to_v1_role(self, role: Role, default: list[str], **kwargs: list[str]): + """Update the given role with the given workspace permissions.""" + dual_write = self.dual_write_handler(role, ReplicationEventType.UPDATE_CUSTOM_ROLE) + dual_write.load_relations_from_current_state_of_role() + role = self.fixture.update_custom_role( + role, + resource_access=self._workspace_access_to_resource_definition(default, **kwargs), + ) + dual_write.replicate_new_or_updated_role(role) + return role + + def _workspace_access_to_resource_definition(self, default: list[str], **kwargs: list[str]): + return [ + (default, {}), + *[ + (permissions, {"key": "group.id", "operation": "equal", "value": workspace}) + for workspace, permissions in kwargs.items() + ], + ] + + def given_group(self, name: str, users: list[str]) -> Group: + """Create a new group with the given name and users.""" + # TODO: replicate group membership + return self.fixture.new_group(name=name, users=users, tenant=self.tenant) + + def given_policy(self, group: Group, roles: list[Role]) -> Policy: + """Assign the [roles] to the [group].""" + # TODO: replicate role assignment + return self.fixture.add_role_to_group(roles[0], group, self.tenant) + + def expect_1_v2_role_with_permissions(self, permissions: list[str]) -> str: + """Assert there is a role matching the given permissions and return its ID.""" + roles, unmatched = self.tuples.find_group_with_tuples( + [ + all_of(resource_type("rbac", "role"), relation(permission.replace(":", "_"))) + for permission in permissions + ], + group_by=lambda t: (t.resource_type_namespace, t.resource_type_name, t.resource_id), + group_filter=lambda group: group[0] == "rbac" and group[1] == "role", + require_full_match=True, + ) + + num_roles = len(roles) + self.assertEqual( + num_roles, + 1, + f"Expected exactly 1 role with permissions {permissions}, but got {num_roles}.\n" + f"Matched roles: {roles}.\n" + f"Unmatched roles: {unmatched}", + ) + _, _, id = 
next(iter(roles.keys())) + return id + + def expect_num_role_bindings(self, num: int): + """Assert there are [num] role bindings.""" + role_bindings = self.tuples.find_tuples_grouped( + subject_type("rbac", "role_binding"), + group_by=lambda t: (t.resource_type_namespace, t.resource_type_name, t.resource_id), + ) + num_role_bindings = len(role_bindings) + self.assertEqual( + num_role_bindings, + num, + f"Expected exactly {num} role bindings, but got {num_role_bindings}.\n" f"Role bindings: {role_bindings}", + ) + + def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[str], for_groups: list[str]): + """Assert there is a role binding with the given roles and groups.""" + # Find all bindings for the given workspace + resources = self.tuples.find_tuples_grouped( + all_of(resource("rbac", "workspace", workspace), relation("user_grant")), + group_by=lambda t: (t.resource_type_namespace, t.resource_type_name, t.resource_id), + ) + + # Now of those bound to the workspace, find bindings that bind the given roles and groups + # (we expect only 1) + role_bindings, unmatched = self.tuples.find_group_with_tuples( + [ + all_of( + resource_type("rbac", "role_binding"), + one_of(*[resource_id(t.subject_id) for _, tuples in resources.items() for t in tuples]), + relation("granted"), + subject("rbac", "role", role_id), + ) + for role_id in for_v2_roles + ] + + [ + all_of( + resource_type("rbac", "role_binding"), + relation("subject"), + subject("rbac", "group", group_id), + ) + for group_id in for_groups + ], + group_by=lambda t: (t.resource_type_namespace, t.resource_type_name, t.resource_id), + group_filter=lambda group: group[0] == "rbac" and group[1] == "role_binding", + require_full_match=True, + ) + + num_role_bindings = len(role_bindings) + self.assertEqual( + num_role_bindings, + 1, + f"Expected exactly 1 role binding against workspace {workspace} " + f"with roles {for_v2_roles} and groups {for_groups}, " + f"but got {len(role_bindings)}.\n" + f"Matched role bindings: {role_bindings}.\n" + f"Unmatched role bindings: {unmatched}", + ) + + +class DualWriteSystemRolesTestCase(DualWriteTestCase): + """Test dual write logic for system roles.""" + + def test_system_role_grants_access_to_default_workspace(self): + """Create role binding only when system role is bound to group.""" + role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) + group = self.given_group("g1", ["u1", "u2"]) + + self.expect_num_role_bindings(0) + + self.given_policy(group, roles=[role]) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[group.id]) + self.expect_num_role_bindings(1) + + +class DualWriteCustomRolesTestCase(DualWriteTestCase): + """Test dual write logic when we are working with custom roles.""" + + def test_role_with_same_default_and_resource_permission_reuses_same_v2_role(self): + """With same resource permissions (when one of those is the default workspace), reuse the same v2 role.""" + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + group = self.given_group("g1", ["u1", "u2"]) + self.given_policy(group, roles=[role]) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + # TODO: assert group once group replication is implemented + 
self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[id], for_groups=[]) + + def test_add_permissions_to_role(self): + """Modify the role in place when adding permissions.""" + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + self.given_update_to_v1_role( + role, + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write", "app2:hosts:read"], + ) + + role_for_default = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + role_for_ws_2 = self.expect_1_v2_role_with_permissions( + ["app1:hosts:read", "inventory:hosts:write", "app2:hosts:read"] + ) + + # TODO: assert group once group replication is implemented + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[] + ) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[]) + + def test_remove_permissions_from_role(self): + """Modify the role in place when removing permissions.""" + pass + + def test_delete_role(self): + """Delete the role and its bindings when deleting a custom role.""" + pass + + def test_remove_resource_removes_role_binding(self): + """Remove the role binding when removing the resource from attribute filter.""" + pass + + def test_two_roles_with_same_resource_permissions_create_two_v2_roles(self): + """Create two v2 roles when two roles have the same resource permissions across different resources.""" + pass + + +class RbacFixture: + """RBAC Fixture.""" + + def __init__(self): + """Initialize the RBAC fixture.""" + self.public_tenant = Tenant.objects.create(tenant_name="public") + + def new_tenant(self, name: str, org_id: str) -> Tenant: + """Create a new tenant with the given name and organization ID.""" + return Tenant.objects.create(tenant_name=name, org_id=org_id) + + def new_system_role(self, name: str, permissions: list[str]) -> Role: + """Create a new system role with the given name and permissions.""" + role = Role.objects.create(name=name, system=True, tenant=self.public_tenant) + + access_list = [ + Access( + permission=Permission.objects.get_or_create(permission=permission, tenant=self.public_tenant)[0], + role=role, + tenant=self.public_tenant, + ) + for permission in permissions + ] + + Access.objects.bulk_create(access_list) + + return role + + def new_custom_role(self, name: str, resource_access: list[tuple[list[str], dict]], tenant: Tenant) -> Role: + """ + Create a new custom role. + + [resource_access] is a list of tuples of the form (permissions, attribute_filter). + """ + role = Role.objects.create(name=name, system=False, tenant=tenant) + return self.update_custom_role(role, resource_access) + + def update_custom_role(self, role: Role, resource_access: list[tuple[list[str], dict]]) -> Role: + """ + Update a custom role. + + [resource_access] is a list of tuples of the form (permissions, attribute_filter). 
+ """ + role.access.all().delete() + + for permissions, attribute_filter in resource_access: + access_list = [ + Access( + permission=Permission.objects.get_or_create(permission=permission, tenant=role.tenant)[0], + role=role, + tenant=role.tenant, + ) + for permission in permissions + ] + + Access.objects.bulk_create(access_list) + + if attribute_filter: + for access in access_list: + ResourceDefinition.objects.create( + attributeFilter=attribute_filter, access=access, tenant=role.tenant + ) + + return role + + def new_group(self, name: str, users: list[str], tenant: Tenant) -> Group: + """Create a new group with the given name, users, and tenant.""" + group = Group.objects.create(name=name, tenant=tenant) + + principals = [Principal.objects.get_or_create(username=username, tenant=tenant)[0] for username in users] + + group.principals.add(*principals) + + return group + + def add_role_to_group(self, role: Role, group: Group, tenant: Tenant) -> Policy: + """Add a role to a group for a given tenant and return the policy.""" + policy, _ = Policy.objects.get_or_create(name=f"System Policy_{group.name}", group=group, tenant=tenant) + policy.roles.add(role) + policy.save() + return policy diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index b7ac764c2..9e1e45871 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -93,16 +93,17 @@ def relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid): def relation_api_tuple(resource_type, resource_id, relation, subject_type, subject_id): + """Helper function for creating a relation tuple in json.""" return { "resource": relation_api_resource(resource_type, resource_id), "relation": relation, - "subject": relation_api_resource(subject_type, subject_id), + "subject": {"subject": relation_api_resource(subject_type, subject_id)}, } def relation_api_resource(type_resource, id_resource): """Helper function for creating a relation resource in json.""" - return {"type": type_resource, "id": id_resource} + return {"type": {"namespace": "rbac", "name": type_resource}, "id": id_resource} def find_in_list(list, predicate): @@ -119,6 +120,7 @@ class RoleViewsetTests(IdentityRequest): def setUp(self): """Set up the role viewset tests.""" super().setUp() + self.maxDiff = None sys_role_config = {"name": "system_role", "display_name": "system_display", "system": True} def_role_config = {"name": "default_role", "display_name": "default_display", "platform_default": True} @@ -344,7 +346,7 @@ def test_create_role_success(self, send_kafka_message): ) @override_settings(V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST=["app"]) - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") def test_role_replication_exluded_resource(self, mock_method): """Test that excluded resources do not replicate via dual write.""" # Set up @@ -369,18 +371,22 @@ def test_role_replication_exluded_resource(self, mock_method): self.assertEqual([], actual_sorted["relations_to_remove"]) self.assertEqual(3, len(to_add), "too many relations (should not add relations for excluded resource)") - role_binding = find_in_list(to_add, lambda r: r["resource"]["type"] == "role_binding")["resource"]["id"] - workspace = find_in_list(to_add, lambda r: r["resource"]["type"] == "workspace") + role_binding = find_in_list(to_add, lambda r: r["resource"]["type"]["name"] == 
"role_binding")["resource"][ + "id" + ] + workspace = find_in_list(to_add, lambda r: r["resource"]["type"]["name"] == "workspace") self.assertEquals( - role_binding, workspace["subject"]["id"], "expected binding to workspace (not to excluded resource)" + role_binding, + workspace["subject"]["subject"]["id"], + "expected binding to workspace (not to excluded resource)", ) - role = find_in_list(to_add, lambda r: r["resource"]["type"] == "role") + role = find_in_list(to_add, lambda r: r["resource"]["type"]["name"] == "role") self.assertEquals(role["relation"], "app_all_read", "expected workspace permission") - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") def test_create_role_with_display_success(self, mock_method): """Test that we can create a role.""" role_name = "roleD" @@ -1405,7 +1411,7 @@ def test_update_role_invalid_permission(self): response = client.put(url, test_data, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") def test_update_role(self, mock_method): """Test that updating a role with an invalid permission returns an error.""" # Set up @@ -1528,7 +1534,7 @@ def test_update_role_permission_does_not_exist_fail(self): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(response.data.get("errors")[0].get("detail"), f"Permission does not exist: {permission}") - @patch("management.role.relation_api_dual_write_handler.RelationApiDualWriteHandler._save_replication_event") + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") def test_delete_role(self, mock_method): """Test that we can delete an existing role.""" role_name = "roleA" From 570e9e51cf3540b6e4b7efc6a65728d1b25104df Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Mon, 23 Sep 2024 10:31:34 -0400 Subject: [PATCH 18/55] RHCLOUD-35040: Updating role with new permissions fails due to no matching existing custom role (#1199) * RHCLOUD-35040: Updating role with new permissions fails due to no matching existing custom role Update mapping algorithm to per v2 role-resource See this miro for details: https://miro.com/app/board/uXjVNds81pI=/?moveToWidget=3458764597029374128&cot=14 Remove tuples before add (due to set semantics) Actually run the binding_mappings query Update role view tests for BindingMapping change and changes lost from merge (?) 
Lock bindings Pass Role model through; various other tweaks Simplify group data in mappings; inline Role to avoid circular import Add note about policies for system roles Add saving binding mappings to migrator Add more test cases; fix group id in tuples Remove some dead code / simplify attribute filter parsing * Fix lint issues * Remove extra whitespace * Update test for new migration algorithm * Skip system role test (not implemented yet) --- .../migrations/0051_bindingmapping.py | 7 +- rbac/management/role/model.py | 92 +++++-- .../role/relation_api_dual_write_handler.py | 88 ++++-- rbac/management/role/view.py | 4 +- rbac/migration_tool/in_memory_tuples.py | 15 +- rbac/migration_tool/ingest.py | 67 +---- rbac/migration_tool/migrate.py | 106 +++---- rbac/migration_tool/models.py | 59 ++-- ...sharedSystemRolesReplicatedRoleBindings.py | 260 +++++++----------- tests/management/role/test_dual_write.py | 134 ++++++++- tests/management/role/test_view.py | 21 +- tests/migration_tool/tests_migrate.py | 48 ++-- 12 files changed, 470 insertions(+), 431 deletions(-) diff --git a/rbac/management/migrations/0051_bindingmapping.py b/rbac/management/migrations/0051_bindingmapping.py index b13ccb139..42d38c98a 100644 --- a/rbac/management/migrations/0051_bindingmapping.py +++ b/rbac/management/migrations/0051_bindingmapping.py @@ -18,12 +18,15 @@ class Migration(migrations.Migration): ("mappings", models.JSONField(default=dict)), ( "role", - models.OneToOneField( + models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, - related_name="binding_mapping", + related_name="binding_mappings", to="management.role", ), ), + ("resource_id", models.CharField(max_length=256, null=False)), + ("resource_type_name", models.CharField(max_length=256, null=False)), + ("resource_type_namespace", models.CharField(max_length=256, null=False)), ], ), ] diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index c8e0c6358..fe02da17e 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -17,6 +17,7 @@ """Model for role management.""" import logging +from typing import Union from uuid import uuid4 from django.conf import settings @@ -27,6 +28,7 @@ from management.cache import AccessCache from management.models import Permission, Principal from management.rbac_fields import AutoDateTimeField +from migration_tool.models import V2boundresource, V2role, V2rolebinding from api.models import TenantAwareModel @@ -121,36 +123,68 @@ class Meta: class BindingMapping(models.Model): """V2 binding Mapping definition.""" - # The ID of the role binding - # id = models.UUIDField(primary_key=True, default=uuid4, editable=False) - # The relations + # JSON encoding of migration_tool.models.V2rolebinding mappings = models.JSONField(default=dict) - role = models.OneToOneField(Role, on_delete=models.CASCADE, related_name="binding_mapping") - # resource_type_namespace = models.CharField(max_length=20, null=False) - # resource_type_name = models.CharField(max_length=20, null=False) - # resource_id = models.CharField(max_length=50, null=False) - - def find_role_binding_by_v2_role(self, v2_role_id): - """Find role binding by v2 role id.""" - role_binding_id = None - for role_binding_uuid, data in self.mappings.items(): - if data["v2_role_uuid"] == v2_role_id: - role_binding_id = str(role_binding_uuid) - - if role_binding_id is None: - raise Exception(f"role_binding_id not found in mappings for v2 role {v2_role_id} ") - return role_binding_id - - def find_v2_role_by_permission(self, 
permissions): - """Find v2 role by permissions.""" - v2_uuid = None - for v1_role_uuid, data in self.mappings.items(): - if set(data["permissions"]) == set(permissions): - v2_uuid = data["v2_role_uuid"] - - if v2_uuid is None: - raise Exception(f"v2_uuid not found in mappings for v1 role {self.role.uuid}") - return v2_uuid + role = models.ForeignKey(Role, on_delete=models.CASCADE, related_name="binding_mappings") + resource_type_namespace = models.CharField(max_length=256, null=False) + resource_type_name = models.CharField(max_length=256, null=False) + resource_id = models.CharField(max_length=256, null=False) + + @classmethod + def for_role_binding(cls, role_binding: V2rolebinding, v1_role: Union[Role, str]): + """Create a new BindingMapping for a V2rolebinding.""" + mappings = role_binding.as_minimal_dict() + resource = role_binding.resource + resource_type_namespace = resource.resource_type[0] + resource_type_name = resource.resource_type[1] + resource_id = resource.resource_id + role_arg = {"role": v1_role} if isinstance(v1_role, Role) else {"role_id": v1_role} + return cls( + mappings=mappings, + **role_arg, + resource_type_namespace=resource_type_namespace, + resource_type_name=resource_type_name, + resource_id=resource_id, + ) + + def remove_group_from_bindings(self, group_id: str): + """Remove group from mappings.""" + self.mappings["groups"] = [group for group in self.mappings["groups"] if group != group_id] + + def add_group_to_bindings(self, group_id: str): + """Add group to mappings.""" + self.mappings["groups"].append(group_id) + + def update_mappings_from_role_binding(self, role_binding: V2rolebinding): + """Set mappings.""" + # Validate resource and v1 role match + resource = role_binding.resource + if ( + resource.resource_type[0] != self.resource_type_namespace + or resource.resource_type[1] != self.resource_type_name + or resource.resource_id != self.resource_id + ): + raise Exception( + "Resource mismatch." 
+ f"Expected: {self.resource_type_namespace}:{self.resource_type_name}:{self.resource_id} " + f"but got: {resource.resource_type[0]}:{resource.resource_type[1]}:{resource.resource_id} " + ) + + self.mappings = role_binding.as_minimal_dict() + + def get_role_binding(self) -> V2rolebinding: + """Get role binding.""" + args = {**self.mappings} + args["resource"] = V2boundresource( + resource_type=(self.resource_type_namespace, self.resource_type_name), resource_id=self.resource_id + ) + args["role"] = V2role( + id=args["role"]["id"], + is_system=args["role"]["is_system"], + permissions=frozenset(args["role"]["permissions"]), + ) + args["groups"] = frozenset(args["groups"]) + return V2rolebinding(**args) def role_related_obj_change_cache_handler(sender=None, instance=None, using=None, **kwargs): diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 801d21b6a..c7795533e 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -25,10 +25,13 @@ from google.protobuf import json_format from kessel.relations.v1beta1 import common_pb2 from management.models import Outbox -from management.role.model import BindingMapping +from management.role.model import BindingMapping, Role from migration_tool.migrate import migrate_role +from api.models import Tenant + + logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -138,21 +141,49 @@ def replicate(self, event: ReplicationEvent): class RelationApiDualWriteHandler: """Class to handle Dual Write API related operations.""" - # TODO: add resource as parameter - def __init__(self, role, event_type: ReplicationEventType, replicator: Optional[RelationReplicator] = None): + @classmethod + def for_system_role_event( + cls, + role: Role, + # TODO: may want to include Policy instead? + tenant: Tenant, + event_type: ReplicationEventType, + replicator: Optional[RelationReplicator] = None, + ): + """Create a RelationApiDualWriteHandler for assigning / unassigning a system role for a group.""" + return cls(role, event_type, replicator, tenant) + + def __init__( + self, + role: Role, + event_type: ReplicationEventType, + replicator: Optional[RelationReplicator] = None, + tenant: Optional[Tenant] = None, + ): """Initialize RelationApiDualWriteHandler.""" if not self.replication_enabled(): self._replicator = NoopReplicator() return try: self._replicator = replicator if replicator else OutboxReplicator(role) + self.event_type = event_type self.role_relations: list[common_pb2.Relationship] = [] self.current_role_relations: list[common_pb2.Relationship] = [] self.role = role - self.binding_mapping = None - self.tenant_id = role.tenant_id - self.org_id = role.tenant.org_id - self.event_type = event_type + self.binding_mappings: dict[str, BindingMapping] = {} + + binding_tenant = tenant if tenant is not None else role.tenant + + if binding_tenant.tenant_name == "public": + raise DualWriteException( + "Cannot bind role to public tenant. " + "Expected the role to have non-public tenant, or for a non-public tenant to be provided. 
" + f"Role: {role.uuid} " + f"Tenant: {binding_tenant.id}" + ) + + self.tenant_id = binding_tenant.id + self.org_id = binding_tenant.org_id except Exception as e: raise DualWriteException(e) @@ -160,7 +191,7 @@ def replication_enabled(self): """Check whether replication enabled.""" return settings.REPLICATION_TO_RELATION_ENABLED is True - def load_relations_from_current_state_of_role(self): + def prepare_for_update(self): """Generate relations from current state of role and UUIDs for v2 role and role binding from database.""" if not self.replication_enabled(): return @@ -169,24 +200,26 @@ def load_relations_from_current_state_of_role(self): "[Dual Write] Generate relations from current state of role(%s): '%s'", self.role.uuid, self.role.name ) - self.binding_mapping = self.role.binding_mapping + self.binding_mappings = {m.id: m for m in self.role.binding_mappings.select_for_update().all()} + + if not self.binding_mappings: + logger.warning( + "[Dual Write] Binding mappings not found for role(%s): '%s'. " + "Assuming no current relations exist. " + "If this is NOT the case, relations are inconsistent!", + self.role.uuid, + self.role.name, + ) + return relations, _ = migrate_role( self.role, write_relationships=False, default_workspace=self.org_id, - current_mapping=self.binding_mapping, + current_bindings=self.binding_mappings.values(), ) self.current_role_relations = relations - except BindingMapping.DoesNotExist: - logger.warning( - "[Dual Write] Binding mapping not found for role(%s): '%s'. " - "Assuming no current relations exist. " - "If this is NOT the case, relations are inconsistent!", - self.role.uuid, - self.role.name, - ) except Exception as e: raise DualWriteException(e) @@ -232,16 +265,23 @@ def _generate_relations_and_mappings_for_role(self): self.role, write_relationships=False, default_workspace=self.org_id, - current_mapping=self.binding_mapping, + current_bindings=self.binding_mappings.values(), ) + prior_mappings = self.binding_mappings + self.role_relations = relations + self.binding_mappings = {m.id: m for m in mappings} + + # Create or update mappings as needed + for mapping in mappings: + if mapping.id is not None: + prior_mappings.pop(mapping.id) + mapping.save() - if self.binding_mapping is None: - self.binding_mapping = BindingMapping.objects.create(role=self.role, mappings=mappings) - else: - self.binding_mapping.mappings = mappings - self.binding_mapping.save(force_update=True) + # Delete any mappings to resources this role no longer gives access to + for mapping in prior_mappings.values(): + mapping.delete() return relations except Exception as e: diff --git a/rbac/management/role/view.py b/rbac/management/role/view.py index 0f9049f63..119d6ab51 100644 --- a/rbac/management/role/view.py +++ b/rbac/management/role/view.py @@ -489,7 +489,7 @@ def perform_update(self, serializer): dual_write_handler = RelationApiDualWriteHandler( serializer.instance, ReplicationEventType.UPDATE_CUSTOM_ROLE ) - dual_write_handler.load_relations_from_current_state_of_role() + dual_write_handler.prepare_for_update() role = serializer.save() @@ -513,7 +513,7 @@ def perform_destroy(self, instance: Role): raise serializers.ValidationError(error) dual_write_handler = RelationApiDualWriteHandler(instance, ReplicationEventType.DELETE_CUSTOM_ROLE) - dual_write_handler.load_relations_from_current_state_of_role() + dual_write_handler.prepare_for_update() self.delete_policies_if_no_role_attached(instance) instance.delete() diff --git a/rbac/migration_tool/in_memory_tuples.py 
b/rbac/migration_tool/in_memory_tuples.py index cd18e3163..b801aec09 100644 --- a/rbac/migration_tool/in_memory_tuples.py +++ b/rbac/migration_tool/in_memory_tuples.py @@ -1,7 +1,7 @@ """This module contains the in-memory representation of a tuple store.""" -from typing import Callable, Hashable, Iterable, List, NamedTuple, Optional, Set, Tuple, TypeVar -from collections import namedtuple, defaultdict +from collections import defaultdict +from typing import Callable, Hashable, Iterable, List, NamedTuple, Set, Tuple, TypeVar from kessel.relations.v1beta1.common_pb2 import Relationship from management.role.relation_api_dual_write_handler import RelationReplicator @@ -54,10 +54,10 @@ def remove(self, tuple: Relationship): def write(self, add: Iterable[Relationship], remove: Iterable[Relationship]): """Add / remove tuples.""" - for tuple in add: - self.add(tuple) for tuple in remove: self.remove(tuple) + for tuple in add: + self.add(tuple) def find_tuples(self, predicate: Callable[[RelationTuple], bool]) -> List[RelationTuple]: """Find tuples matching the given predicate.""" @@ -163,9 +163,11 @@ def find_group_with_tuples( return matching_groups, unmatched_groups def __str__(self): + """Return a string representation of the store.""" return str(self._tuples) def __repr__(self): + """Return a representation of the store.""" return f"InMemoryTuples({repr(self._tuples)})" @@ -173,13 +175,16 @@ class TuplePredicate: """A predicate that can be used to filter relation tuples.""" def __init__(self, func, repr): + """Initialize the predicate.""" self.func = func self.repr = repr def __call__(self, *args, **kwargs): + """Call the predicate.""" return self.func(*args, **kwargs) def __repr__(self): + """Return a representation of the predicate.""" return self.repr @@ -194,7 +199,6 @@ def predicate(rel: RelationTuple) -> bool: def one_of(*predicates: Callable[[RelationTuple], bool]) -> Callable[[RelationTuple], bool]: """Return a predicate that is true if any of the given predicates are true.""" - if len(predicates) == 1: return predicates[0] @@ -271,4 +275,5 @@ def __init__(self, store: InMemoryTuples = InMemoryTuples()): self.store = store def replicate(self, event): + """Replicate the event to the in-memory store.""" self.store.write(event.add, event.remove) diff --git a/rbac/migration_tool/ingest.py b/rbac/migration_tool/ingest.py index 3aed3a1f5..fec7b27c7 100644 --- a/rbac/migration_tool/ingest.py +++ b/rbac/migration_tool/ingest.py @@ -15,66 +15,15 @@ along with this program. If not, see . """ -import json -from typing import Tuple +from typing import Callable, Collection -from management.role.model import Role -from migration_tool.models import V1group, V1permission, V1resourcedef, V1role - -def aggregate_v1_role(role: Role) -> V1role: - """ - Aggregate the role's access and policy as a consolidated V1role object. - - This maps the RBAC model to preloaded, navigable objects with the key data broken down. 
- """ - perm_res_defs: dict[Tuple[str, str], list[V1resourcedef]] = {} - default_perm_list: list[str] = [] - role_id = str(role.uuid) - - # Determine v1 permissions - for access in role.access.all(): - default = True - for resource_def in access.resourceDefinitions.all(): - default = False - attri_filter = resource_def.attributeFilter - # Some malformed data in db - if attri_filter["operation"] == "in": - if not isinstance(attri_filter["value"], list): - attri_filter["operation"] = "equal" - elif attri_filter["value"] == [] or attri_filter["value"] == [None]: - continue - res_def = V1resourcedef(attri_filter["key"], attri_filter["operation"], json.dumps(attri_filter["value"])) - if res_def.resource_id != "": - # TODO: Need to bind against "ungrouped hosts" for inventory - add_element(perm_res_defs, (role_id, access.permission.permission), res_def) - if default: - default_perm_list.append(access.permission.permission) - - v1_perms = [] - for perm in default_perm_list: - perm_parts = perm.split(":") - v1_perm = V1permission(perm_parts[0], perm_parts[1], perm_parts[2], frozenset()) - v1_perms.append(v1_perm) - - for (role_id, perm), res_defs in perm_res_defs.items(): - perm_parts = perm.split(":") - v1_perm = V1permission(perm_parts[0], perm_parts[1], perm_parts[2], frozenset(res_defs)) - v1_perms.append(v1_perm) - - # With the replicated role bindings algorithm, role bindings are scoped by group, so we need to add groups - # TODO: We don't need to care about principals here – see RHCLOUD-35039 - # Maybe not even groups? See RHCLOUD-34786 - groups = set() - for policy in role.policies.all(): - principals = [str(principal) for principal in policy.group.principals.values_list("uuid", flat=True)] - groups.add(V1group(str(policy.group.uuid), frozenset(principals))) - - return V1role(role_id, frozenset(v1_perms), frozenset(groups)) - - -def add_element(dict, key, value): +def add_element(dict, key, value, collection: Callable[[], Collection] = list): """Add append value to dictionnary according to key.""" if key not in dict: - dict[key] = [] - dict[key].append(value) + dict[key] = collection() + c = dict[key] + if hasattr(c, "append"): + c.append(value) + else: + c.add(value) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 546da2086..42468d890 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -16,7 +16,7 @@ """ import logging -from typing import Any, FrozenSet, Optional +from typing import Iterable from django.conf import settings from kessel.relations.v1beta1 import common_pb2 @@ -26,25 +26,18 @@ from migration_tool.utils import create_relationship, output_relationships from api.models import Tenant -from .ingest import aggregate_v1_role logger = logging.getLogger(__name__) # pylint: disable=invalid-name -BindingMappings = dict[str, dict[str, Any]] - def get_kessel_relation_tuples( - v2_role_bindings: FrozenSet[V2rolebinding], + v2_role_bindings: Iterable[V2rolebinding], default_workspace: str, -) -> tuple[list[common_pb2.Relationship], BindingMappings]: +) -> list[common_pb2.Relationship]: """Generate a set of relationships and BindingMappings for the given set of v2 role bindings.""" relationships: list[common_pb2.Relationship] = list() - # Dictionary of v2 role binding ID to v2 role UUID and its permissions - # for the given v1 role. 
- binding_mappings: BindingMappings = {} - for v2_role_binding in v2_role_bindings: relationships.append( create_relationship( @@ -52,16 +45,6 @@ def get_kessel_relation_tuples( ) ) - v2_role_data = v2_role_binding.role - - if binding_mappings.get(v2_role_binding.id) is None: - binding_mappings[v2_role_binding.id] = {} - - binding_mappings[v2_role_binding.id] = { - "v2_role_uuid": str(v2_role_data.id), - "permissions": list(v2_role_binding.role.permissions), - } - for perm in v2_role_binding.role.permissions: relationships.append( create_relationship(("rbac", "role"), v2_role_binding.role.id, ("rbac", "user"), "*", perm) @@ -69,66 +52,63 @@ def get_kessel_relation_tuples( for group in v2_role_binding.groups: # These might be duplicate but it is OK, spiceDB will handle duplication through touch relationships.append( - create_relationship( - ("rbac", "role_binding"), v2_role_binding.id, ("rbac", "group"), group.id, "subject" - ) + create_relationship(("rbac", "role_binding"), v2_role_binding.id, ("rbac", "group"), group, "subject") ) - for bound_resource in v2_role_binding.resources: - # Is this a workspace binding, but not to the root workspace? - # If so, ensure this workspace is a child of the root workspace. - # All other resource-resource or resource-workspace relations - # which may be implied or necessary are intentionally ignored. - # These should come from the apps that own the resource. - if ( - bound_resource.resource_type == ("rbac", "workspace") - and not bound_resource.resourceId == default_workspace - ): - # This is not strictly necessary here and the relation may be a duplicate. - # Once we have more Workspace API / Inventory Group migration progress, - # this block can and probably should be removed. - # One of those APIs will add it themselves. - relationships.append( - create_relationship( - bound_resource.resource_type, - bound_resource.resourceId, - ("rbac", "workspace"), - default_workspace, - "parent", - ) - ) - + bound_resource = v2_role_binding.resource + + # Is this a workspace binding, but not to the root workspace? + # If so, ensure this workspace is a child of the root workspace. + # All other resource-resource or resource-workspace relations + # which may be implied or necessary are intentionally ignored. + # These should come from the apps that own the resource. + if ( + bound_resource.resource_type == ("rbac", "workspace") + and not bound_resource.resource_id == default_workspace + ): + # This is not strictly necessary here and the relation may be a duplicate. + # Once we have more Workspace API / Inventory Group migration progress, + # this block can and probably should be removed. + # One of those APIs will add it themselves. 
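+            # The relationship written here has the shape
+            #   workspace:<resource_id>#parent@workspace:<default_workspace>
+            # (see tests/migration_tool/tests_migrate.py for concrete examples).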
relationships.append( create_relationship( bound_resource.resource_type, - bound_resource.resourceId, - ("rbac", "role_binding"), - v2_role_binding.id, - "user_grant", + bound_resource.resource_id, + ("rbac", "workspace"), + default_workspace, + "parent", ) ) - return relationships, binding_mappings + relationships.append( + create_relationship( + bound_resource.resource_type, + bound_resource.resource_id, + ("rbac", "role_binding"), + v2_role_binding.id, + "user_grant", + ) + ) + + return relationships def migrate_role( role: Role, write_relationships: bool, default_workspace: str, - current_mapping: Optional[BindingMapping] = None, -) -> tuple[list[common_pb2.Relationship], BindingMappings]: + current_bindings: Iterable[BindingMapping] = [], +) -> tuple[list[common_pb2.Relationship], list[BindingMapping]]: """ Migrate a role from v1 to v2, returning the tuples and mappings. The mappings are returned so that we can reconstitute the corresponding tuples for a given role. This is needed so we can remove those tuples when the role changes if needed. """ - v1_role = aggregate_v1_role(role) - # This is where we wire in the implementation we're using into the Migrator - v2_role_bindings = [binding for binding in v1_role_to_v2_bindings(v1_role, default_workspace, current_mapping)] - relationships, mappings = get_kessel_relation_tuples(frozenset(v2_role_bindings), default_workspace) + v2_role_bindings = v1_role_to_v2_bindings(role, default_workspace, current_bindings) + relationships = get_kessel_relation_tuples([m.get_role_binding() for m in v2_role_bindings], default_workspace) output_relationships(relationships, write_relationships) - return relationships, mappings + return relationships, v2_role_bindings def migrate_workspace(tenant: Tenant, write_relationships: bool): @@ -194,13 +174,13 @@ def migrate_data_for_tenant(tenant: Tenant, exclude_apps: list, write_relationsh _, mappings = migrate_role(role, write_relationships, default_workspace) - # Insert is forced with `create` in order to prevent this from + # Conflits are not ignored in order to prevent this from # accidentally running concurrently with dual-writes. # If migration should be rerun, then the bindings table should be dropped. - # If changing this to update_or_create, + # If changing this to allow updates, # always ensure writes are paused before running. - # Thus must always be the case, but `create` will at least start failing you if you forget. - BindingMapping.objects.create(role=role, mappings=mappings) + # This must always be the case, but this should at least start failing you if you forget. 
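+        # Note: ignore_conflicts=False is bulk_create's default; it is passed
+        # explicitly here only to make that intent obvious at the call site.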
+ BindingMapping.objects.bulk_create(mappings, ignore_conflicts=False) logger.info(f"Migration completed for role: {role.name} with UUID {role.uuid}.") logger.info(f"Migrated {roles.count()} roles for tenant: {tenant.org_id}") diff --git a/rbac/migration_tool/models.py b/rbac/migration_tool/models.py index 94eb50b8a..02ad473ee 100644 --- a/rbac/migration_tool/models.py +++ b/rbac/migration_tool/models.py @@ -19,25 +19,6 @@ from typing import Tuple -@dataclass(frozen=True) -class Relationship: - """Relationship definition.""" - - resource_type: str - resource_id: str - relation: str - subject_type: str - subject_id: str - - -@dataclass(frozen=True) -class V1group: - """V1 group definition.""" - - id: str - users: frozenset[str] - - @dataclass(frozen=True) class V1resourcedef: """V1 resource definition.""" @@ -69,29 +50,12 @@ def matches(self, v2perm: str): return True -@dataclass(frozen=True) -class V1role: - """V1 role definition.""" - - id: str - permissions: frozenset[V1permission] - groups: frozenset[V1group] - - -@dataclass(frozen=True) -class V2group: - """V2 group definition.""" - - id: str - users: frozenset[str] - - @dataclass(frozen=True) class V2boundresource: """V2 bound resource definition.""" resource_type: Tuple[str, str] - resourceId: str + resource_id: str @dataclass(frozen=True) @@ -102,16 +66,31 @@ class V2role: is_system: bool permissions: frozenset[str] + def as_dict(self) -> dict: + """Convert the V2 role to a dictionary.""" + return { + "id": self.id, + "is_system": self.is_system, + "permissions": list(self.permissions), + } + @dataclass(frozen=True) class V2rolebinding: """V2 role binding definition.""" id: str - originalRole: V1role role: V2role - resources: frozenset[V2boundresource] - groups: frozenset[V2group] + resource: V2boundresource + groups: frozenset[str] + + def as_minimal_dict(self) -> dict: + """Convert the V2 role binding to a dictionary, excluding resource, original role, and users.""" + return { + "id": self.id, + "role": self.role.as_dict(), + "groups": [g for g in self.groups], + } def split_v2_perm(perm: str): diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index ee5c72927..c4aa2d82b 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -15,22 +15,17 @@ along with this program. If not, see . 
""" -import json import logging import uuid -from typing import Callable, FrozenSet, Optional, Tuple, Type +from typing import Any, Iterable, Optional, Tuple, Union from django.conf import settings from management.models import BindingMapping +from management.permission.model import Permission from management.role.model import Role from migration_tool.ingest import add_element from migration_tool.models import ( - V1group, - V1permission, - V1resourcedef, - V1role, V2boundresource, - V2group, V2role, V2rolebinding, cleanNameForV2SchemaCompatibility, @@ -38,15 +33,7 @@ logger = logging.getLogger(__name__) -Permissiongroupings = dict[V2boundresource, list[str]] -Perm_bound_resources = dict[str, list[V2boundresource]] - -group_perms_for_rolebinding_fn = Type[ - Callable[ - [str, Permissiongroupings, Perm_bound_resources, FrozenSet[V1group]], - FrozenSet[V2rolebinding], - ] -] +PermissionGroupings = dict[V2boundresource, set[str]] def add_system_role(system_roles, role: V2role): @@ -93,150 +80,116 @@ def set_system_roles(cls): def v1_role_to_v2_bindings( - v1_role: V1role, + v1_role: Role, default_workspace: str, - binding_mapping: Optional[BindingMapping], -) -> FrozenSet[V2rolebinding]: + role_bindings: Iterable[BindingMapping], +) -> list[BindingMapping]: """Convert a V1 role to a set of V2 role bindings.""" - perm_groupings: Permissiongroupings = {} + perm_groupings: PermissionGroupings = {} + # Group V2 permissions by target resource - for v1_perm in v1_role.permissions: + for access in v1_role.access.all(): + v1_perm = access.permission + if not is_for_enabled_app(v1_perm): continue + v2_perm = v1_perm_to_v2_perm(v1_perm) - if v1_perm.resourceDefs: + + default = True + for resource_def in access.resourceDefinitions.all(): if not is_for_enabled_resource(v1_perm): + default = False continue - for resource_def in v1_perm.resourceDefs: - resource_type = attribute_key_to_v2_related_resource_type(resource_def.resource_type) - if resource_type is None: - # Resource type not mapped to v2 + + default = False + attri_filter = resource_def.attributeFilter + + # Deal with some malformed data in db + if attri_filter["operation"] == "in": + if not isinstance(attri_filter["value"], list): + # Override operation as "equal" if value is not a list + attri_filter["operation"] = "equal" + elif attri_filter["value"] == [] or attri_filter["value"] == [None]: + # Skip empty values continue - for resource_id in split_resourcedef_literal(resource_def): - if resource_id is None: - raise ValueError(f"Resource ID is None for {resource_def}") - add_element( - perm_groupings, - V2boundresource(resource_type, resource_id), - v2_perm, - ) - elif default_workspace is None: - logger.info(f"Cannot create role binding for role; no resource to bind to: {v1_role.id}") - else: + + resource_type = attribute_key_to_v2_related_resource_type(attri_filter["key"]) + if resource_type is None: + # Resource type not mapped to v2 + continue + for resource_id in values_from_attribute_filter(attri_filter): + # TODO: Need to bind against "ungrouped hosts" for inventory + if resource_id is None: + raise ValueError(f"Resource ID is None for {resource_def}") + add_element(perm_groupings, V2boundresource(resource_type, resource_id), v2_perm, collection=set) + if default: add_element( - perm_groupings, - V2boundresource(("rbac", "workspace"), default_workspace), - v2_perm, + perm_groupings, V2boundresource(("rbac", "workspace"), default_workspace), v2_perm, collection=set ) - # Project permission sets to roles per set of resources - 
resource_roles = permission_groupings_to_v2_role_and_resource(perm_groupings, v1_role, binding_mapping) - # Construct rolebindings for each resource - v2_role_bindings: list[V2rolebinding] = [] - v2_groups = v1groups_to_v2groups(v1_role.groups) - for role, resources in resource_roles.items(): - for resource in resources: - if v2_groups: - for v2_group in v2_groups: - if binding_mapping: - role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) - else: - role_binding_id = str(uuid.uuid4()) - v2_role_binding = V2rolebinding( - role_binding_id, v1_role, role, frozenset({resource}), frozenset({v2_group}) - ) - v2_role_bindings.append(v2_role_binding) - else: - if binding_mapping: - role_binding_id = binding_mapping.find_role_binding_by_v2_role(role.id) - else: - role_binding_id = str(uuid.uuid4()) - v2_role_binding = V2rolebinding(role_binding_id, v1_role, role, frozenset({resource}), v2_groups) - v2_role_bindings.append(v2_role_binding) - return frozenset(v2_role_bindings) - -custom_roles_created = 0 + # Project permission sets to roles per set of resources + return permission_groupings_to_v2_role_bindings(perm_groupings, v1_role, role_bindings) -def permission_groupings_to_v2_role_and_resource( - perm_groupings: Permissiongroupings, v1_role: V1role, binding_mapping: Optional[BindingMapping] -) -> dict[V2role, list[V2boundresource]]: - """ - Determine V2 roles and resources they apply to from a set of V1 resources and permissions. +def permission_groupings_to_v2_role_bindings( + perm_groupings: PermissionGroupings, v1_role: Role, role_bindings: Iterable[BindingMapping] +) -> list[BindingMapping]: + """Determine updated role bindings based on latest resource-permission state and current role bindings.""" + updated_mappings: list[BindingMapping] = [] + latest_roles_by_id: dict[str, V2role] = {} + # TODO: this is broken for system roles, need to have Tenant or Policies provided + # so that we don't look up Policies across all Tenants! + latest_groups = frozenset([str(policy.group.uuid) for policy in v1_role.policies.all()]) - Prefers to reuse system roles where possible. - """ - candidate_system_roles = {} - resource_roles: dict[V2role, list[V2boundresource]] = {} - system_roles = SystemRole.get_system_roles() + role_bindings_by_resource = {binding.get_role_binding().resource: binding for binding in role_bindings} for resource, permissions in perm_groupings.items(): - system_role = system_roles.get(frozenset(permissions)) - if system_role is not None: - role = system_roles[frozenset(permissions)] - add_element(resource_roles, role, resource) - else: - permset = set(permissions) - granted = set() - matched_roles = [] - - for sysperms, sysrole in system_roles.items(): - if sysperms.issubset(permset) and not sysperms.issubset( - granted - ): # If all permissions on the role should be granted but not all of them have been, add it - matched_roles.append(sysrole) - granted |= sysperms - - if permset == granted: - break - if permset == granted: - for role in matched_roles: - add_element(resource_roles, role, resource) + mapping = role_bindings_by_resource.get(resource) + current = mapping.get_role_binding() if mapping is not None else None + perm_set = frozenset(permissions) + new_role: Optional[V2role] = None + + # Try to find an updated Role that matches (could be our current Role) + for _, role in latest_roles_by_id.items(): + if role.permissions == perm_set: + new_role = role + break + + if new_role is None: + # No updated Role matches. We need a new or reconfigured Role. 
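+            # Prefer reusing the current role's ID where possible: that keeps the
+            # existing role_binding#granted@role tuples stable across updates.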
+ # Is there a current role? Should update it? Only if it wasn't already updated. + if current is not None and current.role.id not in latest_roles_by_id: + new_role = V2role(current.role.id, False, perm_set) else: - # Track leftovers and add a custom role - leftovers = permset - granted - logger.info( - f"No system role for role: {v1_role.id}. Not matched permissions: {leftovers}. Resource: {resource}" - ) - # Track possible missing system roles - # Get applications with unmatched permissions - apps = {} - for perm in leftovers: - app = perm.split("_", 1)[0] # Hack since we don't have the V1 data anymore by this point - if app not in apps: - apps[app] = [] - # Get original permissions granted on this resource grouped by application, - # for applications with unmatched permissions - for perm in permissions: - app = perm.split("_", 1)[0] # Hack since we don't have the V1 data anymore by this point - if app in apps: - apps[app].append(perm) - # Increment counts for each distinct set of permissions - - for app, perms in apps.items(): - candidate = frozenset(perms) - if candidate in candidate_system_roles: - candidate_system_roles[candidate].add(v1_role.id) - else: - candidate_system_roles[candidate] = {v1_role.id} - # Add a custom role - if binding_mapping: - v2_uuid = binding_mapping.find_v2_role_by_permission(permissions) - else: - v2_uuid = uuid.uuid4() - - add_element(resource_roles, V2role(str(v2_uuid), False, frozenset(permissions)), resource) - global custom_roles_created - custom_roles_created += 1 - return resource_roles - - -def is_for_enabled_app(perm: V1permission): + # Need to create a new role + id = str(uuid.uuid4()) + new_role = V2role(id, False, perm_set) + latest_roles_by_id[new_role.id] = new_role + + # Add the role binding, updating or creating as needed. + if mapping is None: + # No existing binding for this resource, have to create one + id = str(uuid.uuid4()) + binding = V2rolebinding(id, new_role, resource, latest_groups) + updated_mapping = BindingMapping.for_role_binding(binding, v1_role) + else: + # Reuse current binding ID and mapping ID + binding = V2rolebinding(current.id, new_role, resource, latest_groups) + updated_mapping = mapping + updated_mapping.update_mappings_from_role_binding(binding) + + updated_mappings.append(updated_mapping) + + return updated_mappings + + +def is_for_enabled_app(perm: Permission): """Return true if the permission is for an app that should migrate.""" - return perm.app not in settings.V2_MIGRATION_APP_EXCLUDE_LIST + return perm.application not in settings.V2_MIGRATION_APP_EXCLUDE_LIST -def is_for_enabled_resource(perm: V1permission): +def is_for_enabled_resource(perm: Permission): """ Return true if the resource is for an app that should migrate. @@ -247,33 +200,26 @@ def is_for_enabled_resource(perm: V1permission): Once the resource model is finalized, we should no longer exclude that app, and should instead update the migration code to account for migrating those resources in whatever form they should migrate. 
""" - return perm.app not in settings.V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST + return perm.application not in settings.V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST -def split_resourcedef_literal(resourceDef: V1resourcedef): +def values_from_attribute_filter(attribute_filter: dict[str, Any]) -> list[str]: """Split a resource definition into a list of resource IDs.""" - if resourceDef.op == "in": - try: - return json.loads(resourceDef.resource_id) # Most are JSON arrays - except json.JSONDecodeError: - return resourceDef.resource_id.split( - "," - ) # If not JSON, assume comma-separated? Cost Management openshift assets are like this. - else: - return [json.loads(resourceDef.resource_id)] + op: str = attribute_filter["operation"] + resource_id: Union[list[str], str] = attribute_filter["value"] + if isinstance(resource_id, list): + return resource_id -def v1groups_to_v2groups(v1groups: FrozenSet[V1group]): - """Convert a set of V1 groups to a set of V2 groups.""" - return frozenset([V2group(v1group.id, v1group.users) for v1group in v1groups]) + return resource_id.split(",") if op == "in" else [resource_id] -def v1_perm_to_v2_perm(v1_permission): +def v1_perm_to_v2_perm(v1_permission: Permission): """Convert a V1 permission to a V2 permission.""" - if v1_permission.app == "inventory" and v1_permission.resource == "groups": - return cleanNameForV2SchemaCompatibility(f"workspace_{v1_permission.perm}") + if v1_permission.application == "inventory" and v1_permission.resource_type == "groups": + return cleanNameForV2SchemaCompatibility(f"workspace_{v1_permission.verb}") return cleanNameForV2SchemaCompatibility( - v1_permission.app + "_" + v1_permission.resource + "_" + v1_permission.perm + v1_permission.application + "_" + v1_permission.resource_type + "_" + v1_permission.verb ) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 115b30a0d..303469139 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -16,6 +16,7 @@ # """Test tuple changes for RBAC operations.""" +import unittest from django.test import TestCase, override_settings from management.group.model import Group from management.permission.model import Permission @@ -66,11 +67,19 @@ def dual_write_handler(self, role: Role, event_type: ReplicationEventType) -> Re """Create a RelationApiDualWriteHandler for the given role and event type.""" return RelationApiDualWriteHandler(role, event_type, replicator=InMemoryRelationReplicator(self.tuples)) + def dual_write_handler_for_system_role( + self, role: Role, tenant: Tenant, event_type: ReplicationEventType + ) -> RelationApiDualWriteHandler: + """Create a RelationApiDualWriteHandler for the given role and event type.""" + return RelationApiDualWriteHandler.for_system_role_event( + role, tenant, event_type, replicator=InMemoryRelationReplicator(self.tuples) + ) + def given_v1_system_role(self, name: str, permissions: list[str]) -> Role: """Create a new system role with the given ID and permissions.""" role = self.fixture.new_system_role(name=name, permissions=permissions) - dual_write = self.dual_write_handler(role, ReplicationEventType.CREATE_SYSTEM_ROLE) - dual_write.replicate_new_or_updated_role(role) + # TODO: Need to replicate system role permission relations + # This is different from group assignment return role def given_v1_role(self, name: str, default: list[str], **kwargs: list[str]) -> Role: @@ -84,10 +93,10 @@ def given_v1_role(self, name: str, default: list[str], **kwargs: list[str]) -> R 
dual_write.replicate_new_or_updated_role(role) return role - def given_update_to_v1_role(self, role: Role, default: list[str], **kwargs: list[str]): + def given_update_to_v1_role(self, role: Role, default: list[str] = [], **kwargs: list[str]): """Update the given role with the given workspace permissions.""" dual_write = self.dual_write_handler(role, ReplicationEventType.UPDATE_CUSTOM_ROLE) - dual_write.load_relations_from_current_state_of_role() + dual_write.prepare_for_update() role = self.fixture.update_custom_role( role, resource_access=self._workspace_access_to_resource_definition(default, **kwargs), @@ -115,6 +124,10 @@ def given_policy(self, group: Group, roles: list[Role]) -> Policy: return self.fixture.add_role_to_group(roles[0], group, self.tenant) def expect_1_v2_role_with_permissions(self, permissions: list[str]) -> str: + """Assert there is a role matching the given permissions and return its ID.""" + return self.expect_v2_roles_with_permissions(1, permissions)[0] + + def expect_v2_roles_with_permissions(self, count: int, permissions: list[str]) -> list[str]: """Assert there is a role matching the given permissions and return its ID.""" roles, unmatched = self.tuples.find_group_with_tuples( [ @@ -129,13 +142,12 @@ def expect_1_v2_role_with_permissions(self, permissions: list[str]) -> str: num_roles = len(roles) self.assertEqual( num_roles, - 1, - f"Expected exactly 1 role with permissions {permissions}, but got {num_roles}.\n" + count, + f"Expected exactly {count} role(s) with permissions {permissions}, but got {num_roles}.\n" f"Matched roles: {roles}.\n" f"Unmatched roles: {unmatched}", ) - _, _, id = next(iter(roles.keys())) - return id + return [role[2] for role in roles.keys()] def expect_num_role_bindings(self, num: int): """Assert there are [num] role bindings.""" @@ -198,6 +210,7 @@ def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[ class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" + @unittest.skip("Not implemented yet") def test_system_role_grants_access_to_default_workspace(self): """Create role binding only when system role is bound to group.""" role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) @@ -258,7 +271,75 @@ def test_add_permissions_to_role(self): def test_remove_permissions_from_role(self): """Modify the role in place when removing permissions.""" - pass + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + self.given_update_to_v1_role( + role, + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read"], + ) + + role_for_default = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + role_for_ws_2 = self.expect_1_v2_role_with_permissions(["app1:hosts:read"]) + + # TODO: assert group once group replication is implemented + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[] + ) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[]) + + def test_remove_permissions_from_role_back_to_original(self): + """Modify the role in place when removing permissions, consolidating roles.""" + """Modify the role in place when adding permissions.""" + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + 
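+        # Grow ws_2's permission set first, then shrink it back, so both
+        # resources are expected to consolidate onto a single shared v2 role.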
self.given_update_to_v1_role( + role, + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write", "app2:hosts:read"], + ) + + self.given_update_to_v1_role( + role, + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[id], for_groups=[]) + + def test_add_resource_uses_existing_groups(self): + """New bindings get existing groups.""" + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + g1 = self.given_group("g2", ["u2"]) + g2 = self.given_group("g1", ["u1"]) + self.given_policy(g1, roles=[role]) + self.given_policy(g2, roles=[role]) + + self.given_update_to_v1_role( + role, + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ws_3=["app1:hosts:read", "inventory:hosts:write"], + ) + + role = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_1_role_binding_to_workspace("ws_3", for_v2_roles=[role], for_groups=[str(g1.uuid), str(g2.uuid)]) def test_delete_role(self): """Delete the role and its bindings when deleting a custom role.""" @@ -266,11 +347,42 @@ def test_delete_role(self): def test_remove_resource_removes_role_binding(self): """Remove the role binding when removing the resource from attribute filter.""" - pass + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + self.given_update_to_v1_role( + role, + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + role = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_num_role_bindings(1) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role], for_groups=[]) def test_two_roles_with_same_resource_permissions_create_two_v2_roles(self): """Create two v2 roles when two roles have the same resource permissions across different resources.""" - pass + self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + self.given_v1_role( + "r2", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + roles = self.expect_v2_roles_with_permissions(2, ["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[roles[0]], for_groups=[]) + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[roles[1]], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[roles[0]], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[roles[1]], for_groups=[]) class RbacFixture: diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index 9e1e45871..4a030ea90 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -68,26 +68,22 @@ def replication_event_for_v1_role(v1_role_uuid, default_workspace_uuid): def relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid): """Create a 
relation API tuple for a v1 role.""" role_id = Role.objects.get(uuid=v1_role_uuid).id - role_binding = BindingMapping.objects.filter(role=role_id).first() + mappings = BindingMapping.objects.filter(role=role_id).all() relations = [] - for role_binding_uuid, data in role_binding.mappings.items(): - relation_tuple = relation_api_tuple( - "role_binding", str(role_binding_uuid), "granted", "role", str(data["v2_role_uuid"]) - ) + for role_binding in [m.get_role_binding() for m in mappings]: + relation_tuple = relation_api_tuple("role_binding", role_binding.id, "granted", "role", role_binding.role.id) relations.append(relation_tuple) - for permission in data["permissions"]: - relation_tuple = relation_api_tuple("role", str(data["v2_role_uuid"]), permission, "user", "*") + for permission in role_binding.role.permissions: + relation_tuple = relation_api_tuple("role", role_binding.role.id, permission, "user", "*") relations.append(relation_tuple) - if "app_all_read" in data["permissions"]: + if "app_all_read" in role_binding.role.permissions: relation_tuple = relation_api_tuple( - "workspace", default_workspace_uuid, "user_grant", "role_binding", str(role_binding_uuid) + "workspace", default_workspace_uuid, "user_grant", "role_binding", role_binding.id ) relations.append(relation_tuple) else: - relation_tuple = relation_api_tuple( - "keya/id", "valueA", "user_grant", "role_binding", str(role_binding_uuid) - ) + relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", role_binding.id) relations.append(relation_tuple) return relations @@ -120,7 +116,6 @@ class RoleViewsetTests(IdentityRequest): def setUp(self): """Set up the role viewset tests.""" super().setUp() - self.maxDiff = None sys_role_config = {"name": "system_role", "display_name": "system_display", "system": True} def_role_config = {"name": "default_role", "display_name": "default_display", "platform_default": True} diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index f17c19c88..71e4be9fb 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -22,7 +22,6 @@ from api.models import Tenant from management.models import * from migration_tool.migrate import migrate_data -from management.workspace.model import Workspace class MigrateTests(TestCase): @@ -40,8 +39,8 @@ def setUp(self): another_tenant = Tenant.objects.create(org_id="7654321") # setup data for organization 1234567 - self.aws_account_id_1 = "123456" - self.aws_account_id_2 = "654321" + self.workspace_id_1 = "123456" + self.workspace_id_2 = "654321" # This role will be skipped because it contains permission with skipping application self.role_a1 = Role.objects.create(name="role_a1", tenant=self.tenant) self.access_a11 = Access.objects.create(permission=permission1, role=self.role_a1, tenant=self.tenant) @@ -51,9 +50,9 @@ def setUp(self): self.access_a2 = Access.objects.create(permission=permission2, role=self.role_a2, tenant=self.tenant) self.resourceDef_a2 = ResourceDefinition.objects.create( attributeFilter={ - "key": "cost-management.aws.account", + "key": "group.id", "operation": "equal", - "value": self.aws_account_id_1, + "value": self.workspace_id_1, }, access=self.access_a2, tenant=self.tenant, @@ -62,9 +61,9 @@ def setUp(self): self.access_a3 = Access.objects.create(permission=permission2, role=self.role_a3, tenant=self.tenant) self.resourceDef_a3 = ResourceDefinition.objects.create( attributeFilter={ - "key": "aws.account", + "key": "group.id", "operation": 
"in", - "value": [self.aws_account_id_1, self.aws_account_id_2], + "value": [self.workspace_id_1, self.workspace_id_2], }, access=self.access_a3, tenant=self.tenant, @@ -97,25 +96,22 @@ def test_migration_of_data(self, logger_mock): org_id = self.tenant.org_id root_workspace_id = f"root-workspace-{self.tenant.org_id}" - role_binding = BindingMapping.objects.filter(role=self.role_a2).first() + role_binding = BindingMapping.objects.filter(role=self.role_a2).get().get_role_binding() - mappings_a2 = role_binding.mappings - first_key = list(mappings_a2.keys())[0] + rolebinding_a2 = role_binding.id + v2_role_a2 = role_binding.role.id - v2_role_a2 = mappings_a2[first_key]["v2_role_uuid"] - rolebinding_a2 = first_key - - role_binding_a3 = BindingMapping.objects.filter(role=self.role_a3).first() - mappings_a3 = role_binding_a3.mappings - first_key = list(mappings_a3.keys())[0] - v2_role_a31_value = mappings_a3[first_key]["v2_role_uuid"] - v2_role_a31 = v2_role_a31_value - - last_key = list(mappings_a3.keys())[-1] - v2_role_a32 = mappings_a3[last_key]["v2_role_uuid"] + role_binding_a3_1 = ( + BindingMapping.objects.filter(role=self.role_a3, resource_id=self.workspace_id_1).get().get_role_binding() + ) + role_binding_a3_2 = ( + BindingMapping.objects.filter(role=self.role_a3, resource_id=self.workspace_id_2).get().get_role_binding() + ) + v2_role_a31 = role_binding_a3_1.role.id + v2_role_a32 = role_binding_a3_2.role.id - rolebinding_a31 = first_key - rolebinding_a32 = last_key + rolebinding_a31 = role_binding_a3_1.id + rolebinding_a32 = role_binding_a3_2.id workspace_1 = "123456" workspace_2 = "654321" @@ -124,7 +120,7 @@ def test_migration_of_data(self, logger_mock): rolebinding_a31, rolebinding_a32 = rolebinding_a32, rolebinding_a31 # Switch these two if binding is not in correct order if ( - call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a31}") + call(f"workspace:{self.workspace_id_1}#user_grant@role_binding:{rolebinding_a31}") not in logger_mock.info.call_args_list ): workspace_1, workspace_2 = workspace_2, workspace_1 @@ -147,8 +143,8 @@ def test_migration_of_data(self, logger_mock): call(f"role_binding:{rolebinding_a2}#granted@role:{v2_role_a2}"), call(f"role:{v2_role_a2}#inventory_hosts_write@user:*"), call(f"role_binding:{rolebinding_a2}#subject@group:{self.group_a2.uuid}"), - call(f"workspace:{self.aws_account_id_1}#parent@workspace:{org_id}"), - call(f"workspace:{self.aws_account_id_1}#user_grant@role_binding:{rolebinding_a2}"), + call(f"workspace:{self.workspace_id_1}#parent@workspace:{org_id}"), + call(f"workspace:{self.workspace_id_1}#user_grant@role_binding:{rolebinding_a2}"), ## Role binding to role_a3 call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}"), call(f"role:{v2_role_a31}#inventory_hosts_write@user:*"), From 452b13f71fa892e25e39d69eafc2f2d5ae52ed1e Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Mon, 23 Sep 2024 18:39:48 -0400 Subject: [PATCH 19/55] Address type check linting issues --- rbac/management/role/model.py | 4 +++- rbac/management/role/relation_api_dual_write_handler.py | 2 ++ .../migration_tool/sharedSystemRolesReplicatedRoleBindings.py | 2 ++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index fe02da17e..0c2adcc16 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -138,7 +138,9 @@ def for_role_binding(cls, role_binding: V2rolebinding, v1_role: Union[Role, str] resource_type_namespace = resource.resource_type[0] 
resource_type_name = resource.resource_type[1] resource_id = resource.resource_id - role_arg = {"role": v1_role} if isinstance(v1_role, Role) else {"role_id": v1_role} + role_arg: dict[str, Union[Role, str]] = ( + {"role": v1_role} if isinstance(v1_role, Role) else {"role_id": v1_role} + ) return cls( mappings=mappings, **role_arg, diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index c7795533e..893498189 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -141,6 +141,8 @@ def replicate(self, event: ReplicationEvent): class RelationApiDualWriteHandler: """Class to handle Dual Write API related operations.""" + _replicator: RelationReplicator + @classmethod def for_system_role_event( cls, diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index c4aa2d82b..3d8000d2a 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -175,6 +175,8 @@ def permission_groupings_to_v2_role_bindings( updated_mapping = BindingMapping.for_role_binding(binding, v1_role) else: # Reuse current binding ID and mapping ID + if current is None: + raise ValueError(f"Current role binding is None for {mapping}") binding = V2rolebinding(current.id, new_role, resource, latest_groups) updated_mapping = mapping updated_mapping.update_mappings_from_role_binding(binding) From e7b0dc9bb33d429a0e9240495d4568f514335676 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Tue, 24 Sep 2024 15:42:40 +0200 Subject: [PATCH 20/55] [RHCLOUD-35039] Generate replication event for v1 Group add / remove principal endpoints (#1198) * Add replication for group principal add/remove Extract principals validation in proxy request from add_principals in group view Extract service accounts validation from add_service_accounts in group view Allow to pass group into protect_system_groups in group view User different group object to system group protection in group view Lock group and principals to add/remove principals in group view Generate replication event for adding principals into groups Update dual write handler after PR#1200 rebase Remove unnecesary query in relation_api_dual_write_group_handler and pass principal objects Decouple role from OutboxReplicator Add replication event test for adding principals Remove unnecessary token generation in removing principals from groups Replicate removed principals and service accounts from group into Relation API Extract user-group relations into method in relation api group handler Use constant for event type in group dual write handler Add more tests * Remove locking groups and principals * Remove unecessary atomic and fix method signature * Fix linter issues * Add tests for group dual write handler These tests just demonstrate the dual write handler usage and assert expected tuples on more varied scenarios. 
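  For reference, the call pattern this series intends at this point is roughly
  the following (a sketch only, assuming REPLICATION_TO_RELATION_ENABLED is on
  and that the Group and Principal objects are already resolved; the wrapper
  function itself is illustrative and not part of this patch):

      from management.group.relation_api_dual_write_group_handler import (
          RelationApiDualWriteGroupHandler,
      )
      from management.role.relation_api_dual_write_handler import ReplicationEventType

      def replicate_membership_change(group, principals):
          # One handler per group and event type; it builds one group#member@user
          # tuple per principal and, by default, hands them to the OutboxReplicator
          # when replicate_new_principals() is called.
          handler = RelationApiDualWriteGroupHandler(
              group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP, principals
          )
          handler.replicate_new_principals()

  Removal is symmetric: ReplicationEventType.REMOVE_PRINCIPALS_FROM_GROUP plus
  replicate_removed_principals().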
* Move out query to get group in group view for add/remove_principals * Combine principals and service accounts for dual write group handler --------- Co-authored-by: Alec Henninger --- .../relation_api_dual_write_group_handler.py | 109 ++++++++++ rbac/management/group/view.py | 202 ++++++++++-------- .../role/relation_api_dual_write_handler.py | 38 +++- rbac/migration_tool/in_memory_tuples.py | 4 +- tests/management/group/test_view.py | 89 +++++++- tests/management/role/test_dual_write.py | 127 +++++++++-- 6 files changed, 444 insertions(+), 125 deletions(-) create mode 100644 rbac/management/group/relation_api_dual_write_group_handler.py diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py new file mode 100644 index 000000000..49bba9c74 --- /dev/null +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -0,0 +1,109 @@ +# +# Copyright 2024 Red Hat, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# + +"""Class to handle Dual Write API related operations.""" +import logging +from typing import Optional + +from django.conf import settings +from management.principal.model import Principal +from management.role.relation_api_dual_write_handler import ( + DualWriteException, + OutboxReplicator, + RelationReplicator, + ReplicationEvent, + ReplicationEventType, +) +from migration_tool.utils import create_relationship + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + + +class RelationApiDualWriteGroupHandler: + """Class to handle Dual Write API related operations.""" + + def __init__( + self, + group, + event_type: ReplicationEventType, + principals: list[Principal], + replicator: Optional[RelationReplicator] = None, + ): + """Initialize RelationApiDualWriteGroupHandler.""" + if not self.replication_enabled(): + return + try: + self.group_relations_to_add = [] + self.group_relations_to_remove = [] + self.principals = principals + self.group = group + self.event_type = event_type + self._replicator = replicator if replicator else OutboxReplicator(group) + except Exception as e: + raise DualWriteException(e) + + def replication_enabled(self): + """Check whether replication enabled.""" + return settings.REPLICATION_TO_RELATION_ENABLED is True + + def _generate_relations(self): + """Generate user-groups relations.""" + relations = [] + for principal in self.principals: + relations.append( + create_relationship( + ("rbac", "group"), str(self.group.uuid), ("rbac", "user"), str(principal.uuid), "member" + ) + ) + + return relations + + def replicate_new_principals(self): + """Replicate new principals into group.""" + if not self.replication_enabled(): + return + logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) + + self.group_relations_to_add = self._generate_relations() + self._replicate() + + def replicate_removed_principals(self): + 
"""Replicate removed principals from group.""" + if not self.replication_enabled(): + return + logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) + + self.group_relations_to_remove = self._generate_relations() + + self._replicate() + + def _replicate(self): + if not self.replication_enabled(): + return + try: + self._replicator.replicate( + ReplicationEvent( + type=self.event_type, + # TODO: need to think about partitioning + # Maybe resource id + partition_key="rbactodo", + remove=self.group_relations_to_remove, + add=self.group_relations_to_add, + ), + ) + except Exception as e: + raise DualWriteException(e) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 3f13c5493..50e99fd3d 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -17,7 +17,7 @@ """View for group management.""" import logging -from typing import Iterable, Optional +from typing import Iterable, List, Optional, Tuple from uuid import UUID import requests @@ -36,6 +36,10 @@ set_system_flag_before_update, ) from management.group.model import Group +from management.group.relation_api_dual_write_group_handler import ( + RelationApiDualWriteGroupHandler, + ReplicationEventType, +) from management.group.serializer import ( GroupInputSerializer, GroupPrincipalInputSerializer, @@ -195,9 +199,10 @@ def get_serializer_class(self): return GroupInputSerializer return GroupSerializer - def protect_system_groups(self, action): + def protect_system_groups(self, action, group=None): """Deny modifications on system groups.""" - group = self.get_object() + if group is None: + group = self.get_object() if group.system: key = "group" message = "{} cannot be performed on system groups.".format(action.upper()) @@ -410,10 +415,8 @@ def update(self, request, *args, **kwargs): return update_group - def add_principals(self, group, principals, org_id=None): - """Process list of principals and add them to the group.""" - tenant = self.request.tenant - + def validate_principals_in_proxy_request(self, principals, org_id=None): + """Validate principals in proxy request.""" users = [principal.get("username") for principal in principals] resp = self.proxy.request_filtered_principals(users, org_id=org_id, limit=len(users)) if "errors" in resp: @@ -423,7 +426,13 @@ def add_principals(self, group, principals, org_id=None): "status_code": status.HTTP_404_NOT_FOUND, "errors": [{"detail": "User(s) {} not found.".format(users), "status": "404", "source": "principals"}], } - for item in resp.get("data", []): + return resp + + def add_principals(self, group, principals_from_response, org_id=None): + """Add principals to the group.""" + tenant = self.request.tenant + new_principals = [] + for item in principals_from_response: username = item["username"] try: principal = Principal.objects.get(username__iexact=username, tenant=tenant) @@ -431,17 +440,16 @@ def add_principals(self, group, principals, org_id=None): principal = Principal.objects.create(username=username, tenant=tenant) logger.info("Created new principal %s for org_id %s.", username, org_id) group.principals.add(principal) + new_principals.append(principal) group_principal_change_notification_handler(self.request.user, group, username, "added") - return group + return group, new_principals - def add_service_accounts( + def raise_error_if_service_accounts_not_present_in_it_service( self, user: User, - group: Group, service_accounts: Iterable[dict], - org_id: str = "", - ) -> Group: - 
"""Process the list of service accounts and add them to the group.""" + ): + """Validate service account in it service.""" # Fetch all the user's service accounts from IT. If we are on a development or testing environment, we might # want to skip calling IT it_service = ITService() @@ -464,9 +472,16 @@ def add_service_accounts( if len(invalid_service_accounts) > 0: raise ServiceAccountNotFoundError(f"Service account(s) {invalid_service_accounts} not found.") + def add_service_accounts( + self, + group: Group, + service_accounts: Iterable[dict], + org_id: str = "", + ) -> Tuple[Group, List[Principal]]: + """Add service accounts to the group.""" # Get the tenant in order to fetch or store the service account in the database. tenant: Tenant = self.request.tenant - + new_service_accounts = [] # Fetch the service account from our database to add it to the group. If it doesn't exist, we create # it. for specified_sa in service_accounts: @@ -487,6 +502,7 @@ def add_service_accounts( logger.info("Created new service account %s for org_id %s.", client_id, org_id) group.principals.add(principal) + new_service_accounts.append(principal) group_principal_change_notification_handler( self.request.user, group, @@ -494,7 +510,7 @@ def add_service_accounts( "added", ) - return group + return group, new_service_accounts def remove_principals(self, group, principals, org_id=None): """Process list of principals and remove them from the group.""" @@ -518,16 +534,17 @@ def remove_principals(self, group, principals, org_id=None): "source": "principals", } ], - } + }, [] - with transaction.atomic(): - for principal in valid_principals: - group.principals.remove(principal) + principals_to_remove = [] + for principal in valid_principals: + group.principals.remove(principal) + principals_to_remove.append(principal) logger.info(f"[Request_id:{req_id}] {valid_usernames} removed from group {group.name} for org id {org_id}.") for username in principals: group_principal_change_notification_handler(self.request.user, group, username, "removed") - return group + return group, principals_to_remove @action(detail=True, methods=["get", "post", "delete"]) def principals(self, request: Request, uuid: Optional[UUID] = None): @@ -610,15 +627,10 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): HTTP/1.1 204 NO CONTENT """ validate_uuid(uuid, "group uuid validation") - group = self.get_object() + org_id = self.request.user.org_id + group = self.get_object() if request.method == "POST": - # Make sure that system groups are kept unmodified. - self.protect_system_groups("add principals") - - if not request.user.admin: - self.protect_group_with_user_access_admin_role(group.roles_with_access(), "add_principals") - serializer = GroupPrincipalInputSerializer(data=request.data) # Serialize the payload and validate that it is correct. @@ -635,6 +647,11 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): else: principals.append(specified_principal) + self.protect_system_groups("add principals", group) + + if not request.user.admin: + self.protect_group_with_user_access_admin_role(group.roles_with_access(), "add principals") + # Process the service accounts and add them to the group. 
if len(service_accounts) > 0: token_validator = ITSSOTokenValidator() @@ -642,13 +659,9 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): request=request, additional_scopes_to_validate=set[ScopeClaims]([ScopeClaims.SERVICE_ACCOUNTS_CLAIM]), ) - try: - resp = self.add_service_accounts( - user=request.user, - group=group, - service_accounts=service_accounts, - org_id=org_id, + self.raise_error_if_service_accounts_not_present_in_it_service( + user=request.user, service_accounts=service_accounts ) except InsufficientPrivilegesError as ipe: return Response( @@ -668,18 +681,33 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): ) # Process user principals and add them to the group. + principals_from_response = [] if len(principals) > 0: - resp = self.add_principals(group, principals, org_id=org_id) + proxy_response = self.validate_principals_in_proxy_request(principals, org_id=org_id) + if len(proxy_response.get("data", [])) > 0: + principals_from_response = proxy_response.get("data", []) + if isinstance(proxy_response, dict) and "errors" in proxy_response: + return Response(status=proxy_response["status_code"], data=proxy_response["errors"]) + + with transaction.atomic(): + new_service_accounts = [] + if len(service_accounts) > 0: + group, new_service_accounts = self.add_service_accounts( + group=group, + service_accounts=service_accounts, + org_id=org_id, + ) + new_principals = [] + if len(principals) > 0: + group, new_principals = self.add_principals(group, principals_from_response, org_id=org_id) - # Storing user principals might return an error structure instead of a group, - # so we need to check that before returning a response. - if isinstance(resp, dict) and "errors" in resp: - return Response(status=resp["status_code"], data=resp["errors"]) + dual_write_handler = RelationApiDualWriteGroupHandler( + group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP, new_principals + new_service_accounts + ) + dual_write_handler.replicate_new_principals() # Serialize the group... - output = GroupSerializer(resp) - - # ... and return it. + output = GroupSerializer(group) response = Response(status=status.HTTP_200_OK, data=output.data) elif request.method == "GET": # Check if the request comes with a bunch of service account client IDs that we need to check. Since this @@ -856,60 +884,43 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): message = "Query parameter {} or {} is required.".format(SERVICE_ACCOUNTS_KEY, USERNAMES_KEY) raise serializers.ValidationError({key: _(message)}) - # Remove the service accounts from the group. - if SERVICE_ACCOUNTS_KEY in request.query_params: - service_accounts_parameter = request.query_params.get(SERVICE_ACCOUNTS_KEY, "") - service_accounts = [ - service_account.strip() for service_account in service_accounts_parameter.split(",") - ] - - try: - token_validator = ITSSOTokenValidator() - request.user.bearer_token = token_validator.validate_token( - request=request, - additional_scopes_to_validate=set[ScopeClaims]([ScopeClaims.SERVICE_ACCOUNTS_CLAIM]), - ) + with transaction.atomic(): + service_accounts_to_remove = [] + # Remove the service accounts from the group. 
+ if SERVICE_ACCOUNTS_KEY in request.query_params: + service_accounts_parameter = request.query_params.get(SERVICE_ACCOUNTS_KEY, "") + service_accounts = [ + service_account.strip() for service_account in service_accounts_parameter.split(",") + ] - self.remove_service_accounts( + service_accounts_to_remove = self.remove_service_accounts( user=request.user, service_accounts=service_accounts, group=group, org_id=org_id, ) - except InsufficientPrivilegesError as ipe: - return Response( - status=status.HTTP_403_FORBIDDEN, - data={ - "errors": [{"detail": str(ipe), "status": status.HTTP_403_FORBIDDEN, "source": "groups"}] - }, - ) - except ValueError as ve: - return Response( - status=status.HTTP_404_NOT_FOUND, - data={ - "errors": [ - { - "detail": str(ve), - "status": status.HTTP_404_NOT_FOUND, - "source": "groups", - } - ], - }, - ) - # Create a default and successful response object. If no user principals are to be removed below, this - # response will be returned. Else, it will be overridden with whichever response the user removal - # generates. - response = Response(status=status.HTTP_204_NO_CONTENT) - - # Remove the users from the group too. - if USERNAMES_KEY in request.query_params: - username = request.query_params.get(USERNAMES_KEY, "") - principals = [name.strip() for name in username.split(",")] - resp = self.remove_principals(group, principals, org_id=org_id) - if isinstance(resp, dict) and "errors" in resp: - return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) - response = Response(status=status.HTTP_204_NO_CONTENT) + # Create a default and successful response object. If no user principals are to be removed below, + # this response will be returned. Else, it will be overridden with whichever response the user + # removal generates. + response = Response(status=status.HTTP_204_NO_CONTENT) + + principals_to_remove = [] + # Remove the users from the group too. + if USERNAMES_KEY in request.query_params: + username = request.query_params.get(USERNAMES_KEY, "") + principals = [name.strip() for name in username.split(",")] + resp, principals_to_remove = self.remove_principals(group, principals, org_id=org_id) + if isinstance(resp, dict) and "errors" in resp: + return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) + response = Response(status=status.HTTP_204_NO_CONTENT) + + dual_write_handler = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.REMOVE_PRINCIPALS_FROM_GROUP, + principals_to_remove + service_accounts_to_remove, + ) + dual_write_handler.replicate_removed_principals() return response @@ -1096,9 +1107,7 @@ def obtain_roles_with_exclusion(self, request, group): roles_for_group = group.roles().values("uuid") return roles.exclude(uuid__in=roles_for_group) - def remove_service_accounts( - self, user: User, group: Group, service_accounts: Iterable[str], org_id: str = "" - ) -> None: + def remove_service_accounts(self, user: User, group: Group, service_accounts: Iterable[str], org_id: str = ""): """Remove the given service accounts from the tenant.""" # Log our intention. request_id = getattr(self.request, "req_id", None) @@ -1126,10 +1135,11 @@ def remove_service_accounts( raise ValueError(f"Service account(s) {service_account_ids_diff} not found in the group '{group.name}'") + removed_service_accounts = [] # Remove service accounts from the group. 
- with transaction.atomic(): - for service_account in valid_service_accounts: - group.principals.remove(service_account) + for service_account in valid_service_accounts: + group.principals.remove(service_account) + removed_service_accounts.append(service_account) logger.info( f"[Request_id:{request_id}] {valid_service_account_ids} " @@ -1137,3 +1147,5 @@ def remove_service_accounts( ) for username in service_accounts: group_principal_change_notification_handler(self.request.user, group, username, "removed") + + return removed_service_accounts diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 893498189..884c6b0d0 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -55,6 +55,8 @@ class ReplicationEventType(str, Enum): CREATE_GROUP = "create_group" UPDATE_GROUP = "update_group" DELETE_GROUP = "delete_group" + ADD_PRINCIPALS_TO_GROUP = "add_principals_to_group" + REMOVE_PRINCIPALS_FROM_GROUP = "remove_principals_from_group" class ReplicationEvent: @@ -91,9 +93,21 @@ def replicate(self, event: ReplicationEvent): class OutboxReplicator(RelationReplicator): """Replicates relations via the outbox table.""" - def __init__(self, role): + def __init__(self, record): """Initialize OutboxReplicator.""" - self.role = role + self.record = record + + def _record_name(self): + """Return record name.""" + return self.record.name + + def _record_uuid(self): + """Return record uuid.""" + return self.record.uuid + + def _record_class(self): + """Return record class.""" + return self.record.__class__.__name__ def replicate(self, event: ReplicationEvent): """Replicate the given event to Kessel Relations via the Outbox.""" @@ -102,7 +116,12 @@ def replicate(self, event: ReplicationEvent): def _build_replication_event(self, relations_to_add, relations_to_remove): """Build replication event.""" - logger.info("[Dual Write] Build Replication event for role(%s): '%s'", self.role.uuid, self.role.name) + logger.info( + "[Dual Write] Build Replication event for %s(%s): '%s'", + self._record_class(), + self._record_uuid(), + self._record_name(), + ) add_json = [] for relation in relations_to_add: add_json.append(json_format.MessageToDict(relation)) @@ -117,9 +136,18 @@ def _build_replication_event(self, relations_to_add, relations_to_remove): def _save_replication_event(self, payload, event_type, aggregateid): """Save replication event.""" logger.info( - "[Dual Write] Save replication event into outbox table for role(%s): '%s'", self.role.uuid, self.role.name + "[Dual Write] Save replication event into outbox table for %s(%s): '%s'", + self._record_class(), + self._record_uuid(), + self._record_name(), + ) + logger.info( + "[Dual Write] Replication event: %s for %s(%s): '%s'", + payload, + self._record_class(), + self._record_uuid(), + self._record_name(), ) - logger.info("[Dual Write] Replication event: %s for role(%s): '%s'", payload, self.role.uuid, self.role.name) # https://debezium.io/documentation/reference/stable/transformations/outbox-event-router.html#basic-outbox-table outbox_record = Outbox.objects.create( aggregatetype="RelationReplicationEvent", diff --git a/rbac/migration_tool/in_memory_tuples.py b/rbac/migration_tool/in_memory_tuples.py index b801aec09..0c1a719bb 100644 --- a/rbac/migration_tool/in_memory_tuples.py +++ b/rbac/migration_tool/in_memory_tuples.py @@ -226,9 +226,9 @@ def predicate(rel: RelationTuple) -> bool: return 
TuplePredicate(predicate, f'resource_id("{id}")') -def resource(namespace: str, name: str, id: str) -> Callable[[RelationTuple], bool]: +def resource(namespace: str, name: str, id: object) -> Callable[[RelationTuple], bool]: """Return a predicate that is true if the resource matches the given namespace and name.""" - return all_of(resource_type(namespace, name), resource_id(id)) + return all_of(resource_type(namespace, name), resource_id(str(id))) def relation(relation: str) -> Callable[[RelationTuple], bool]: diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index b210c365a..c8f9f94b8 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -35,6 +35,34 @@ from tests.identity_request import IdentityRequest +def generate_relation_entry(group_uuid, principal_uuid): + relation_entry = {"resource": {}} + + relation_entry["resource"]["type"] = {} + relation_entry["resource"]["type"]["namespace"] = "rbac" + relation_entry["resource"]["type"]["name"] = "group" + relation_entry["resource"]["id"] = group_uuid + + relation_entry["relation"] = "member" + + relation_entry["subject"] = {} + relation_entry["subject"]["subject"] = {} + relation_entry["subject"]["subject"]["type"] = {} + relation_entry["subject"]["subject"]["type"]["namespace"] = "rbac" + relation_entry["subject"]["subject"]["type"]["name"] = "user" + relation_entry["subject"]["subject"]["id"] = principal_uuid + + return relation_entry + + +def generate_replication_event_to_add_principals(group_uuid, principal_uuid): + return {"relations_to_add": [generate_relation_entry(group_uuid, principal_uuid)], "relations_to_remove": []} + + +def generate_replication_event_to_remove_principals(group_uuid, principal_uuid): + return {"relations_to_add": [], "relations_to_remove": [generate_relation_entry(group_uuid, principal_uuid)]} + + class GroupViewsetTests(IdentityRequest): """Test the group viewset.""" @@ -917,11 +945,12 @@ def test_add_group_principal_invalid_guid(self): response = client.post(url, test_data, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch( "management.principal.proxy.PrincipalProxy.request_filtered_principals", return_value={"status_code": 200, "data": []}, ) - def test_add_group_principal_not_exists(self, mock_request): + def test_add_group_principal_not_exists(self, mock_request, mock_method): """Test that adding a non-existing principal into existing group causes a 404""" url = reverse("group-principals", kwargs={"uuid": self.group.uuid}) client = APIClient() @@ -929,13 +958,15 @@ def test_add_group_principal_not_exists(self, mock_request): response = client.post(url, test_data, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND) + self.assertIsNone(mock_method.call_args) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch( "management.principal.proxy.PrincipalProxy.request_filtered_principals", return_value={"status_code": 200, "data": [{"username": "test_add_user"}]}, ) @patch("core.kafka.RBACProducer.send_kafka_message") - def test_add_group_principals_success(self, send_kafka_message, mock_request): + def test_add_group_principals_success(self, send_kafka_message, mock_request, mock_method): """Test that adding a principal to a group returns successfully.""" # Create a group and a cross 
account user. with self.settings(NOTIFICATIONS_ENABLED=True): @@ -960,6 +991,12 @@ def test_add_group_principals_success(self, send_kafka_message, mock_request): self.assertEqual(response.data.get("principals")[0], {"username": username}) self.assertEqual(principal.tenant, self.tenant) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_add_principals(str(test_group.uuid), str(principal.uuid)), + actual_call_arg, + ) + send_kafka_message.assert_called_with( settings.NOTIFICATIONS_TOPIC, { @@ -1064,12 +1101,13 @@ def test_get_group_principals_nonempty_admin_only(self, mock_request): self.assertEqual(len(response.data.get("data")), 1) self.assertEqual(response.data.get("data")[0].get("username"), "test_user") + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch( "management.principal.proxy.PrincipalProxy.request_filtered_principals", return_value={"status_code": 200, "data": [{"username": "test_user"}]}, ) @patch("core.kafka.RBACProducer.send_kafka_message") - def test_remove_group_principals_success(self, send_kafka_message, mock_request): + def test_remove_group_principals_success(self, send_kafka_message, mock_request, mock_method): """Test that removing a principal to a group returns successfully.""" with self.settings(NOTIFICATIONS_ENABLED=True): test_user = Principal.objects.create(username="test_user", tenant=self.tenant) @@ -1108,6 +1146,12 @@ def test_remove_group_principals_success(self, send_kafka_message, mock_request) ANY, ) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_remove_principals(str(self.group.uuid), str(test_user.uuid)), + actual_call_arg, + ) + def test_remove_group_principals_invalid(self): """Test that removing a principal returns an error with invalid data format.""" url = reverse("group-principals", kwargs={"uuid": self.group.uuid}) @@ -3580,7 +3624,8 @@ def test_update_group_with_User_Access_Admin_fail(self): response = client.put(url, request_body, format="json", **self.headers_org_admin) self.assertEqual(response.status_code, status.HTTP_200_OK) - def test_remove_group_without_User_Access_Admin_fail(self): + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") + def test_remove_group_without_User_Access_Admin_fail(self, mock_method): """Test that non org admin without 'User Access administrator' role cannot remove a group.""" test_group = Group(name="test group", tenant=self.tenant) test_group.save() @@ -3591,7 +3636,7 @@ def test_remove_group_without_User_Access_Admin_fail(self): response = client.delete(url, **self.headers_user_based_principal) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertEqual(response.data.get("errors")[0].get("detail"), self.no_permission_err_message) - + self.assertIsNone(mock_method.call_args) response = client.delete(url, **self.headers_service_account_principal) self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertEqual(response.data.get("errors")[0].get("detail"), self.no_permission_err_message) @@ -3834,8 +3879,9 @@ def test_add_user_based_principal_in_group_without_User_Access_Admin_fail(self, self.assertEqual(response.status_code, status.HTTP_200_OK) @override_settings(IT_BYPASS_TOKEN_VALIDATION=True) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch("management.principal.it_service.ITService.request_service_accounts") - 
def test_add_service_account_principal_in_group_without_User_Access_Admin_fail(self, mock_request): + def test_add_service_account_principal_in_group_without_User_Access_Admin_fail(self, mock_request, mock_method): """ Test that non org admin without 'User Access administrator' role cannot add service account based principal into a group without 'User Access administrator' role. @@ -3884,11 +3930,17 @@ def test_add_service_account_principal_in_group_without_User_Access_Admin_fail(s response = client.post(url, request_body, format="json", **self.headers_org_admin) self.assertEqual(response.status_code, status.HTTP_200_OK) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_add_principals(str(test_group.uuid), str(sa_principal.uuid)), actual_call_arg + ) + + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch( "management.principal.proxy.PrincipalProxy.request_filtered_principals", return_value={"status_code": 200, "data": []}, ) - def test_add_user_based_principal_in_group_with_User_Access_Admin_success(self, mock_request): + def test_add_user_based_principal_in_group_with_User_Access_Admin_success(self, mock_request, mock_method): """ Test that non org admin with 'User Access administrator' role can add user based principal into a group without 'User Access administrator' role. @@ -3914,13 +3966,19 @@ def test_add_user_based_principal_in_group_with_User_Access_Admin_success(self, response = client.post(url, request_body, format="json", **self.headers_user_based_principal) self.assertEqual(response.status_code, status.HTTP_200_OK) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_add_principals(str(test_group.uuid), str(test_principal.uuid)), + actual_call_arg, + ) response = client.post(url, request_body, format="json", **self.headers_service_account_principal) self.assertEqual(response.status_code, status.HTTP_200_OK) @override_settings(IT_BYPASS_TOKEN_VALIDATION=True) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch("management.principal.it_service.ITService.request_service_accounts") - def test_add_service_account_principal_in_group_with_User_Access_Admin_success(self, mock_request): + def test_add_service_account_principal_in_group_with_User_Access_Admin_success(self, mock_request, mock_method): """ Test that non org admin with 'User Access administrator' role can add service account based principal into a group without 'User Access administrator' role. 
@@ -3965,6 +4023,12 @@ def test_add_service_account_principal_in_group_with_User_Access_Admin_success(s response = client.post(url, request_body, format="json", **self.headers_user_based_principal) self.assertEqual(response.status_code, status.HTTP_200_OK) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_add_principals(str(test_group.uuid), str(sa_principal.uuid)), + actual_call_arg, + ) + response = client.post(url, request_body, format="json", **self.headers_service_account_principal) self.assertEqual(response.status_code, status.HTTP_200_OK) @@ -4175,8 +4239,9 @@ def test_remove_user_based_principal_from_group_with_User_Access_Admin_success(s response = client.delete(url, format="json", **self.headers_service_account_principal) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @override_settings(IT_BYPASS_TOKEN_VALIDATION=True) - def test_remove_service_account_principal_from_group_with_User_Access_Admin_success(self): + def test_remove_service_account_principal_from_group_with_User_Access_Admin_success(self, mock_method): """ Test that non org admin with 'User Access administrator' role can remove service account based principal from a group without 'User Access administrator' role. @@ -4207,6 +4272,12 @@ def test_remove_service_account_principal_from_group_with_User_Access_Admin_succ response = client.delete(url, format="json", **self.headers_user_based_principal) self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + actual_call_arg = mock_method.call_args[0][0] + self.assertEqual( + generate_replication_event_to_remove_principals(str(test_group.uuid), str(sa_principal.uuid)), + actual_call_arg, + ) + # Add once removed principal into group test_group.principals.add(sa_principal) test_group.save() diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 303469139..5d7d06653 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -16,9 +16,11 @@ # """Test tuple changes for RBAC operations.""" +from typing import Tuple import unittest from django.test import TestCase, override_settings from management.group.model import Group +from management.group.relation_api_dual_write_group_handler import RelationApiDualWriteGroupHandler from management.permission.model import Permission from management.policy.model import Policy from management.principal.model import Principal @@ -113,10 +115,49 @@ def _workspace_access_to_resource_definition(self, default: list[str], **kwargs: ], ] - def given_group(self, name: str, users: list[str]) -> Group: + def given_group( + self, name: str, users: list[str] = [], service_accounts: list[str] = [] + ) -> Tuple[Group, list[Principal]]: """Create a new group with the given name and users.""" - # TODO: replicate group membership - return self.fixture.new_group(name=name, users=users, tenant=self.tenant) + group, principals = self.fixture.new_group( + name=name, users=users, service_accounts=service_accounts, tenant=self.tenant + ) + dual_write = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.CREATE_GROUP, + principals, + replicator=InMemoryRelationReplicator(self.tuples), + ) + dual_write.replicate_new_principals() + return group, principals + + def given_additional_group_members( + self, group: Group, users: list[str] = [], service_accounts: list[str] = [] + ) -> 
list[Principal]: + """Add users to the given group.""" + principals = self.fixture.add_members_to_group(group, users, service_accounts, group.tenant) + dual_write = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.CREATE_GROUP, + principals, + replicator=InMemoryRelationReplicator(self.tuples), + ) + dual_write.replicate_new_principals() + return principals + + def given_removed_group_members( + self, group: Group, users: list[str] = [], service_accounts: list[str] = [] + ) -> list[Principal]: + """Remove users from the given group.""" + principals = self.fixture.remove_members_from_group(group, users, service_accounts, group.tenant) + dual_write = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.CREATE_GROUP, + principals, + replicator=InMemoryRelationReplicator(self.tuples), + ) + dual_write.replicate_removed_principals() + return principals def given_policy(self, group: Group, roles: list[Role]) -> Policy: """Assign the [roles] to the [group].""" @@ -207,6 +248,34 @@ def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[ ) +class DualWriteGroupMembershipTestCase(DualWriteTestCase): + """Test dual write logic for group membership.""" + + def test_create_group_tuples(self): + """Create a group and add users to it.""" + group, principals = self.given_group("g1", ["u1", "u2"]) + tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) + self.assertEquals(len(tuples), 2) + self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + + def test_update_group_tuples(self): + """Update a group by adding and removing users.""" + group, principals = self.given_group("g1", ["u1", "u2"]) + + principals += self.given_additional_group_members(group, ["u3"]) + + tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) + self.assertEquals(len(tuples), 3) + self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + + self.given_removed_group_members(group, ["u2"]) + principals = [p for p in principals if p.username != "u2"] + + tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) + self.assertEquals(len(tuples), 2) + self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + + class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" @@ -214,7 +283,7 @@ class DualWriteSystemRolesTestCase(DualWriteTestCase): def test_system_role_grants_access_to_default_workspace(self): """Create role binding only when system role is bound to group.""" role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) - group = self.given_group("g1", ["u1", "u2"]) + group, _ = self.given_group("g1", ["u1", "u2"]) self.expect_num_role_bindings(0) @@ -236,7 +305,7 @@ def test_role_with_same_default_and_resource_permission_reuses_same_v2_role(self ws_2=["app1:hosts:read", "inventory:hosts:write"], ) - group = self.given_group("g1", ["u1", "u2"]) + group, _ = self.given_group("g1", ["u1", "u2"]) self.given_policy(group, roles=[role]) id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) @@ -325,8 +394,8 @@ def test_add_resource_uses_existing_groups(self): ws_2=["app1:hosts:read", "inventory:hosts:write"], ) - g1 = self.given_group("g2", ["u2"]) - g2 = self.given_group("g1", ["u1"]) + g1, _ = self.given_group("g2", ["u2"]) + g2, _ = self.given_group("g1", 
["u1"]) self.given_policy(g1, roles=[role]) self.given_policy(g2, roles=[role]) @@ -450,15 +519,13 @@ def update_custom_role(self, role: Role, resource_access: list[tuple[list[str], return role - def new_group(self, name: str, users: list[str], tenant: Tenant) -> Group: + def new_group( + self, name: str, users: list[str], service_accounts: list[str], tenant: Tenant + ) -> Tuple[Group, list[Principal]]: """Create a new group with the given name, users, and tenant.""" group = Group.objects.create(name=name, tenant=tenant) - - principals = [Principal.objects.get_or_create(username=username, tenant=tenant)[0] for username in users] - - group.principals.add(*principals) - - return group + principals = self.add_members_to_group(group, users, service_accounts, tenant) + return group, principals def add_role_to_group(self, role: Role, group: Group, tenant: Tenant) -> Policy: """Add a role to a group for a given tenant and return the policy.""" @@ -466,3 +533,35 @@ def add_role_to_group(self, role: Role, group: Group, tenant: Tenant) -> Policy: policy.roles.add(role) policy.save() return policy + + def add_members_to_group( + self, group: Group, users: list[str], service_accounts: list[str], principal_tenant: Tenant + ) -> list[Principal]: + """Add members to the group.""" + principals = [ + *[Principal.objects.get_or_create(username=username, tenant=principal_tenant)[0] for username in users], + *[ + Principal.objects.get_or_create(username=username, tenant=principal_tenant, type="service-account")[0] + for username in service_accounts + ], + ] + + group.principals.add(*principals) + + return principals + + def remove_members_from_group( + self, group: Group, users: list[str], service_accounts: list[str], principal_tenant: Tenant + ): + """Remove members from the group.""" + principals = [ + *[Principal.objects.get_or_create(username=username, tenant=principal_tenant)[0] for username in users], + *[ + Principal.objects.get_or_create(username=username, tenant=principal_tenant, type="service-account")[0] + for username in service_accounts + ], + ] + + group.principals.remove(*principals) + + return principals From 21e6d52df823d27485a590e6cd421c128e7e2274 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Thu, 3 Oct 2024 14:24:08 +0200 Subject: [PATCH 21/55] [RHCLOUD-34787] Dual Write: Generate replication event for adding and removing roles from group (#1211) * Sketch out algorithm for changing groups' roles * Move relations generation for add/remove role into dual write group handler * Generate role-group relations for custom role if binding mapping doesnt exist * Don't remove empty bindings for custom role when role is removed from group * Lock custom roles from removing role from groups * Skip replication in role assignment for default groups * Use name of relation from schema in role for role-groups assigments methods Schema: https://github.com/RedHatInsights/kessel-config/blob/master/schema.zed#L84 definition rbac/role_binding { permission subject = t_subject relation t_subject: rbac/user | rbac/group#member ... * Add method to replicate system role in role dual write handler * Add tests for replication of adding/removing roles to/from groups * Fix linter issues * Avoid extra query in group roles action * Update remove_role from group algorithm I'm not sure why I thought it would be the same for system roles and custom roles before. For system roles we can't retrieve all role bindings, that would query across all tenants on the platform. Adding and removing should be basically the same. 
So, I factored out a method that they could both reuse and just parameterized the few parts that change. * Pull out system role mapping logic into handler * Add a test case for some multi-tenant system role shenanigans * Assign group members, not the group * Add view tests for role-group assignments replication * Remove principals from dual write group constructor --------- Co-authored-by: Alec Henninger --- rbac/management/group/definer.py | 78 +++-- .../relation_api_dual_write_group_handler.py | 130 +++++++- rbac/management/group/view.py | 7 +- rbac/management/role/model.py | 22 +- .../role/relation_api_dual_write_handler.py | 17 ++ rbac/migration_tool/in_memory_tuples.py | 7 + rbac/migration_tool/migrate.py | 26 +- rbac/migration_tool/models.py | 49 ++- ...sharedSystemRolesReplicatedRoleBindings.py | 17 +- rbac/migration_tool/utils.py | 13 +- tests/management/group/test_view.py | 168 +++++++++- tests/management/role/test_dual_write.py | 286 ++++++++++++++++-- tests/management/role/test_view.py | 11 +- 13 files changed, 721 insertions(+), 110 deletions(-) diff --git a/rbac/management/group/definer.py b/rbac/management/group/definer.py index 66bf22f66..992001f1d 100644 --- a/rbac/management/group/definer.py +++ b/rbac/management/group/definer.py @@ -17,13 +17,17 @@ """Handler for system defined group.""" import logging +from typing import Union from uuid import uuid4 from django.db import transaction -from django.db.models import Q from django.db.models.query import QuerySet from django.utils.translation import gettext as _ from management.group.model import Group +from management.group.relation_api_dual_write_group_handler import ( + RelationApiDualWriteGroupHandler, + ReplicationEventType, +) from management.notifications.notification_handlers import ( group_flag_change_notification_handler, group_role_change_notification_handler, @@ -35,6 +39,7 @@ from api.models import Tenant + logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -106,15 +111,11 @@ def clone_default_group_in_public_schema(group, tenant): return group +@transaction.atomic def add_roles(group, roles_or_role_ids, tenant, user=None): """Process list of roles and add them to the group.""" - if not isinstance(roles_or_role_ids, QuerySet): - # If given an iterable of UUIDs, get the corresponding objects - roles = Role.objects.filter(uuid__in=roles_or_role_ids) - else: - roles = roles_or_role_ids + roles = _roles_by_query_or_ids(roles_or_role_ids) group_name = group.name - role_names = list(roles.values_list("name", flat=True)) group, created = Group.objects.get_or_create(name=group_name, tenant=tenant) system_policy_name = "System Policy for Group {}".format(group.uuid) @@ -125,10 +126,14 @@ def add_roles(group, roles_or_role_ids, tenant, user=None): if system_policy_created: logger.info(f"Created new system policy for tenant {tenant.org_id}.") - roles = Role.objects.filter( - Q(tenant=tenant) | Q(tenant=Tenant.objects.get(tenant_name="public")), name__in=role_names - ) - for role in roles: + system_roles = roles.filter(tenant=Tenant.objects.get(tenant_name="public")) + + # Custom roles are locked to prevent resources from being added/removed concurrently, + # in the case that the Roles had _no_ resources specified to begin with. + # This should not be necessary for system roles. + custom_roles = roles.filter(tenant=tenant).select_for_update() + + for role in [*system_roles, *custom_roles]: # Only Organization administrators are allowed to add the role with RBAC permission # higher than "read" into a group. 
for access in role.access.all(): @@ -144,31 +149,41 @@ def add_roles(group, roles_or_role_ids, tenant, user=None): "into groups." ) raise serializers.ValidationError({key: _(message)}) + # Only add the role if it was not attached - if not system_policy.roles.filter(pk=role.pk).exists(): - system_policy.roles.add(role) + if system_policy.roles.filter(pk=role.pk).exists(): + continue - # Send notifications - group_role_change_notification_handler(user, group, role, "added") + system_policy.roles.add(role) + dual_write_handler = RelationApiDualWriteGroupHandler(group, ReplicationEventType.ASSIGN_ROLE, []) + dual_write_handler.replicate_added_role(role) + # Send notifications + group_role_change_notification_handler(user, group, role, "added") + +@transaction.atomic def remove_roles(group, roles_or_role_ids, tenant, user=None): """Process list of roles and remove them from the group.""" - if not isinstance(roles_or_role_ids, QuerySet): - # If given an iterable of UUIDs, get the corresponding objects - roles = Role.objects.filter(uuid__in=roles_or_role_ids) - else: - roles = roles_or_role_ids - role_names = list(roles.values_list("name", flat=True)) - + roles = _roles_by_query_or_ids(roles_or_role_ids) group = Group.objects.get(name=group.name, tenant=tenant) - roles = group.roles().filter(name__in=role_names) + system_roles = roles.filter(tenant=Tenant.objects.get(tenant_name="public")) + + # Custom roles are locked to prevent resources from being added/removed concurrently, + # in the case that the Roles had _no_ resources specified to begin with. + # This should not be necessary for system roles. + custom_roles = roles.filter(tenant=tenant).select_for_update() + for policy in group.policies.all(): - # Only remove the role if it was attached - for role in roles: + for role in [*system_roles, *custom_roles]: + # Only remove the role if it was attached if policy.roles.filter(pk=role.pk).exists(): policy.roles.remove(role) logger.info(f"Removing role {role} from group {group.name} for tenant {tenant.org_id}.") + + dual_write_handler = RelationApiDualWriteGroupHandler(group, ReplicationEventType.UNASSIGN_ROLE, []) + dual_write_handler.replicate_removed_role(role) + # Send notifications group_role_change_notification_handler(user, group, role, "removed") @@ -182,3 +197,16 @@ def update_group_roles(group, roleset, tenant): role_ids = list(roleset.values_list("uuid", flat=True)) roles_to_remove = group.roles().exclude(uuid__in=role_ids) remove_roles(group, roles_to_remove, tenant) + + +def _roles_by_query_or_ids(roles_or_role_ids: Union[QuerySet[Role], list[str]]) -> QuerySet[Role]: + if not isinstance(roles_or_role_ids, QuerySet): + # If given an iterable of UUIDs, get the corresponding objects + return Role.objects.filter(uuid__in=roles_or_role_ids) + else: + # Given a queryset, so because it may not be efficient (e.g. query on non indexed field) + # keep prior behavior of querying once to get names, then use names (indexed) as base query + # for further queries. + # It MAY be faster to avoid this extra query, but this maintains prior behavior. 
+ role_names = list(roles_or_role_ids.values_list("name", flat=True)) + return Role.objects.filter(name__in=role_names) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index 49bba9c74..5fabd9269 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -17,10 +17,12 @@ """Class to handle Dual Write API related operations.""" import logging -from typing import Optional +from typing import Callable, Iterable, Optional +from uuid import uuid4 from django.conf import settings from management.principal.model import Principal +from management.role.model import BindingMapping, Role from management.role.relation_api_dual_write_handler import ( DualWriteException, OutboxReplicator, @@ -28,6 +30,7 @@ ReplicationEvent, ReplicationEventType, ) +from migration_tool.models import V2boundresource, V2role, V2rolebinding from migration_tool.utils import create_relationship logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -40,7 +43,6 @@ def __init__( self, group, event_type: ReplicationEventType, - principals: list[Principal], replicator: Optional[RelationReplicator] = None, ): """Initialize RelationApiDualWriteGroupHandler.""" @@ -49,7 +51,7 @@ def __init__( try: self.group_relations_to_add = [] self.group_relations_to_remove = [] - self.principals = principals + self.principals = [] self.group = group self.event_type = event_type self._replicator = replicator if replicator else OutboxReplicator(group) @@ -72,21 +74,21 @@ def _generate_relations(self): return relations - def replicate_new_principals(self): + def replicate_new_principals(self, principals: list[Principal]): """Replicate new principals into group.""" if not self.replication_enabled(): return logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) - + self.principals = principals self.group_relations_to_add = self._generate_relations() self._replicate() - def replicate_removed_principals(self): + def replicate_removed_principals(self, principals: list[Principal]): """Replicate removed principals from group.""" if not self.replication_enabled(): return logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) - + self.principals = principals self.group_relations_to_remove = self._generate_relations() self._replicate() @@ -107,3 +109,117 @@ def _replicate(self): ) except Exception as e: raise DualWriteException(e) + + def replicate_added_role(self, role: Role): + """Replicate added role.""" + if not self.replication_enabled(): + return + # TODO - This needs to be removed to seed the default groups. + if self.group.tenant.tenant_name == "public": + return + + def add_group_to_binding(mapping: BindingMapping): + self.group_relations_to_add.append(mapping.add_group_to_bindings(str(self.group.uuid))) + + def create_default_mapping(): + assert role.system is True, "Expected system role. Mappings for custom roles must already be created." + binding = V2rolebinding( + str(uuid4()), + # Assumes same role UUID for V2 system role equivalent. 
+ V2role.for_system_role(str(role.uuid)), + # TODO: don't use org id once we have workspace built ins + V2boundresource(("rbac", "workspace"), self.group.tenant.org_id), + groups=frozenset([str(self.group.uuid)]), + ) + mapping = BindingMapping.for_role_binding(binding, role) + self.group_relations_to_add.extend(mapping.as_tuples()) + return mapping + + self._update_mapping_for_role( + role, update_mapping=add_group_to_binding, create_default_mapping_for_system_role=create_default_mapping + ) + self._replicate() + + def replicate_removed_role(self, role: Role): + """Replicate removed role.""" + if not self.replication_enabled(): + return + # TODO - This needs to be removed to seed the default groups. + if self.group.tenant.tenant_name == "public": + return + + def remove_group_from_binding(mapping: BindingMapping): + self.group_relations_to_remove.append(mapping.remove_group_from_bindings(str(self.group.uuid))) + + self._update_mapping_for_role( + role, update_mapping=remove_group_from_binding, create_default_mapping_for_system_role=lambda: None + ) + self._replicate() + + def _update_mapping_for_role( + self, + role: Role, + update_mapping: Callable[[BindingMapping], None], + create_default_mapping_for_system_role: Callable[[], Optional[BindingMapping]], + ): + """ + Update mapping for role using callbacks based on current state. + + Callbacks are expected to modify [self.group_relations_to_add] and [self.group_relations_to_remove]. + This method handles persistence and locking itself. + """ + if not self.replication_enabled(): + return + # TODO - This needs to be removed to seed the default groups. + if self.group.tenant.tenant_name == "public": + return + + if role.system: + try: + # We lock the binding here because we cannot lock the Role for system roles, + # as they are used platform-wide, + # and their permissions do not refer to specific resources, + # so they can be changed concurrently safely. + mapping = ( + BindingMapping.objects.select_for_update() + .filter( + role=role, + resource_type_namespace="rbac", + resource_type_name="workspace", + # TODO: don't use org id once we have workspace built ins + resource_id=self.group.tenant.org_id, + ) + .get() + ) + update_mapping(mapping) + mapping.save(force_update=True) + + if mapping.is_unassigned(): + self.group_relations_to_remove.extend(mapping.as_tuples()) + mapping.delete() + except BindingMapping.DoesNotExist: + mapping = create_default_mapping_for_system_role() + if mapping is not None: + mapping.save(force_insert=True) + else: + # NOTE: The custom Role MUST be locked before this point in Read Committed isolation. + # There is a risk of write skew here otherwise, in the case that permissions are added + # to a custom role that currently has no permissions. + # In that case there would be no bindings to lock. + # We must lock something to prevent concurrent updates, so we lock the Role. + # Because custom roles must be locked already by this point, + # we don't need to lock the binding here. + bindings: Iterable[BindingMapping] = role.binding_mappings.all() + + if not bindings: + logger.warning( + "[Dual Write] Binding mappings not found for role(%s): '%s'. " + "Assuming no current relations exist. 
" + "If this is NOT the case, relations are inconsistent!", + role.uuid, + role.name, + ) + + for mapping in bindings: + update_mapping(mapping) + mapping.save(force_update=True) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index d807e9812..19b1e7983 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -704,9 +704,9 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): group, new_principals = self.add_principals(group, principals_from_response, org_id=org_id) dual_write_handler = RelationApiDualWriteGroupHandler( - group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP, new_principals + new_service_accounts + group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP ) - dual_write_handler.replicate_new_principals() + dual_write_handler.replicate_new_principals(new_principals + new_service_accounts) # Serialize the group... output = GroupSerializer(group) @@ -920,9 +920,8 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): dual_write_handler = RelationApiDualWriteGroupHandler( group, ReplicationEventType.REMOVE_PRINCIPALS_FROM_GROUP, - principals_to_remove + service_accounts_to_remove, ) - dual_write_handler.replicate_removed_principals() + dual_write_handler.replicate_removed_principals(principals_to_remove + service_accounts_to_remove) return response diff --git a/rbac/management/role/model.py b/rbac/management/role/model.py index 0c2adcc16..30edd564f 100644 --- a/rbac/management/role/model.py +++ b/rbac/management/role/model.py @@ -25,10 +25,11 @@ from django.db.models import signals from django.utils import timezone from internal.integration import sync_handlers +from kessel.relations.v1beta1.common_pb2 import Relationship from management.cache import AccessCache from management.models import Permission, Principal from management.rbac_fields import AutoDateTimeField -from migration_tool.models import V2boundresource, V2role, V2rolebinding +from migration_tool.models import V2boundresource, V2role, V2rolebinding, role_binding_group_subject_tuple from api.models import TenantAwareModel @@ -149,13 +150,24 @@ def for_role_binding(cls, role_binding: V2rolebinding, v1_role: Union[Role, str] resource_id=resource_id, ) - def remove_group_from_bindings(self, group_id: str): + def as_tuples(self) -> list[Relationship]: + """Create tuples from BindingMapping model.""" + v2_role_binding = self.get_role_binding() + return v2_role_binding.as_tuples() + + def is_unassigned(self): + """Return true if mapping is not assigned to any groups.""" + return len(self.mappings["groups"]) == 0 + + def remove_group_from_bindings(self, group_uuid: str) -> Relationship: """Remove group from mappings.""" - self.mappings["groups"] = [group for group in self.mappings["groups"] if group != group_id] + self.mappings["groups"] = [group for group in self.mappings["groups"] if group != group_uuid] + return role_binding_group_subject_tuple(self.mappings["id"], group_uuid) - def add_group_to_bindings(self, group_id: str): + def add_group_to_bindings(self, group_uuid: str) -> Relationship: """Add group to mappings.""" - self.mappings["groups"].append(group_id) + self.mappings["groups"].append(group_uuid) + return role_binding_group_subject_tuple(self.mappings["id"], group_uuid) def update_mappings_from_role_binding(self, role_binding: V2rolebinding): """Set mappings.""" diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 884c6b0d0..e18563834 100644 --- 
a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -27,6 +27,8 @@ from management.models import Outbox from management.role.model import BindingMapping, Role from migration_tool.migrate import migrate_role +from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_perm_to_v2_perm +from migration_tool.utils import create_relationship from api.models import Tenant @@ -316,3 +318,18 @@ def _generate_relations_and_mappings_for_role(self): return relations except Exception as e: raise DualWriteException(e) + + # TODO: Remove/replace - placeholder for testing + def replicate_new_system_role_permissions(self, role: Role): + """Replicate system role permissions.""" + permissions = list() + for access in role.access.all(): + v1_perm = access.permission + v2_perm = v1_perm_to_v2_perm(v1_perm) + permissions.append(v2_perm) + + for permission in permissions: + self.role_relations.append( + create_relationship(("rbac", "role"), str(role.uuid), ("rbac", "user"), str("*"), permission) + ) + self._replicate() diff --git a/rbac/migration_tool/in_memory_tuples.py b/rbac/migration_tool/in_memory_tuples.py index 0c1a719bb..44dce2bb1 100644 --- a/rbac/migration_tool/in_memory_tuples.py +++ b/rbac/migration_tool/in_memory_tuples.py @@ -19,6 +19,13 @@ class RelationTuple(NamedTuple): subject_id: str subject_relation: str + def stringify(self): + """Display all attributes in one line.""" + return ( + f"{self.resource_type_namespace}/{self.resource_type_name}:{self.resource_id}#{self.relation}" + f"@{self.subject_type_namespace}/{self.subject_type_name}:{self.subject_id}" + ) + T = TypeVar("T", bound=Hashable) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 558dcf54f..7a45e9c43 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -39,21 +39,7 @@ def get_kessel_relation_tuples( relationships: list[common_pb2.Relationship] = list() for v2_role_binding in v2_role_bindings: - relationships.append( - create_relationship( - ("rbac", "role_binding"), v2_role_binding.id, ("rbac", "role"), v2_role_binding.role.id, "granted" - ) - ) - - for perm in v2_role_binding.role.permissions: - relationships.append( - create_relationship(("rbac", "role"), v2_role_binding.role.id, ("rbac", "user"), "*", perm) - ) - for group in v2_role_binding.groups: - # These might be duplicate but it is OK, spiceDB will handle duplication through touch - relationships.append( - create_relationship(("rbac", "role_binding"), v2_role_binding.id, ("rbac", "group"), group, "subject") - ) + relationships.extend(v2_role_binding.as_tuples()) bound_resource = v2_role_binding.resource @@ -80,16 +66,6 @@ def get_kessel_relation_tuples( ) ) - relationships.append( - create_relationship( - bound_resource.resource_type, - bound_resource.resource_id, - ("rbac", "role_binding"), - v2_role_binding.id, - "user_grant", - ) - ) - return relationships diff --git a/rbac/migration_tool/models.py b/rbac/migration_tool/models.py index 02ad473ee..f6670fda7 100644 --- a/rbac/migration_tool/models.py +++ b/rbac/migration_tool/models.py @@ -18,6 +18,9 @@ from dataclasses import dataclass from typing import Tuple +from kessel.relations.v1beta1.common_pb2 import Relationship +from migration_tool.utils import create_relationship + @dataclass(frozen=True) class V1resourcedef: @@ -62,6 +65,11 @@ class V2boundresource: class V2role: """V2 role definition.""" + @classmethod + def for_system_role(cls, id: str) -> "V2role": + """Create a 
V2 role for a system role.""" + return cls(id=id, is_system=True, permissions=frozenset()) + id: str is_system: bool permissions: frozenset[str] @@ -71,7 +79,7 @@ def as_dict(self) -> dict: return { "id": self.id, "is_system": self.is_system, - "permissions": list(self.permissions), + "permissions": list(self.permissions) if not self.is_system else [], } @@ -92,6 +100,45 @@ def as_minimal_dict(self) -> dict: "groups": [g for g in self.groups], } + def as_tuples(self): + """Create tuples from V2rolebinding model.""" + tuples: list[Relationship] = list() + + tuples.append( + create_relationship(("rbac", "role_binding"), self.id, ("rbac", "role"), self.role.id, "granted") + ) + + for perm in self.role.permissions: + tuples.append(create_relationship(("rbac", "role"), self.role.id, ("rbac", "user"), "*", perm)) + + for group in self.groups: + # These might be duplicate but it is OK, spiceDB will handle duplication through touch + tuples.append(role_binding_group_subject_tuple(self.id, group)) + + tuples.append( + create_relationship( + self.resource.resource_type, + self.resource.resource_id, + ("rbac", "role_binding"), + self.id, + "user_grant", + ) + ) + + return tuples + + +def role_binding_group_subject_tuple(role_binding_id: str, group_uuid: str) -> Relationship: + """Create a relationship tuple for a role binding and a group.""" + return create_relationship( + ("rbac", "role_binding"), + role_binding_id, + ("rbac", "group"), + group_uuid, + "subject", + subject_relation="member", + ) + def split_v2_perm(perm: str): """Split V2 permission into app, resource and permission.""" diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 3d8000d2a..4f03a01ca 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -71,12 +71,17 @@ def set_system_roles(cls): # Skip roles such as OCM since they don't have permission if role.external_role_id(): continue - permission_list = list() - for access in role.access.all(): - v2_perm = cleanNameForV2SchemaCompatibility(access.permission.permission) - v2_perm = inventory_to_workspace(v2_perm) - permission_list.append(v2_perm) - add_system_role(cls.SYSTEM_ROLES, V2role(str(role.uuid), True, frozenset(permission_list))) + cls.set_system_role(role) + + @classmethod + def set_system_role(cls, role): + """Set the system role.""" + permission_list = list() + for access in role.access.all(): + v2_perm = cleanNameForV2SchemaCompatibility(access.permission.permission) + v2_perm = inventory_to_workspace(v2_perm) + permission_list.append(v2_perm) + add_system_role(cls.SYSTEM_ROLES, V2role(str(role.uuid), True, frozenset(permission_list))) def v1_role_to_v2_bindings( diff --git a/rbac/migration_tool/utils.py b/rbac/migration_tool/utils.py index e09d24e8b..2cbbe5153 100644 --- a/rbac/migration_tool/utils.py +++ b/rbac/migration_tool/utils.py @@ -2,7 +2,7 @@ import json import logging -from typing import Tuple +from typing import Optional, Tuple import grpc from django.conf import settings @@ -55,13 +55,20 @@ def validate_and_create_obj_ref(obj_name: Tuple[str, str], obj_id): def create_relationship( - resource_name: Tuple[str, str], resource_id, subject_name: Tuple[str, str], subject_id, relation + resource_name: Tuple[str, str], + resource_id, + subject_name: Tuple[str, str], + subject_id, + relation, + subject_relation: Optional[str] = None, ): """Create a relationship between a resource and 
a subject.""" return common_pb2.Relationship( resource=validate_and_create_obj_ref(resource_name, resource_id), relation=relation, - subject=common_pb2.SubjectReference(subject=validate_and_create_obj_ref(subject_name, subject_id)), + subject=common_pb2.SubjectReference( + subject=validate_and_create_obj_ref(subject_name, subject_id), relation=subject_relation + ), ) diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index 9464b532a..de7e41cd9 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -30,9 +30,20 @@ from api.models import Tenant, User from management.cache import TenantCache from management.group.serializer import GroupInputSerializer -from management.models import Access, Group, Permission, Principal, Policy, Role, ExtRoleRelation, ExtTenant +from management.models import ( + Access, + BindingMapping, + Group, + Permission, + Principal, + Policy, + Role, + ExtRoleRelation, + ExtTenant, +) from tests.core.test_kafka import copy_call_args from tests.identity_request import IdentityRequest +from tests.management.role.test_view import find_in_list, relation_api_tuple def generate_relation_entry(group_uuid, principal_uuid): @@ -55,6 +66,14 @@ def generate_relation_entry(group_uuid, principal_uuid): return relation_entry +def replication_event(relations_to_add, relations_to_remove): + """Create a replication event for a v1 role.""" + return { + "relations_to_add": relations_to_add, + "relations_to_remove": relations_to_remove, + } + + def generate_replication_event_to_add_principals(group_uuid, principal_uuid): return {"relations_to_add": [generate_relation_entry(group_uuid, principal_uuid)], "relations_to_remove": []} @@ -63,6 +82,17 @@ def generate_replication_event_to_remove_principals(group_uuid, principal_uuid): return {"relations_to_add": [], "relations_to_remove": [generate_relation_entry(group_uuid, principal_uuid)]} +def find_relation_in_list(relation_list, relation_tuple): + return find_in_list( + relation_list, + lambda r: r["resource"]["type"]["name"] == relation_tuple["resource"]["type"]["name"] + and r["resource"]["id"] == relation_tuple["resource"]["id"] + and r["relation"] == relation_tuple["relation"] + and r["subject"]["subject"]["type"]["name"] == relation_tuple["subject"]["subject"]["type"]["name"] + and r["subject"]["subject"]["id"] == relation_tuple["subject"]["subject"]["id"], + ) + + class GroupViewsetTests(IdentityRequest): """Test the group viewset.""" @@ -1590,7 +1620,8 @@ def test_principal_get_ordering_nonusername_fail(self, mock_request): self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual(principals, None) - def test_add_group_roles_system_policy_create_success(self): + @patch("management.group.relation_api_dual_write_group_handler.OutboxReplicator._save_replication_event") + def test_add_group_roles_system_policy_create_success(self, mock_method): """Test that adding a role to a group without a system policy returns successfully.""" url = reverse("group-roles", kwargs={"uuid": self.group.uuid}) client = APIClient() @@ -2910,6 +2941,7 @@ def test_add_group_roles_as_non_admin(self): test_data = {"roles": [self.roleB.uuid, self.dummy_role_id]} response = client.post(url, test_data, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) def test_remove_group_role_as_non_admin(self): @@ -3584,6 +3616,138 @@ def test_update_group_with_User_Access_Admin_success(self): self.assertEqual(response.status_code, 
status.HTTP_200_OK) self.assertEqual(response.json()["name"], new_name_sa) + @patch("management.group.relation_api_dual_write_group_handler.OutboxReplicator._save_replication_event") + def test_add_and_remove_role_to_group(self, mock_method): + Permission.objects.create(permission="app:inventory:read", tenant=self.tenant) + + access_data = [ + { + "permission": "app:inventory:read", + "resourceDefinitions": [ + {"attributeFilter": {"key": "group.id", "operation": "equal", "value": "111"}} + ], + } + ] + + test_data = {"name": "role_name", "display_name": "role_display", "access": access_data} + + url = reverse("role-list") + # create a role + client = APIClient() + response = client.post(url, test_data, format="json", **self.headers_org_admin) + role = Role.objects.get(uuid=response.data["uuid"]) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + + binding_mapping = BindingMapping.objects.get(role=role, resource_type_name="workspace", resource_id="111") + + # Create a group and role we need for the test + group = Group.objects.create(name="test group", tenant=self.tenant) + request_body = {"roles": [role.uuid]} + url = reverse("group-roles", kwargs={"uuid": group.uuid}) + client = APIClient() + + response = client.post(url, request_body, format="json", **self.headers_org_admin) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + actual_call_arg = mock_method.call_args[0][0] + to_add = actual_call_arg["relations_to_add"] + self.assertEqual(1, len(to_add)) + + def assert_group_tuples(tuple_to_replicate): + relation_tuple = relation_api_tuple( + "role_binding", binding_mapping.mappings["id"], "subject", "group", str(group.uuid), "member" + ) + + self.assertIsNotNone(find_relation_in_list(tuple_to_replicate, relation_tuple)) + + assert_group_tuples(to_add) + + url = reverse("group-roles", kwargs={"uuid": group.uuid}) + client = APIClient() + + url = "{}?roles={}".format(url, role.uuid) + response = client.delete(url, format="json", **self.headers_org_admin) + actual_call_arg = mock_method.call_args[0][0] + to_remove = actual_call_arg["relations_to_remove"] + self.assertEqual([], actual_call_arg["relations_to_add"]) + self.assertEqual(1, len(to_remove)) + + assert_group_tuples(to_remove) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + + @patch("management.group.relation_api_dual_write_group_handler.OutboxReplicator._save_replication_event") + def test_add_and_remove_system_role_to_group(self, mock_method): + # Create a group with 'User Access administrator' role and add principals we use in headers + group_with_admin = self._create_group_with_user_access_administrator_role(self.tenant) + group_with_admin.principals.add(self.user_based_principal, self.service_account_principal) + + # Create another group with 'User Access administrator' role we will try to update + test_group = Group(name="test group", tenant=self.tenant) + test_group.save() + + user_access_admin_role = group_with_admin.roles()[0] + request_body = {"roles": [user_access_admin_role.uuid]} + + url = reverse("group-roles", kwargs={"uuid": test_group.uuid}) + client = APIClient() + response = client.post(url, request_body, format="json", **self.headers_org_admin) + + binding_mapping = BindingMapping.objects.filter( + role=user_access_admin_role, resource_id=user_access_admin_role.tenant.org_id + ).get() + + actual_call_arg = mock_method.call_args[0][0] + to_add = actual_call_arg["relations_to_add"] + self.assertEqual([], actual_call_arg["relations_to_remove"]) + 
self.assertEqual(3, len(to_add)) + + def assert_group_tuples(tuple_to_replicate): + relation_tuple = relation_api_tuple( + "role_binding", + binding_mapping.mappings["id"], + "granted", + "role", + str(user_access_admin_role.uuid), + ) + self.assertIsNotNone(find_relation_in_list(tuple_to_replicate, relation_tuple)) + + relation_tuple = relation_api_tuple( + "role_binding", + binding_mapping.mappings["id"], + "subject", + "group", + str(test_group.uuid), + "member", + ) + + self.assertIsNotNone(find_relation_in_list(tuple_to_replicate, relation_tuple)) + + relation_tuple = relation_api_tuple( + "workspace", + test_group.tenant.org_id, + "user_grant", + "role_binding", + str(binding_mapping.mappings["id"]), + ) + + self.assertIsNotNone(find_relation_in_list(tuple_to_replicate, relation_tuple)) + + assert_group_tuples(to_add) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + url = reverse("group-roles", kwargs={"uuid": test_group.uuid}) + client = APIClient() + + url = "{}?roles={}".format(url, user_access_admin_role.uuid) + response = client.delete(url, format="json", **self.headers_org_admin) + actual_call_arg = mock_method.call_args[0][0] + to_remove = actual_call_arg["relations_to_remove"] + self.assertEqual([], actual_call_arg["relations_to_add"]) + self.assertEqual(3, len(to_remove)) + + assert_group_tuples(to_remove) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) + def test_update_group_with_User_Access_Admin_fail(self): """ Test that non org admin with 'User Access administrator' role cannot update a group diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 5d7d06653..161043a35 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -16,16 +16,19 @@ # """Test tuple changes for RBAC operations.""" -from typing import Tuple -import unittest +from typing import Optional, Tuple from django.test import TestCase, override_settings +from django.db.models import Q from management.group.model import Group from management.group.relation_api_dual_write_group_handler import RelationApiDualWriteGroupHandler from management.permission.model import Permission from management.policy.model import Policy from management.principal.model import Principal -from management.role.model import Access, ResourceDefinition, Role -from management.role.relation_api_dual_write_handler import RelationApiDualWriteHandler, ReplicationEventType +from management.role.model import Access, ResourceDefinition, Role, BindingMapping +from management.role.relation_api_dual_write_handler import ( + RelationApiDualWriteHandler, + ReplicationEventType, +) from migration_tool.in_memory_tuples import ( InMemoryRelationReplicator, InMemoryTuples, @@ -59,11 +62,25 @@ def setUp(self): self.tuples = InMemoryTuples() self.fixture = RbacFixture() self.tenant = self.fixture.new_tenant(name="tenant", org_id="1234567") + self.test_tenant = self.tenant + + def switch_to_new_tenant(self, name: str, org_id: str) -> Tenant: + """Switch to a new tenant with the given name and org_id.""" + tenant = self.fixture.new_tenant(name=name, org_id=org_id) + self.tenant = tenant + return tenant + + def switch_tenant(self, tenant: Tenant): + self.tenant = tenant + + def restore_test_tenant(self): + self.tenant = self.test_tenant - def default_workspace(self) -> str: + def default_workspace(self, tenant: Optional[Tenant] = None) -> str: """Return the default workspace ID.""" - assert self.tenant.org_id is not None, 
"Tenant org_id should not be None" - return self.tenant.org_id + tenant = tenant if tenant is not None else self.tenant + assert tenant.org_id is not None, "Tenant org_id should not be None" + return tenant.org_id def dual_write_handler(self, role: Role, event_type: ReplicationEventType) -> RelationApiDualWriteHandler: """Create a RelationApiDualWriteHandler for the given role and event type.""" @@ -80,8 +97,10 @@ def dual_write_handler_for_system_role( def given_v1_system_role(self, name: str, permissions: list[str]) -> Role: """Create a new system role with the given ID and permissions.""" role = self.fixture.new_system_role(name=name, permissions=permissions) - # TODO: Need to replicate system role permission relations - # This is different from group assignment + dual_write = self.dual_write_handler_for_system_role( + role, self.tenant, ReplicationEventType.CREATE_SYSTEM_ROLE + ) + dual_write.replicate_new_system_role_permissions(role) return role def given_v1_role(self, name: str, default: list[str], **kwargs: list[str]) -> Role: @@ -159,10 +178,37 @@ def given_removed_group_members( dual_write.replicate_removed_principals() return principals - def given_policy(self, group: Group, roles: list[Role]) -> Policy: + def given_roles_assigned_to_group(self, group: Group, roles: list[Role]) -> Policy: """Assign the [roles] to the [group].""" - # TODO: replicate role assignment - return self.fixture.add_role_to_group(roles[0], group, self.tenant) + assert roles, "Roles must not be empty" + dual_write_handler = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.ASSIGN_ROLE, + [], + replicator=InMemoryRelationReplicator(self.tuples), + ) + policy: Policy + for role in roles: + policy = self.fixture.add_role_to_group(role, group, self.tenant) + dual_write_handler.replicate_added_role(role) + + return policy + + def given_roles_unassigned_from_group(self, group: Group, roles: list[Role]) -> Policy: + """Unassign the [roles] to the [group].""" + assert roles, "Roles must not be empty" + policy = self.fixture.remove_role_from_group(roles[0], group, self.tenant) + dual_write_handler = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.UNASSIGN_ROLE, + [], + replicator=InMemoryRelationReplicator(self.tuples), + ) + policy: Policy + for role in roles: + policy = self.fixture.remove_role_from_group(role, group, self.tenant) + dual_write_handler.replicate_removed_role(role) + return policy def expect_1_v2_role_with_permissions(self, permissions: list[str]) -> str: """Assert there is a role matching the given permissions and return its ID.""" @@ -205,6 +251,12 @@ def expect_num_role_bindings(self, num: int): def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[str], for_groups: list[str]): """Assert there is a role binding with the given roles and groups.""" + self.expect_role_bindings_to_workspace(1, workspace, for_v2_roles, for_groups) + + def expect_role_bindings_to_workspace( + self, num: int, workspace: str, for_v2_roles: list[str], for_groups: list[str] + ): + """Assert there is [num] role bindings with the given roles and groups.""" # Find all bindings for the given workspace resources = self.tuples.find_tuples_grouped( all_of(resource("rbac", "workspace", workspace), relation("user_grant")), @@ -227,7 +279,7 @@ def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[ all_of( resource_type("rbac", "role_binding"), relation("subject"), - subject("rbac", "group", group_id), + subject("rbac", "group", group_id, 
"member"), ) for group_id in for_groups ], @@ -239,7 +291,7 @@ def expect_1_role_binding_to_workspace(self, workspace: str, for_v2_roles: list[ num_role_bindings = len(role_bindings) self.assertEqual( num_role_bindings, - 1, + num, f"Expected exactly 1 role binding against workspace {workspace} " f"with roles {for_v2_roles} and groups {for_groups}, " f"but got {len(role_bindings)}.\n" @@ -276,10 +328,68 @@ def test_update_group_tuples(self): self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) +class DualWriteGroupRolesTestCase(DualWriteTestCase): + """Test case for verifying the dual write functionality for group role assignments.""" + + def test_custom_roles_group_assignments_tuples(self): + role_1 = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + role_2 = self.given_v1_role( + "r2", + default=["app2:hosts:read", "inventory:systems:write"], + ws_2=["app2:hosts:read", "inventory:systems:write"], + ) + group, _ = self.given_group("g1", []) + + self.given_roles_assigned_to_group(group, roles=[role_1, role_2]) + + mappings = BindingMapping.objects.filter(Q(role=role_1) | Q(role=role_2)).values_list("mappings", flat=True) + + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("subject"), + subject_type("rbac", "group", "member"), + ) + ) + + self.assertEquals(len(tuples), 4) + for mapping in mappings: + for group_from_mapping in mapping["groups"]: + tuples = self.tuples.find_tuples( + all_of( + resource("rbac", "role_binding", mapping["id"]), + relation("subject"), + subject("rbac", "group", group_from_mapping, "member"), + ) + ) + self.assertEquals(len(tuples), 1) + self.assertEquals(tuples[0].subject_id, mapping["groups"][0]) + + self.given_roles_unassigned_from_group(group, [role_1, role_2]) + + mappings = BindingMapping.objects.filter(role=role_2).all() + for m in mappings: + self.assertEquals(m.mappings["groups"], []) + + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("subject"), + subject_type("rbac", "group"), + ) + ) + + self.assertEquals(len(tuples), 0) + + class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" - @unittest.skip("Not implemented yet") def test_system_role_grants_access_to_default_workspace(self): """Create role binding only when system role is bound to group.""" role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) @@ -287,12 +397,91 @@ def test_system_role_grants_access_to_default_workspace(self): self.expect_num_role_bindings(0) - self.given_policy(group, roles=[role]) + self.given_roles_assigned_to_group(group, roles=[role]) id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) - self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[group.id]) + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[id], for_groups=[str(group.uuid)] + ) self.expect_num_role_bindings(1) + def test_unassign_system_role_removes_role_binding_if_unassigned(self): + """Remove role binding when system role is unbound from group.""" + role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) + group, _ = self.given_group("g1", ["u1", "u2"]) + + self.given_roles_assigned_to_group(group, roles=[role]) + + id = 
self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[id], for_groups=[str(group.uuid)] + ) + + self.given_roles_unassigned_from_group(group, roles=[role]) + self.expect_num_role_bindings(0) + + def test_unassign_system_role_keeps_role_binding_if_still_assigned(self): + """Keep the role binding if it still has other groups assigned to it.""" + role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) + g1, _ = self.given_group("g1", ["u1", "u2"]) + g2, _ = self.given_group("g2", ["u1", "u2"]) + + self.expect_num_role_bindings(0) + + self.given_roles_assigned_to_group(g1, roles=[role]) + self.given_roles_assigned_to_group(g2, roles=[role]) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[id], for_groups=[str(g1.uuid), str(g2.uuid)] + ) + + self.given_roles_unassigned_from_group(g1, roles=[role]) + + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[str(g2.uuid)]) + + def test_assignment_is_tenant_specific(self): + """System role assignments are tenant-specific despite using the same role.""" + role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) + g1, _ = self.given_group("g1", ["u1", "u2"]) + self.given_roles_assigned_to_group(g1, roles=[role]) + + t2 = self.switch_to_new_tenant("tenant2", "7654321") + g2, _ = self.given_group("g2", ["u1", "u2"]) + self.given_roles_assigned_to_group(g2, roles=[role]) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_1_role_binding_to_workspace( + self.default_workspace(self.test_tenant), for_v2_roles=[id], for_groups=[str(g1.uuid)] + ) + self.expect_1_role_binding_to_workspace( + self.default_workspace(t2), for_v2_roles=[id], for_groups=[str(g2.uuid)] + ) + + def test_unassign_role_is_tenant_specific(self): + """System role unassignments are tenant-specific despite using the same role.""" + role = self.given_v1_system_role("r1", ["app1:hosts:read", "inventory:hosts:write"]) + g1, _ = self.given_group("g1", ["u1", "u2"]) + self.given_roles_assigned_to_group(g1, roles=[role]) + + t2 = self.switch_to_new_tenant("tenant2", "7654321") + g2, _ = self.given_group("g2", ["u1", "u2"]) + self.given_roles_assigned_to_group(g2, roles=[role]) + + self.given_roles_unassigned_from_group(g1, roles=[role]) + + id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_role_bindings_to_workspace( + 0, self.default_workspace(self.test_tenant), for_v2_roles=[id], for_groups=[str(g1.uuid)] + ) + self.expect_1_role_binding_to_workspace( + self.default_workspace(t2), for_v2_roles=[id], for_groups=[str(g2.uuid)] + ) + + # TODO: Add test to cover updating and deleting system role + class DualWriteCustomRolesTestCase(DualWriteTestCase): """Test dual write logic when we are working with custom roles.""" @@ -306,12 +495,13 @@ def test_role_with_same_default_and_resource_permission_reuses_same_v2_role(self ) group, _ = self.given_group("g1", ["u1", "u2"]) - self.given_policy(group, roles=[role]) + self.given_roles_assigned_to_group(group, roles=[role]) id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) - # TODO: assert group once group replication is implemented - 
self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[]) - self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[id], for_groups=[]) + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[id], for_groups=[str(group.uuid)] + ) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[id], for_groups=[str(group.uuid)]) def test_add_permissions_to_role(self): """Modify the role in place when adding permissions.""" @@ -327,16 +517,18 @@ def test_add_permissions_to_role(self): ws_2=["app1:hosts:read", "inventory:hosts:write", "app2:hosts:read"], ) + group, _ = self.given_group("g1", ["u1"]) + self.given_roles_assigned_to_group(group, [role]) + role_for_default = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) role_for_ws_2 = self.expect_1_v2_role_with_permissions( ["app1:hosts:read", "inventory:hosts:write", "app2:hosts:read"] ) - # TODO: assert group once group replication is implemented self.expect_1_role_binding_to_workspace( - self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[] + self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[str(group.uuid)] ) - self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[str(group.uuid)]) def test_remove_permissions_from_role(self): """Modify the role in place when removing permissions.""" @@ -352,14 +544,16 @@ def test_remove_permissions_from_role(self): ws_2=["app1:hosts:read"], ) + group, _ = self.given_group("g1", ["u1"]) + self.given_roles_assigned_to_group(group, [role]) + role_for_default = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) role_for_ws_2 = self.expect_1_v2_role_with_permissions(["app1:hosts:read"]) - # TODO: assert group once group replication is implemented self.expect_1_role_binding_to_workspace( - self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[] + self.default_workspace(), for_v2_roles=[role_for_default], for_groups=[str(group.uuid)] ) - self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[]) + self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[role_for_ws_2], for_groups=[str(group.uuid)]) def test_remove_permissions_from_role_back_to_original(self): """Modify the role in place when removing permissions, consolidating roles.""" @@ -396,8 +590,8 @@ def test_add_resource_uses_existing_groups(self): g1, _ = self.given_group("g2", ["u2"]) g2, _ = self.given_group("g1", ["u1"]) - self.given_policy(g1, roles=[role]) - self.given_policy(g2, roles=[role]) + self.given_roles_assigned_to_group(g1, roles=[role]) + self.given_roles_assigned_to_group(g2, roles=[role]) self.given_update_to_v1_role( role, @@ -453,6 +647,27 @@ def test_two_roles_with_same_resource_permissions_create_two_v2_roles(self): self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[roles[0]], for_groups=[]) self.expect_1_role_binding_to_workspace("ws_2", for_v2_roles=[roles[1]], for_groups=[]) + def test_unassigned_role_keeps_role_binding(self): + """Unassigning a role from a group does not remove the role binding.""" + role = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + group, _ = self.given_group("g1", ["u1"]) + self.given_roles_assigned_to_group(group, roles=[role]) + + 
id = self.expect_1_v2_role_with_permissions(["app1:hosts:read", "inventory:hosts:write"]) + + self.expect_1_role_binding_to_workspace( + self.default_workspace(), for_v2_roles=[id], for_groups=[str(group.uuid)] + ) + + self.given_roles_unassigned_from_group(group, roles=[role]) + + self.expect_1_role_binding_to_workspace(self.default_workspace(), for_v2_roles=[id], for_groups=[]) + class RbacFixture: """RBAC Fixture.""" @@ -529,11 +744,22 @@ def new_group( def add_role_to_group(self, role: Role, group: Group, tenant: Tenant) -> Policy: """Add a role to a group for a given tenant and return the policy.""" - policy, _ = Policy.objects.get_or_create(name=f"System Policy_{group.name}", group=group, tenant=tenant) + policy, _ = Policy.objects.get_or_create( + name=f"System Policy_{group.name}_{tenant.tenant_name}", group=group, tenant=tenant + ) policy.roles.add(role) policy.save() return policy + def remove_role_from_group(self, role: Role, group: Group, tenant: Tenant) -> Policy: + """Remove a role to a group for a given tenant and return the policy.""" + policy, _ = Policy.objects.get_or_create( + name=f"System Policy_{group.name}_{tenant.tenant_name}", group=group, tenant=tenant + ) + policy.roles.remove(role) + policy.save() + return policy + def add_members_to_group( self, group: Group, users: list[str], service_accounts: list[str], principal_tenant: Tenant ) -> list[Principal]: diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index d17fd5c2f..5f2954193 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -88,15 +88,22 @@ def relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid): return relations -def relation_api_tuple(resource_type, resource_id, relation, subject_type, subject_id): +def relation_api_tuple(resource_type, resource_id, relation, subject_type, subject_id, subject_relation=None): """Helper function for creating a relation tuple in json.""" return { "resource": relation_api_resource(resource_type, resource_id), "relation": relation, - "subject": {"subject": relation_api_resource(subject_type, subject_id)}, + "subject": relation_api_subject(subject_type, subject_id, subject_relation), } +def relation_api_subject(subject_type, subject_id, subject_relation=None): + subject = {"subject": relation_api_resource(subject_type, subject_id)} + if subject_relation is not None: + subject["relation"] = subject_relation + return subject + + def relation_api_resource(type_resource, id_resource): """Helper function for creating a relation resource in json.""" return {"type": {"namespace": "rbac", "name": type_resource}, "id": id_resource} From 21563799ffbaef4a00683aa6b1cec65791195b0d Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Thu, 3 Oct 2024 15:32:03 +0200 Subject: [PATCH 22/55] Fix migration names and conflicts --- ..._and_more.py => 0052_remove_rolemapping_v1_role_and_more.py} | 2 +- .../{0052_bindingmapping.py => 0053_bindingmapping.py} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename rbac/management/migrations/{0051_remove_rolemapping_v1_role_and_more.py => 0052_remove_rolemapping_v1_role_and_more.py} (91%) rename rbac/management/migrations/{0052_bindingmapping.py => 0053_bindingmapping.py} (94%) diff --git a/rbac/management/migrations/0051_remove_rolemapping_v1_role_and_more.py b/rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py similarity index 91% rename from rbac/management/migrations/0051_remove_rolemapping_v1_role_and_more.py rename to 
rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py index a65e6eb31..848d271d5 100644 --- a/rbac/management/migrations/0051_remove_rolemapping_v1_role_and_more.py +++ b/rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py @@ -6,7 +6,7 @@ class Migration(migrations.Migration): dependencies = [ - ("management", "0050_principal_user_id_alter_principal_type"), + ("management", "0051_alter_principal_user_id"), ] operations = [ diff --git a/rbac/management/migrations/0052_bindingmapping.py b/rbac/management/migrations/0053_bindingmapping.py similarity index 94% rename from rbac/management/migrations/0052_bindingmapping.py rename to rbac/management/migrations/0053_bindingmapping.py index e17eaa8ad..593e61fe3 100644 --- a/rbac/management/migrations/0052_bindingmapping.py +++ b/rbac/management/migrations/0053_bindingmapping.py @@ -7,7 +7,7 @@ class Migration(migrations.Migration): dependencies = [ - ("management", "0051_remove_rolemapping_v1_role_and_more"), + ("management", "0052_remove_rolemapping_v1_role_and_more"), ] operations = [ From 4bc84752e5551ca483447b7eccc62295229463de Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Thu, 3 Oct 2024 15:57:00 -0400 Subject: [PATCH 23/55] Update relations and types for latest schema (#1221) * Update relations and types for latest schema * Make my daily plea offering to the linter gods --- .../relation_api_dual_write_group_handler.py | 2 +- .../role/relation_api_dual_write_handler.py | 2 +- rbac/migration_tool/migrate.py | 20 +++++++++--- rbac/migration_tool/models.py | 8 ++--- tests/management/group/test_view.py | 6 ++-- tests/management/role/test_dual_write.py | 15 +++------ tests/management/role/test_view.py | 8 ++--- tests/migration_tool/tests_migrate.py | 32 +++++++++---------- 8 files changed, 48 insertions(+), 45 deletions(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index 5fabd9269..d8c5ea997 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -68,7 +68,7 @@ def _generate_relations(self): for principal in self.principals: relations.append( create_relationship( - ("rbac", "group"), str(self.group.uuid), ("rbac", "user"), str(principal.uuid), "member" + ("rbac", "group"), str(self.group.uuid), ("rbac", "principal"), str(principal.uuid), "member" ) ) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index e18563834..e30f16f97 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -330,6 +330,6 @@ def replicate_new_system_role_permissions(self, role: Role): for permission in permissions: self.role_relations.append( - create_relationship(("rbac", "role"), str(role.uuid), ("rbac", "user"), str("*"), permission) + create_relationship(("rbac", "role"), str(role.uuid), ("rbac", "principal"), str("*"), permission) ) self._replicate() diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 7a45e9c43..43d88e514 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -95,9 +95,11 @@ def migrate_workspace(tenant: Tenant, write_relationships: bool): create_relationship(("rbac", "workspace"), tenant.org_id, ("rbac", "workspace"), root_workspace, "parent"), create_relationship(("rbac", "workspace"), root_workspace, ("rbac", 
"tenant"), tenant.org_id, "parent"), ] - # Include realm for tenant + # Include platform for tenant relationships.append( - create_relationship(("rbac", "tenant"), str(tenant.org_id), ("rbac", "realm"), settings.ENV_NAME, "realm") + create_relationship( + ("rbac", "tenant"), str(tenant.org_id), ("rbac", "platform"), settings.ENV_NAME, "platform" + ) ) output_relationships(relationships, write_relationships) return root_workspace, tenant.org_id @@ -106,7 +108,9 @@ def migrate_workspace(tenant: Tenant, write_relationships: bool): def migrate_users(tenant: Tenant, write_relationships: bool): """Write users relationship to tenant.""" relationships = [ - create_relationship(("rbac", "tenant"), str(tenant.org_id), ("rbac", "user"), str(principal.uuid), "member") + create_relationship( + ("rbac", "tenant"), str(tenant.org_id), ("rbac", "principal"), str(principal.uuid), "member" + ) for principal in tenant.principal_set.all() ] output_relationships(relationships, write_relationships) @@ -119,7 +123,9 @@ def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): user_set = group.principals.all() for user in user_set: relationships.append( - create_relationship(("rbac", "group"), str(group.uuid), ("rbac", "user"), str(user.uuid), "member") + create_relationship( + ("rbac", "group"), str(group.uuid), ("rbac", "principal"), str(user.uuid), "member" + ) ) # Explicitly create relationships for platform default group group_default = tenant.group_set.filter(platform_default=True).first() @@ -127,7 +133,11 @@ def migrate_users_for_groups(tenant: Tenant, write_relationships: bool): group_default = Tenant.objects.get(tenant_name="public").group_set.get(platform_default=True) user_set = tenant.principal_set.filter(cross_account=False) for user in user_set: - relationships.append(create_relationship("group", str(group_default.uuid), "user", str(user.uuid), "member")) + relationships.append( + create_relationship( + ("rbac", "group"), str(group_default.uuid), ("rbac", "principal"), str(user.uuid), "member" + ) + ) output_relationships(relationships, write_relationships) diff --git a/rbac/migration_tool/models.py b/rbac/migration_tool/models.py index f6670fda7..84b89cdf4 100644 --- a/rbac/migration_tool/models.py +++ b/rbac/migration_tool/models.py @@ -104,12 +104,10 @@ def as_tuples(self): """Create tuples from V2rolebinding model.""" tuples: list[Relationship] = list() - tuples.append( - create_relationship(("rbac", "role_binding"), self.id, ("rbac", "role"), self.role.id, "granted") - ) + tuples.append(create_relationship(("rbac", "role_binding"), self.id, ("rbac", "role"), self.role.id, "role")) for perm in self.role.permissions: - tuples.append(create_relationship(("rbac", "role"), self.role.id, ("rbac", "user"), "*", perm)) + tuples.append(create_relationship(("rbac", "role"), self.role.id, ("rbac", "principal"), "*", perm)) for group in self.groups: # These might be duplicate but it is OK, spiceDB will handle duplication through touch @@ -121,7 +119,7 @@ def as_tuples(self): self.resource.resource_id, ("rbac", "role_binding"), self.id, - "user_grant", + "binding", ) ) diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index de7e41cd9..ef393864d 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -60,7 +60,7 @@ def generate_relation_entry(group_uuid, principal_uuid): relation_entry["subject"]["subject"] = {} relation_entry["subject"]["subject"]["type"] = {} relation_entry["subject"]["subject"]["type"]["namespace"] 
= "rbac" - relation_entry["subject"]["subject"]["type"]["name"] = "user" + relation_entry["subject"]["subject"]["type"]["name"] = "principal" relation_entry["subject"]["subject"]["id"] = principal_uuid return relation_entry @@ -3705,7 +3705,7 @@ def assert_group_tuples(tuple_to_replicate): relation_tuple = relation_api_tuple( "role_binding", binding_mapping.mappings["id"], - "granted", + "role", "role", str(user_access_admin_role.uuid), ) @@ -3725,7 +3725,7 @@ def assert_group_tuples(tuple_to_replicate): relation_tuple = relation_api_tuple( "workspace", test_group.tenant.org_id, - "user_grant", + "binding", "role_binding", str(binding_mapping.mappings["id"]), ) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 161043a35..abd066771 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -144,10 +144,9 @@ def given_group( dual_write = RelationApiDualWriteGroupHandler( group, ReplicationEventType.CREATE_GROUP, - principals, replicator=InMemoryRelationReplicator(self.tuples), ) - dual_write.replicate_new_principals() + dual_write.replicate_new_principals(principals) return group, principals def given_additional_group_members( @@ -158,10 +157,9 @@ def given_additional_group_members( dual_write = RelationApiDualWriteGroupHandler( group, ReplicationEventType.CREATE_GROUP, - principals, replicator=InMemoryRelationReplicator(self.tuples), ) - dual_write.replicate_new_principals() + dual_write.replicate_new_principals(principals) return principals def given_removed_group_members( @@ -172,10 +170,9 @@ def given_removed_group_members( dual_write = RelationApiDualWriteGroupHandler( group, ReplicationEventType.CREATE_GROUP, - principals, replicator=InMemoryRelationReplicator(self.tuples), ) - dual_write.replicate_removed_principals() + dual_write.replicate_removed_principals(principals) return principals def given_roles_assigned_to_group(self, group: Group, roles: list[Role]) -> Policy: @@ -184,7 +181,6 @@ def given_roles_assigned_to_group(self, group: Group, roles: list[Role]) -> Poli dual_write_handler = RelationApiDualWriteGroupHandler( group, ReplicationEventType.ASSIGN_ROLE, - [], replicator=InMemoryRelationReplicator(self.tuples), ) policy: Policy @@ -201,7 +197,6 @@ def given_roles_unassigned_from_group(self, group: Group, roles: list[Role]) -> dual_write_handler = RelationApiDualWriteGroupHandler( group, ReplicationEventType.UNASSIGN_ROLE, - [], replicator=InMemoryRelationReplicator(self.tuples), ) policy: Policy @@ -259,7 +254,7 @@ def expect_role_bindings_to_workspace( """Assert there is [num] role bindings with the given roles and groups.""" # Find all bindings for the given workspace resources = self.tuples.find_tuples_grouped( - all_of(resource("rbac", "workspace", workspace), relation("user_grant")), + all_of(resource("rbac", "workspace", workspace), relation("binding")), group_by=lambda t: (t.resource_type_namespace, t.resource_type_name, t.resource_id), ) @@ -270,7 +265,7 @@ def expect_role_bindings_to_workspace( all_of( resource_type("rbac", "role_binding"), one_of(*[resource_id(t.subject_id) for _, tuples in resources.items() for t in tuples]), - relation("granted"), + relation("role"), subject("rbac", "role", role_id), ) for role_id in for_v2_roles diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index 5f2954193..e9a4b4aea 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -71,19 +71,19 @@ def 
relation_api_tuples_for_v1_role(v1_role_uuid, default_workspace_uuid): mappings = BindingMapping.objects.filter(role=role_id).all() relations = [] for role_binding in [m.get_role_binding() for m in mappings]: - relation_tuple = relation_api_tuple("role_binding", role_binding.id, "granted", "role", role_binding.role.id) + relation_tuple = relation_api_tuple("role_binding", role_binding.id, "role", "role", role_binding.role.id) relations.append(relation_tuple) for permission in role_binding.role.permissions: - relation_tuple = relation_api_tuple("role", role_binding.role.id, permission, "user", "*") + relation_tuple = relation_api_tuple("role", role_binding.role.id, permission, "principal", "*") relations.append(relation_tuple) if "app_all_read" in role_binding.role.permissions: relation_tuple = relation_api_tuple( - "workspace", default_workspace_uuid, "user_grant", "role_binding", role_binding.id + "workspace", default_workspace_uuid, "binding", "role_binding", role_binding.id ) relations.append(relation_tuple) else: - relation_tuple = relation_api_tuple("keya/id", "valueA", "user_grant", "role_binding", role_binding.id) + relation_tuple = relation_api_tuple("keya/id", "valueA", "binding", "role_binding", role_binding.id) relations.append(relation_tuple) return relations diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index 8160faec3..a91eee24a 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -117,11 +117,11 @@ def test_migration_of_data(self, logger_mock): workspace_1 = "123456" workspace_2 = "654321" # Switch these two if rolebinding order is not the same as v2 roles - if call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}") not in logger_mock.info.call_args_list: + if call(f"role_binding:{rolebinding_a31}#role@role:{v2_role_a31}") not in logger_mock.info.call_args_list: rolebinding_a31, rolebinding_a32 = rolebinding_a32, rolebinding_a31 # Switch these two if binding is not in correct order if ( - call(f"workspace:{self.workspace_id_1}#user_grant@role_binding:{rolebinding_a31}") + call(f"workspace:{self.workspace_id_1}#binding@role_binding:{rolebinding_a31}") not in logger_mock.info.call_args_list ): workspace_1, workspace_2 = workspace_2, workspace_1 @@ -133,27 +133,27 @@ def test_migration_of_data(self, logger_mock): call(f"workspace:{org_id}#parent@workspace:{root_workspace_id}"), call(f"workspace:{root_workspace_id}#parent@tenant:{org_id}"), ## Realm - call(f"tenant:{org_id}#realm@realm:stage"), + call(f"tenant:{org_id}#platform@platform:stage"), ## Users to tenant - call(f"tenant:{org_id}#member@user:{self.principal1.uuid}"), - call(f"tenant:{org_id}#member@user:{self.principal2.uuid}"), + call(f"tenant:{org_id}#member@principal:{self.principal1.uuid}"), + call(f"tenant:{org_id}#member@principal:{self.principal2.uuid}"), ## Group member - call(f"group:{self.group_a2.uuid}#member@user:{self.principal1.uuid}"), - call(f"group:{self.group_a2.uuid}#member@user:{self.principal2.uuid}"), + call(f"group:{self.group_a2.uuid}#member@principal:{self.principal1.uuid}"), + call(f"group:{self.group_a2.uuid}#member@principal:{self.principal2.uuid}"), ## Role binding to role_a2 - call(f"role_binding:{rolebinding_a2}#granted@role:{v2_role_a2}"), - call(f"role:{v2_role_a2}#inventory_hosts_write@user:*"), + call(f"role_binding:{rolebinding_a2}#role@role:{v2_role_a2}"), + call(f"role:{v2_role_a2}#inventory_hosts_write@principal:*"), 
call(f"role_binding:{rolebinding_a2}#subject@group:{self.group_a2.uuid}"), call(f"workspace:{self.workspace_id_1}#parent@workspace:{org_id}"), - call(f"workspace:{self.workspace_id_1}#user_grant@role_binding:{rolebinding_a2}"), + call(f"workspace:{self.workspace_id_1}#binding@role_binding:{rolebinding_a2}"), ## Role binding to role_a3 - call(f"role_binding:{rolebinding_a31}#granted@role:{v2_role_a31}"), - call(f"role:{v2_role_a31}#inventory_hosts_write@user:*"), + call(f"role_binding:{rolebinding_a31}#role@role:{v2_role_a31}"), + call(f"role:{v2_role_a31}#inventory_hosts_write@principal:*"), call(f"workspace:{workspace_1}#parent@workspace:{org_id}"), - call(f"workspace:{workspace_1}#user_grant@role_binding:{rolebinding_a31}"), - call(f"role_binding:{rolebinding_a32}#granted@role:{v2_role_a32}"), - call(f"role:{v2_role_a32}#inventory_hosts_write@user:*"), + call(f"workspace:{workspace_1}#binding@role_binding:{rolebinding_a31}"), + call(f"role_binding:{rolebinding_a32}#role@role:{v2_role_a32}"), + call(f"role:{v2_role_a32}#inventory_hosts_write@principal:*"), call(f"workspace:{workspace_2}#parent@workspace:{org_id}"), - call(f"workspace:{workspace_2}#user_grant@role_binding:{rolebinding_a32}"), + call(f"workspace:{workspace_2}#binding@role_binding:{rolebinding_a32}"), ] logger_mock.info.assert_has_calls(tuples, any_order=True) From d3b1f977b45347f3c79e7b752a7b2d1ecd53c274 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Thu, 3 Oct 2024 16:38:01 -0400 Subject: [PATCH 24/55] Reorder migrations after merging in master --- ..._and_more.py => 0053_remove_rolemapping_v1_role_and_more.py} | 2 +- .../{0053_bindingmapping.py => 0054_bindingmapping.py} | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) rename rbac/management/migrations/{0052_remove_rolemapping_v1_role_and_more.py => 0053_remove_rolemapping_v1_role_and_more.py} (92%) rename rbac/management/migrations/{0053_bindingmapping.py => 0054_bindingmapping.py} (94%) diff --git a/rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py b/rbac/management/migrations/0053_remove_rolemapping_v1_role_and_more.py similarity index 92% rename from rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py rename to rbac/management/migrations/0053_remove_rolemapping_v1_role_and_more.py index 848d271d5..04de87ab6 100644 --- a/rbac/management/migrations/0052_remove_rolemapping_v1_role_and_more.py +++ b/rbac/management/migrations/0053_remove_rolemapping_v1_role_and_more.py @@ -6,7 +6,7 @@ class Migration(migrations.Migration): dependencies = [ - ("management", "0051_alter_principal_user_id"), + ("management", "0052_workspace_type_and_more"), ] operations = [ diff --git a/rbac/management/migrations/0053_bindingmapping.py b/rbac/management/migrations/0054_bindingmapping.py similarity index 94% rename from rbac/management/migrations/0053_bindingmapping.py rename to rbac/management/migrations/0054_bindingmapping.py index 593e61fe3..91b17e43b 100644 --- a/rbac/management/migrations/0053_bindingmapping.py +++ b/rbac/management/migrations/0054_bindingmapping.py @@ -7,7 +7,7 @@ class Migration(migrations.Migration): dependencies = [ - ("management", "0052_remove_rolemapping_v1_role_and_more"), + ("management", "0053_remove_rolemapping_v1_role_and_more"), ] operations = [ From 0291df2a94a2cd28ca4825ed9e8d34aa46988cb4 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Thu, 3 Oct 2024 17:20:41 -0400 Subject: [PATCH 25/55] Create proper root/default workspaces on initial migration Instead of using `Tenant#org_id` for the `default` 
workspace, and "root-workspace-{tenant.org_id}" for the `root` workspace identifiers in relations, this gets or creates the `Workspace` objects and passes it along to grab the `str(uuid)` value from the record. --- rbac/migration_tool/migrate.py | 33 ++++++++++++------- ...sharedSystemRolesReplicatedRoleBindings.py | 9 +++-- tests/migration_tool/tests_migrate.py | 18 +++++++--- 3 files changed, 40 insertions(+), 20 deletions(-) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 43d88e514..8a0e2400e 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -20,6 +20,7 @@ from django.conf import settings from kessel.relations.v1beta1 import common_pb2 +from management.models import Workspace from management.role.model import BindingMapping, Role from migration_tool.models import V2rolebinding from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_role_to_v2_bindings @@ -33,7 +34,7 @@ def get_kessel_relation_tuples( v2_role_bindings: Iterable[V2rolebinding], - default_workspace: str, + default_workspace: Workspace, ) -> list[common_pb2.Relationship]: """Generate a set of relationships and BindingMappings for the given set of v2 role bindings.""" relationships: list[common_pb2.Relationship] = list() @@ -48,9 +49,8 @@ def get_kessel_relation_tuples( # All other resource-resource or resource-workspace relations # which may be implied or necessary are intentionally ignored. # These should come from the apps that own the resource. - if ( - bound_resource.resource_type == ("rbac", "workspace") - and not bound_resource.resource_id == default_workspace + if bound_resource.resource_type == ("rbac", "workspace") and not bound_resource.resource_id == str( + default_workspace.uuid ): # This is not strictly necessary here and the relation may be a duplicate. # Once we have more Workspace API / Inventory Group migration progress, @@ -61,7 +61,7 @@ def get_kessel_relation_tuples( bound_resource.resource_type, bound_resource.resource_id, ("rbac", "workspace"), - default_workspace, + str(default_workspace.uuid), "parent", ) ) @@ -72,7 +72,7 @@ def get_kessel_relation_tuples( def migrate_role( role: Role, write_relationships: bool, - default_workspace: str, + default_workspace: Workspace, current_bindings: Iterable[BindingMapping] = [], ) -> tuple[list[common_pb2.Relationship], list[BindingMapping]]: """ @@ -81,7 +81,7 @@ def migrate_role( The mappings are returned so that we can reconstitute the corresponding tuples for a given role. This is needed so we can remove those tuples when the role changes if needed. 
""" - v2_role_bindings = v1_role_to_v2_bindings(role, default_workspace, current_bindings) + v2_role_bindings = v1_role_to_v2_bindings(role, str(default_workspace.uuid), current_bindings) relationships = get_kessel_relation_tuples([m.get_role_binding() for m in v2_role_bindings], default_workspace) output_relationships(relationships, write_relationships) return relationships, v2_role_bindings @@ -89,11 +89,20 @@ def migrate_role( def migrate_workspace(tenant: Tenant, write_relationships: bool): """Migrate a workspace from v1 to v2.""" - root_workspace = f"root-workspace-{tenant.org_id}" - # Org id represents the default workspace for now + root_workspace, _ = Workspace.objects.get_or_create(tenant=tenant, type=Workspace.Types.ROOT) + default_workspace, _ = Workspace.objects.get_or_create(tenant=tenant, type=Workspace.Types.DEFAULT) + relationships = [ - create_relationship(("rbac", "workspace"), tenant.org_id, ("rbac", "workspace"), root_workspace, "parent"), - create_relationship(("rbac", "workspace"), root_workspace, ("rbac", "tenant"), tenant.org_id, "parent"), + create_relationship( + ("rbac", "workspace"), + str(default_workspace.uuid), + ("rbac", "workspace"), + str(root_workspace.uuid), + "parent", + ), + create_relationship( + ("rbac", "workspace"), str(root_workspace.uuid), ("rbac", "tenant"), tenant.org_id, "parent" + ), ] # Include platform for tenant relationships.append( @@ -102,7 +111,7 @@ def migrate_workspace(tenant: Tenant, write_relationships: bool): ) ) output_relationships(relationships, write_relationships) - return root_workspace, tenant.org_id + return root_workspace, default_workspace def migrate_users(tenant: Tenant, write_relationships: bool): diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index 4f03a01ca..c1a0d0cfd 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -20,7 +20,7 @@ from typing import Any, Iterable, Optional, Tuple, Union from django.conf import settings -from management.models import BindingMapping +from management.models import BindingMapping, Workspace from management.permission.model import Permission from management.role.model import Role from migration_tool.ingest import add_element @@ -86,7 +86,7 @@ def set_system_role(cls, role): def v1_role_to_v2_bindings( v1_role: Role, - default_workspace: str, + default_workspace: Workspace, role_bindings: Iterable[BindingMapping], ) -> list[BindingMapping]: """Convert a V1 role to a set of V2 role bindings.""" @@ -130,7 +130,10 @@ def v1_role_to_v2_bindings( add_element(perm_groupings, V2boundresource(resource_type, resource_id), v2_perm, collection=set) if default: add_element( - perm_groupings, V2boundresource(("rbac", "workspace"), default_workspace), v2_perm, collection=set + perm_groupings, + V2boundresource(("rbac", "workspace"), str(default_workspace.uuid)), + v2_perm, + collection=set, ) # Project permission sets to roles per set of resources diff --git a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index a91eee24a..b4c0740da 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -37,6 +37,13 @@ def setUp(self): permission2 = Permission.objects.create(permission="inventory:hosts:write", tenant=public_tenant) # Two organization self.tenant = Tenant.objects.create(org_id="1234567", tenant_name="tenant") + self.root_workspace = 
Workspace.objects.create( + type=Workspace.Types.ROOT, tenant=self.tenant, name="Root Workspace" + ) + self.default_workspace = Workspace.objects.create( + type=Workspace.Types.DEFAULT, tenant=self.tenant, name="Default Workspace" + ) + another_tenant = Tenant.objects.create(org_id="7654321") # setup data for organization 1234567 @@ -95,7 +102,8 @@ def test_migration_of_data(self, logger_mock): migrate_data(**kwargs) org_id = self.tenant.org_id - root_workspace_id = f"root-workspace-{self.tenant.org_id}" + root_workspace_id = str(self.root_workspace.uuid) + default_workspace_id = str(self.default_workspace.uuid) role_binding = BindingMapping.objects.filter(role=self.role_a2).get().get_role_binding() @@ -130,7 +138,7 @@ def test_migration_of_data(self, logger_mock): # Org relationships of self.tenant # the other org is not included since it is not specified in the orgs parameter ## Workspaces root and default - call(f"workspace:{org_id}#parent@workspace:{root_workspace_id}"), + call(f"workspace:{default_workspace_id}#parent@workspace:{root_workspace_id}"), call(f"workspace:{root_workspace_id}#parent@tenant:{org_id}"), ## Realm call(f"tenant:{org_id}#platform@platform:stage"), @@ -144,16 +152,16 @@ def test_migration_of_data(self, logger_mock): call(f"role_binding:{rolebinding_a2}#role@role:{v2_role_a2}"), call(f"role:{v2_role_a2}#inventory_hosts_write@principal:*"), call(f"role_binding:{rolebinding_a2}#subject@group:{self.group_a2.uuid}"), - call(f"workspace:{self.workspace_id_1}#parent@workspace:{org_id}"), + call(f"workspace:{self.workspace_id_1}#parent@workspace:{default_workspace_id}"), call(f"workspace:{self.workspace_id_1}#binding@role_binding:{rolebinding_a2}"), ## Role binding to role_a3 call(f"role_binding:{rolebinding_a31}#role@role:{v2_role_a31}"), call(f"role:{v2_role_a31}#inventory_hosts_write@principal:*"), - call(f"workspace:{workspace_1}#parent@workspace:{org_id}"), + call(f"workspace:{workspace_1}#parent@workspace:{default_workspace_id}"), call(f"workspace:{workspace_1}#binding@role_binding:{rolebinding_a31}"), call(f"role_binding:{rolebinding_a32}#role@role:{v2_role_a32}"), call(f"role:{v2_role_a32}#inventory_hosts_write@principal:*"), - call(f"workspace:{workspace_2}#parent@workspace:{org_id}"), + call(f"workspace:{workspace_2}#parent@workspace:{default_workspace_id}"), call(f"workspace:{workspace_2}#binding@role_binding:{rolebinding_a32}"), ] logger_mock.info.assert_has_calls(tuples, any_order=True) From 6891d664127bc87598ba5d5eb3830a4630435e57 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 07:42:33 -0400 Subject: [PATCH 26/55] Ensure we set the parent on the default workspace on get_or_create --- rbac/migration_tool/migrate.py | 4 +++- tests/migration_tool/tests_migrate.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index 8a0e2400e..e6e3a8e00 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -90,7 +90,9 @@ def migrate_role( def migrate_workspace(tenant: Tenant, write_relationships: bool): """Migrate a workspace from v1 to v2.""" root_workspace, _ = Workspace.objects.get_or_create(tenant=tenant, type=Workspace.Types.ROOT) - default_workspace, _ = Workspace.objects.get_or_create(tenant=tenant, type=Workspace.Types.DEFAULT) + default_workspace, _ = Workspace.objects.get_or_create( + tenant=tenant, type=Workspace.Types.DEFAULT, parent=root_workspace + ) relationships = [ create_relationship( diff --git 
a/tests/migration_tool/tests_migrate.py b/tests/migration_tool/tests_migrate.py index b4c0740da..28746bf3e 100644 --- a/tests/migration_tool/tests_migrate.py +++ b/tests/migration_tool/tests_migrate.py @@ -41,7 +41,7 @@ def setUp(self): type=Workspace.Types.ROOT, tenant=self.tenant, name="Root Workspace" ) self.default_workspace = Workspace.objects.create( - type=Workspace.Types.DEFAULT, tenant=self.tenant, name="Default Workspace" + type=Workspace.Types.DEFAULT, tenant=self.tenant, name="Default Workspace", parent=self.root_workspace ) another_tenant = Tenant.objects.create(org_id="7654321") From 7de747036ca51c60e53d095ed2932eb3fc8122d0 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 07:44:34 -0400 Subject: [PATCH 27/55] Update dual write handler to reference default workspace vs org_id --- rbac/management/role/relation_api_dual_write_handler.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index e30f16f97..78be365fc 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -24,7 +24,7 @@ from django.conf import settings from google.protobuf import json_format from kessel.relations.v1beta1 import common_pb2 -from management.models import Outbox +from management.models import Outbox, Workspace from management.role.model import BindingMapping, Role from migration_tool.migrate import migrate_role from migration_tool.sharedSystemRolesReplicatedRoleBindings import v1_perm_to_v2_perm @@ -215,7 +215,7 @@ def __init__( ) self.tenant_id = binding_tenant.id - self.org_id = binding_tenant.org_id + self.default_workspace = Workspace.objects.get(tenant=tenant, type=Workspace.Types.DEFAULT) except Exception as e: raise DualWriteException(e) @@ -247,7 +247,7 @@ def prepare_for_update(self): relations, _ = migrate_role( self.role, write_relationships=False, - default_workspace=self.org_id, + default_workspace=self.default_workspace, current_bindings=self.binding_mappings.values(), ) @@ -296,7 +296,7 @@ def _generate_relations_and_mappings_for_role(self): relations, mappings = migrate_role( self.role, write_relationships=False, - default_workspace=self.org_id, + default_workspace=self.default_workspace, current_bindings=self.binding_mappings.values(), ) From 2b6459f3b398e2a0edff8d35375c7de46b102f57 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 08:38:20 -0400 Subject: [PATCH 28/55] Update access tests to account for workspace relations --- .../role/relation_api_dual_write_handler.py | 3 ++- tests/management/access/test_view.py | 26 ++++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py index 78be365fc..1a42caeb2 100644 --- a/rbac/management/role/relation_api_dual_write_handler.py +++ b/rbac/management/role/relation_api_dual_write_handler.py @@ -215,8 +215,9 @@ def __init__( ) self.tenant_id = binding_tenant.id - self.default_workspace = Workspace.objects.get(tenant=tenant, type=Workspace.Types.DEFAULT) + self.default_workspace = Workspace.objects.get(tenant=binding_tenant, type=Workspace.Types.DEFAULT) except Exception as e: + logger.error(f"Failed to initialize RelationApiDualWriteHandler with error: {e}") raise DualWriteException(e) def replication_enabled(self): diff --git a/tests/management/access/test_view.py 
b/tests/management/access/test_view.py index 06e3d76c7..62a0662aa 100644 --- a/tests/management/access/test_view.py +++ b/tests/management/access/test_view.py @@ -74,6 +74,15 @@ def setUp(self): ) request = request_context["request"] self.test_headers = request.META + test_tenant_root_workspace = Workspace.objects.create( + name="Test Tenant Root Workspace", type=Workspace.Types.ROOT, tenant=self.test_tenant + ) + Workspace.objects.create( + name="Test Tenant Default Workspace", + type=Workspace.Types.DEFAULT, + parent=test_tenant_root_workspace, + tenant=self.test_tenant, + ) self.principal = Principal(username=user.username, tenant=self.tenant) self.principal.save() @@ -85,8 +94,18 @@ def setUp(self): self.group.save() self.permission = Permission.objects.create(permission="app:*:*", tenant=self.tenant) Permission.objects.create(permission="app:foo:bar", tenant=self.tenant) - Workspace.objects.create(name="root", description="Root workspace", tenant=self.tenant) - Workspace.objects.create(name="root", description="Root workspace", tenant=self.test_tenant) + tenant_root_workspace = Workspace.objects.create( + name="root", + description="Root workspace", + tenant=self.tenant, + type=Workspace.Types.ROOT, + ) + Workspace.objects.create( + name="Tenant Default Workspace", + type=Workspace.Types.DEFAULT, + parent=tenant_root_workspace, + tenant=self.tenant, + ) def tearDown(self): """Tear down access view tests.""" @@ -94,7 +113,8 @@ def tearDown(self): Principal.objects.all().delete() Role.objects.all().delete() Policy.objects.all().delete() - Workspace.objects.all().delete() + Workspace.objects.filter(parent__isnull=False).delete() + Workspace.objects.filter(parent__isnull=True).delete() def create_role(self, role_name, headers, in_access_data=None): """Create a role.""" From 8d0a4e2cdc9e52c58904b8aaa563b12eedaea5b9 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 10:39:41 -0400 Subject: [PATCH 29/55] Fix binding caller arg --- rbac/migration_tool/migrate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index e6e3a8e00..a7fb4ebee 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -81,7 +81,7 @@ def migrate_role( The mappings are returned so that we can reconstitute the corresponding tuples for a given role. This is needed so we can remove those tuples when the role changes if needed. 
""" - v2_role_bindings = v1_role_to_v2_bindings(role, str(default_workspace.uuid), current_bindings) + v2_role_bindings = v1_role_to_v2_bindings(role, default_workspace, current_bindings) relationships = get_kessel_relation_tuples([m.get_role_binding() for m in v2_role_bindings], default_workspace) output_relationships(relationships, write_relationships) return relationships, v2_role_bindings From 277e71508abf0d4aee7bf379a95630e9bbdfe2fc Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Fri, 4 Oct 2024 15:43:04 +0200 Subject: [PATCH 30/55] Replicate Group removal to outbox table --- .../relation_api_dual_write_group_handler.py | 39 ++++++++++- rbac/management/group/view.py | 9 ++- tests/management/group/test_view.py | 66 ++++++++++++++++++- 3 files changed, 109 insertions(+), 5 deletions(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index d8c5ea997..8be954a10 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -33,6 +33,8 @@ from migration_tool.models import V2boundresource, V2role, V2rolebinding from migration_tool.utils import create_relationship +from api.models import Tenant + logger = logging.getLogger(__name__) # pylint: disable=invalid-name @@ -148,13 +150,16 @@ def replicate_removed_role(self, role: Role): if self.group.tenant.tenant_name == "public": return + self._update_mapping_for_role_removal(role) + self._replicate() + + def _update_mapping_for_role_removal(self, role: Role): def remove_group_from_binding(mapping: BindingMapping): self.group_relations_to_remove.append(mapping.remove_group_from_bindings(str(self.group.uuid))) self._update_mapping_for_role( role, update_mapping=remove_group_from_binding, create_default_mapping_for_system_role=lambda: None ) - self._replicate() def _update_mapping_for_role( self, @@ -210,7 +215,6 @@ def _update_mapping_for_role( # Because custom roles must be locked already by this point, # we don't need to lock the binding here. bindings: Iterable[BindingMapping] = role.binding_mappings.all() - if not bindings: logger.warning( "[Dual Write] Binding mappings not found for role(%s): '%s'. " @@ -223,3 +227,34 @@ def _update_mapping_for_role( for mapping in bindings: update_mapping(mapping) mapping.save(force_update=True) + + def prepare_to_delete_group(self): + """Generate relations to delete.""" + roles = Role.objects.filter(policies__group=self.group) + + system_roles = roles.filter(tenant=Tenant.objects.get(tenant_name="public")) + + # Custom roles are locked to prevent resources from being added/removed concurrently, + # in the case that the Roles had _no_ resources specified to begin with. + # This should not be necessary for system roles. 
+ custom_roles = roles.filter(tenant=self.group.tenant).select_for_update() + + custom_ids = [] + for role in [*system_roles, *custom_roles]: + if role.id in custom_ids: + # it was needed to skip distinct clause because distinct doesn't work with select_for_update + continue + self._update_mapping_for_role_removal(role) + custom_ids.append(role.id) + + if self.group.platform_default: + pass # TODO: create default bindings, + else: + self.principals = self.group.principals.all() + self.group_relations_to_remove.extend(self._generate_relations()) + + def replicate_deleted_group(self): + """Prepare for delete.""" + if not self.replication_enabled(): + return + self._replicate() diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 19b1e7983..0b53f36ac 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -369,7 +369,14 @@ def destroy(self, request, *args, **kwargs): if not request.user.admin: self.protect_group_with_user_access_admin_role(group.roles_with_access(), "remove_group") - response = super().destroy(request=request, args=args, kwargs=kwargs) + with transaction.atomic(): + dual_write_handler = RelationApiDualWriteGroupHandler(group, ReplicationEventType.DELETE_GROUP) + dual_write_handler.prepare_to_delete_group() + + response = super().destroy(request=request, args=args, kwargs=kwargs) + + dual_write_handler.replicate_deleted_group() + if response.status_code == status.HTTP_204_NO_CONTENT: group_obj_change_notification_handler(request.user, group, "deleted") diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index ef393864d..39de35b6a 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -827,13 +827,75 @@ def test_update_group_invalid_guid(self): response = client.put(url, {}, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @patch("core.kafka.RBACProducer.send_kafka_message") - def test_delete_group_success(self, send_kafka_message): + def test_delete_group_success(self, send_kafka_message, mock_method): """Test that we can delete an existing group.""" - with self.settings(NOTIFICATIONS_ENABLED=True): + with (self.settings(NOTIFICATIONS_ENABLED=True)): + url = reverse("group-roles", kwargs={"uuid": self.group.uuid}) + request_body = {"roles": [self.role.uuid]} + + client = APIClient() + response = client.post(url, request_body, format="json", **self.headers) + self.assertEqual(response.status_code, status.HTTP_200_OK) + + role_binding_id = ( + BindingMapping.objects.filter(role=self.role, resource_id=self.role.tenant.org_id).get().mappings["id"] + ) + url = reverse("group-detail", kwargs={"uuid": self.group.uuid}) client = APIClient() + principals_uuids = self.group.principals.values_list("uuid", flat=True) + group_uuid = self.group.uuid response = client.delete(url, **self.headers) + + actual_call_arg = mock_method.call_args[0][0] + to_remove = actual_call_arg["relations_to_remove"] + print(to_remove) + self.assertEqual(8, len(to_remove)) + + def assert_group_tuples(tuples_to_replicate): + for principal_uuid in principals_uuids: + relation_tuple = relation_api_tuple( + "group", + group_uuid, + "member", + "principal", + str(principal_uuid), + ) + + self.assertIsNotNone(find_relation_in_list(tuples_to_replicate, relation_tuple)) + + relation_tuple = relation_api_tuple( + "role_binding", + role_binding_id, 
+ "subject", + "group", + str(group_uuid), + "member", + ) + self.assertIsNotNone(find_relation_in_list(tuples_to_replicate, relation_tuple)) + + relation_tuple = relation_api_tuple( + "role_binding", + role_binding_id, + "role", + "role", + str(self.role.uuid), + ) + self.assertIsNotNone(find_relation_in_list(tuples_to_replicate, relation_tuple)) + + relation_tuple = relation_api_tuple( + "workspace", + self.group.tenant.org_id, + "binding", + "role_binding", + str(role_binding_id), + ) + self.assertIsNotNone(find_relation_in_list(tuples_to_replicate, relation_tuple)) + + assert_group_tuples(to_remove) + self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT) org_id = self.customer_data["org_id"] From dca399d9e6b1fe5e7b2a24ac88907f5de2e521d6 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 12:59:56 -0400 Subject: [PATCH 31/55] Ensure we set default/root workspace names --- rbac/migration_tool/migrate.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/rbac/migration_tool/migrate.py b/rbac/migration_tool/migrate.py index a7fb4ebee..0f51a13ff 100644 --- a/rbac/migration_tool/migrate.py +++ b/rbac/migration_tool/migrate.py @@ -89,9 +89,16 @@ def migrate_role( def migrate_workspace(tenant: Tenant, write_relationships: bool): """Migrate a workspace from v1 to v2.""" - root_workspace, _ = Workspace.objects.get_or_create(tenant=tenant, type=Workspace.Types.ROOT) + root_workspace, _ = Workspace.objects.get_or_create( + tenant=tenant, + type=Workspace.Types.ROOT, + name="Root Workspace", + ) default_workspace, _ = Workspace.objects.get_or_create( - tenant=tenant, type=Workspace.Types.DEFAULT, parent=root_workspace + tenant=tenant, + type=Workspace.Types.DEFAULT, + parent=root_workspace, + name="Default Workspace", ) relationships = [ From ce0eb6c1e728e173a4128088afee0f5e7ea85936 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 13:00:31 -0400 Subject: [PATCH 32/55] Update dual write tests to use workspace --- tests/management/role/test_dual_write.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index abd066771..904736865 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -21,6 +21,7 @@ from django.db.models import Q from management.group.model import Group from management.group.relation_api_dual_write_group_handler import RelationApiDualWriteGroupHandler +from management.models import Workspace from management.permission.model import Permission from management.policy.model import Policy from management.principal.model import Principal @@ -44,6 +45,7 @@ from api.models import Tenant +from migration_tool.migrate import migrate_workspace @override_settings(REPLICATION_TO_RELATION_ENABLED=True) @@ -79,8 +81,8 @@ def restore_test_tenant(self): def default_workspace(self, tenant: Optional[Tenant] = None) -> str: """Return the default workspace ID.""" tenant = tenant if tenant is not None else self.tenant - assert tenant.org_id is not None, "Tenant org_id should not be None" - return tenant.org_id + default = Workspace.objects.get(tenant=tenant, type=Workspace.Types.DEFAULT) + return str(default.uuid) def dual_write_handler(self, role: Role, event_type: ReplicationEventType) -> RelationApiDualWriteHandler: """Create a RelationApiDualWriteHandler for the given role and event type.""" @@ -673,7 +675,9 @@ def __init__(self): def new_tenant(self, name: str, org_id: str) -> 
Tenant: """Create a new tenant with the given name and organization ID.""" - return Tenant.objects.create(tenant_name=name, org_id=org_id) + tenant = Tenant.objects.create(tenant_name=name, org_id=org_id) + migrate_workspace(tenant, write_relationships=False) + return tenant def new_system_role(self, name: str, permissions: list[str]) -> Role: """Create a new system role with the given name and permissions.""" From e0d7224e3ae0b99d13112edb47045a99223b629d Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 14:02:27 -0400 Subject: [PATCH 33/55] Skip dual write system role tests --- tests/management/role/test_dual_write.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 904736865..007229e10 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -16,6 +16,7 @@ # """Test tuple changes for RBAC operations.""" +import unittest from typing import Optional, Tuple from django.test import TestCase, override_settings from django.db.models import Q @@ -384,6 +385,7 @@ def test_custom_roles_group_assignments_tuples(self): self.assertEquals(len(tuples), 0) +@unittest.skip("deferring until RHCLOUD-35357 / RHCLOUD-35303 / RHCLOUD-34511") class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" From 711e4114f73dee62964588f3467db49c0d46a7bd Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 14:04:30 -0400 Subject: [PATCH 34/55] Update role view tests to reference workspace --- tests/management/role/test_view.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index e9a4b4aea..958d2f609 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -206,7 +206,19 @@ def setUp(self): self.access3 = Access.objects.create(permission=self.permission2, role=self.sysRole, tenant=self.tenant) Permission.objects.create(permission="cost-management:*:*", tenant=self.tenant) - self.root_workspace = Workspace.objects.create(name="root", description="Root workspace", tenant=self.tenant) + self.root_workspace = Workspace.objects.create( + name="root", + description="Root workspace", + tenant=self.tenant, + type="root", + ) + self.default_workspace = Workspace.objects.create( + name="default", + description="Default workspace", + tenant=self.tenant, + parent=self.root_workspace, + type="default", + ) def tearDown(self): """Tear down role viewset tests.""" @@ -218,7 +230,8 @@ def tearDown(self): Access.objects.all().delete() ExtTenant.objects.all().delete() ExtRoleRelation.objects.all().delete() - Workspace.objects.all().delete() + Workspace.objects.filter(parent__isnull=False).delete() + Workspace.objects.filter(parent__isnull=True).delete() # we need to delete old test_tenant's that may exist in cache test_tenant_org_id = "100001" cached_tenants = TenantCache() @@ -405,7 +418,7 @@ def test_create_role_with_display_success(self, mock_method): response = self.create_role(role_name, role_display=role_display, in_access_data=access_data) self.assertEqual(response.status_code, status.HTTP_201_CREATED) - replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.tenant.org_id)) + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.default_workspace.uuid)) mock_method.assert_called_once() actual_call_arg = 
mock_method.call_args[0][0] @@ -1444,10 +1457,10 @@ def test_update_role(self, mock_method): test_data["access"] = new_access_data url = reverse("role-detail", kwargs={"uuid": role_uuid}) client = APIClient() - current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.tenant.org_id)) + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.default_workspace.uuid)) response = client.put(url, test_data, format="json", **self.headers) - replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.tenant.org_id)) + replication_event = replication_event_for_v1_role(response.data.get("uuid"), str(self.default_workspace.uuid)) replication_event["relations_to_remove"] = current_relations actual_call_arg = mock_method.call_args[0][0] expected_sorted = normalize_and_sort(replication_event) @@ -1555,7 +1568,7 @@ def test_delete_role(self, mock_method): url = reverse("role-detail", kwargs={"uuid": role_uuid}) client = APIClient() replication_event = {"relations_to_add": [], "relations_to_remove": []} - current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.tenant.org_id)) + current_relations = relation_api_tuples_for_v1_role(role_uuid, str(self.default_workspace.uuid)) replication_event["relations_to_remove"] = current_relations response = client.delete(url, **self.headers) actual_call_arg = mock_method.call_args[0][0] From 5d806845cca3595137747257970a35780a3884b9 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Fri, 4 Oct 2024 14:18:26 -0400 Subject: [PATCH 35/55] Update group view tests to use workspace reference --- tests/management/group/test_view.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index ef393864d..b5543297f 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -40,6 +40,7 @@ Role, ExtRoleRelation, ExtTenant, + Workspace, ) from tests.core.test_kafka import copy_call_args from tests.identity_request import IdentityRequest @@ -2857,6 +2858,12 @@ def setUp(self): "Non org admin users are not allowed to add RBAC role with higher than 'read' permission into groups." 
) + self.default_workspace = Workspace.objects.create( + type=Workspace.Types.DEFAULT, + name="Default", + tenant=self.tenant, + ) + def tearDown(self): """Tear down group view tests.""" Group.objects.all().delete() @@ -3724,7 +3731,7 @@ def assert_group_tuples(tuple_to_replicate): relation_tuple = relation_api_tuple( "workspace", - test_group.tenant.org_id, + str(self.default_workspace.uuid), "binding", "role_binding", str(binding_mapping.mappings["id"]), From 96788f3027278814e9d385c111c181daf9d1d51d Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 4 Oct 2024 15:35:00 -0400 Subject: [PATCH 36/55] Use correct user_id for user, and replicate with domain prefix Fixes RHCLOUD-35563 --- deploy/rbac-clowdapp.yml | 10 +++++ .../relation_api_dual_write_group_handler.py | 15 +++++-- rbac/management/group/view.py | 19 ++++++-- rbac/management/utils.py | 44 ++++++++++++------- rbac/rbac/settings.py | 2 + tests/identity_request.py | 2 +- tests/management/group/test_view.py | 26 +++++------ tests/management/principal/fake_proxy.py | 2 + 8 files changed, 81 insertions(+), 39 deletions(-) create mode 100644 tests/management/principal/fake_proxy.py diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index 1eedfd37a..cd88ad40d 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -292,6 +292,8 @@ objects: name: ${GLITCHTIP_SECRET} key: dsn optional: true + - name: PRINCIPAL_USER_DOMAIN + value: ${PRINCIPAL_USER_DOMAIN} - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB value: ${PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB} - name: UMB_JOB_ENABLED @@ -912,6 +914,14 @@ parameters: value: '10' - name: IT_TOKEN_JKWS_CACHE_LIFETIME value: '28800' +- name: PRINCIPAL_USER_DOMAIN + description: > + Kessel requires principal IDs to be qualified by a domain, + in order to future proof integration of identities from multiple issuers. + RBAC currently expects all principals to either come from itself (cross-account), + or from a single identity infrastructure domain (identity header, SSO, BOP). + This defines that single domain. + value: 'redhat.com' - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB description: Allow cleanup job to delete principals via messages from UMB value: 'False' diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index d8c5ea997..31aa3781f 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -54,6 +54,7 @@ def __init__( self.principals = [] self.group = group self.event_type = event_type + self.user_domain = settings.PRINCIPAL_USER_DOMAIN self._replicator = replicator if replicator else OutboxReplicator(group) except Exception as e: raise DualWriteException(e) @@ -62,13 +63,19 @@ def replication_enabled(self): """Check whether replication enabled.""" return settings.REPLICATION_TO_RELATION_ENABLED is True - def _generate_relations(self): + def _generate_member_relations(self): """Generate user-groups relations.""" relations = [] for principal in self.principals: + if principal.user_id is None: + logger.warning( + "[Dual Write] Principal(uuid=%s) does not have user_id. 
Skipping replication.", principal.uuid + ) + continue + principal_id = f"{self.user_domain}:{principal.user_id}" relations.append( create_relationship( - ("rbac", "group"), str(self.group.uuid), ("rbac", "principal"), str(principal.uuid), "member" + ("rbac", "group"), str(self.group.uuid), ("rbac", "principal"), principal_id, "member" ) ) @@ -80,7 +87,7 @@ def replicate_new_principals(self, principals: list[Principal]): return logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) self.principals = principals - self.group_relations_to_add = self._generate_relations() + self.group_relations_to_add = self._generate_member_relations() self._replicate() def replicate_removed_principals(self, principals: list[Principal]): @@ -89,7 +96,7 @@ def replicate_removed_principals(self, principals: list[Principal]): return logger.info("[Dual Write] Generate new relations from Group(%s): '%s'", self.group.uuid, self.group.name) self.principals = principals - self.group_relations_to_remove = self._generate_relations() + self.group_relations_to_remove = self._generate_member_relations() self._replicate() diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 19b1e7983..6968188af 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -438,6 +438,17 @@ def add_principals(self, group, principals_from_response, org_id=None): username = item["username"] try: principal = Principal.objects.get(username__iexact=username, tenant=tenant) + if principal.user_id is None: + user_id = item.get("user_id") + # This should never happen, but just in case, we have the data here, so we can fix it. + logger.warning( + "User principal %s found without user_id. Setting user_id to %s.", + principal.username, + user_id, + ) + if user_id is not None: + principal.user_id = user_id + principal.save() except Principal.DoesNotExist: principal = Principal.objects.create(username=username, tenant=tenant, user_id=item["user_id"]) logger.info("Created new principal %s for org_id %s.", username, org_id) @@ -474,7 +485,7 @@ def raise_error_if_service_accounts_not_present_in_it_service( if len(invalid_service_accounts) > 0: raise ServiceAccountNotFoundError(f"Service account(s) {invalid_service_accounts} not found.") - def add_service_accounts( + def add_service_accounts_to_group( self, group: Group, service_accounts: Iterable[dict], @@ -514,7 +525,7 @@ def add_service_accounts( return group, new_service_accounts - def remove_principals(self, group, principals, org_id=None): + def remove_users(self, group, principals, org_id=None): """Process list of principals and remove them from the group.""" req_id = getattr(self.request, "req_id", None) log_prefix = f"[Request_id:{req_id}]" @@ -694,7 +705,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): with transaction.atomic(): new_service_accounts = [] if len(service_accounts) > 0: - group, new_service_accounts = self.add_service_accounts( + group, new_service_accounts = self.add_service_accounts_to_group( group=group, service_accounts=service_accounts, org_id=org_id, @@ -912,7 +923,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): if USERNAMES_KEY in request.query_params: username = request.query_params.get(USERNAMES_KEY, "") principals = [name.strip() for name in username.split(",")] - resp, principals_to_remove = self.remove_principals(group, principals, org_id=org_id) + resp, principals_to_remove = self.remove_users(group, principals, 
org_id=org_id) if isinstance(resp, dict) and "errors" in resp: return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) response = Response(status=status.HTTP_204_NO_CONTENT) diff --git a/rbac/management/utils.py b/rbac/management/utils.py index 65c16cd9d..293237e40 100644 --- a/rbac/management/utils.py +++ b/rbac/management/utils.py @@ -94,29 +94,37 @@ def get_principal( tenant: Tenant = request.tenant is_username_service_account = ITService.is_username_service_account(username) - try: - # If the username was provided through a query we must verify if it exists in the corresponding services first. - if from_query and not is_username_service_account: - verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) - - principal = Principal.objects.get(username__iexact=username, tenant=tenant) - except Principal.DoesNotExist: - # If the "from query" parameter was specified, the username was validated above, so there is no need to - # validate it again. - if not from_query and not is_username_service_account: - verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) - - if is_username_service_account: + if is_username_service_account: + try: + principal = Principal.objects.get(username__iexact=username, tenant=tenant) + except Principal.DoesNotExist: client_id: uuid.UUID = ITService.extract_client_id_service_account_username(username) principal, _ = Principal.objects.get_or_create( username=username, tenant=tenant, type=SERVICE_ACCOUNT_KEY, service_account_id=client_id ) - else: + else: + # If the username was provided through a query we must verify if it exists + # in the corresponding services first. + if from_query: + resp = verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) + user_id = resp.get("data")[0].get("user_id") + + try: + principal = Principal.objects.get(username__iexact=username, tenant=tenant) + except Principal.DoesNotExist: + # If the "from query" parameter was specified, the username was validated above, + # so there is no need to validate it again. 
+ if not from_query: + resp = verify_principal_with_proxy( + username=username, request=request, verify_principal=verify_principal + ) + user_id = resp.get("data")[0].get("user_id") + # Avoid possible race condition if the user was created while checking BOP principal, _ = Principal.objects.get_or_create( - username=username, tenant=tenant, defaults={"user_id": request.user.user_id} - ) # pylint: disable=unused-variable + username=username, tenant=tenant, defaults={"user_id": user_id} + ) return principal @@ -126,7 +134,9 @@ def verify_principal_with_proxy(username, request, verify_principal=True): org_id = request.user.org_id proxy = PrincipalProxy() if verify_principal: - resp = proxy.request_filtered_principals([username], org_id=org_id, options=request.query_params) + resp = proxy.request_filtered_principals( + [username], org_id=org_id, options={"return_id": True, **(request.query_params)} + ) if isinstance(resp, dict) and "errors" in resp: raise Exception("Dependency error: request to get users from dependent service failed.") diff --git a/rbac/rbac/settings.py b/rbac/rbac/settings.py index 9e649f6f6..5fc0ddfe0 100644 --- a/rbac/rbac/settings.py +++ b/rbac/rbac/settings.py @@ -482,6 +482,8 @@ IT_SERVICE_TIMEOUT_SECONDS = ENVIRONMENT.int("IT_SERVICE_TIMEOUT_SECONDS", default=10) IT_TOKEN_JKWS_CACHE_LIFETIME = ENVIRONMENT.int("IT_TOKEN_JKWS_CACHE_LIFETIME", default=28800) +PRINCIPAL_USER_DOMAIN = ENVIRONMENT.get_value("PRINCIPAL_USER_DOMAIN", default="localhost") + # Settings for enabling/disabling deletion in principal cleanup job via UMB PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB = ENVIRONMENT.bool("PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB", default=False) UMB_JOB_ENABLED = ENVIRONMENT.bool("UMB_JOB_ENABLED", default=True) diff --git a/tests/identity_request.py b/tests/identity_request.py index 8dc50f44e..b403128b5 100644 --- a/tests/identity_request.py +++ b/tests/identity_request.py @@ -29,7 +29,7 @@ from api.common import RH_IDENTITY_HEADER -@override_settings(REPLICATION_TO_RELATION_ENABLED=True) +@override_settings(REPLICATION_TO_RELATION_ENABLED=True, PRINCIPAL_USER_DOMAIN="redhat.com") class IdentityRequest(TestCase): """Parent Class for IAM test cases.""" diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index ef393864d..7ed59262f 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -46,7 +46,7 @@ from tests.management.role.test_view import find_in_list, relation_api_tuple -def generate_relation_entry(group_uuid, principal_uuid): +def generate_group_member_relation_entry(group_uuid, principal_user_id): relation_entry = {"resource": {}} relation_entry["resource"]["type"] = {} @@ -61,7 +61,7 @@ def generate_relation_entry(group_uuid, principal_uuid): relation_entry["subject"]["subject"]["type"] = {} relation_entry["subject"]["subject"]["type"]["namespace"] = "rbac" relation_entry["subject"]["subject"]["type"]["name"] = "principal" - relation_entry["subject"]["subject"]["id"] = principal_uuid + relation_entry["subject"]["subject"]["id"] = principal_user_id return relation_entry @@ -74,12 +74,12 @@ def replication_event(relations_to_add, relations_to_remove): } -def generate_replication_event_to_add_principals(group_uuid, principal_uuid): - return {"relations_to_add": [generate_relation_entry(group_uuid, principal_uuid)], "relations_to_remove": []} +def generate_replication_event_to_add_principals(group_uuid, principal_user_id): + return {"relations_to_add": [generate_group_member_relation_entry(group_uuid, 
principal_user_id)], "relations_to_remove": []} def generate_replication_event_to_remove_principals(group_uuid, principal_uuid): - return {"relations_to_add": [], "relations_to_remove": [generate_relation_entry(group_uuid, principal_uuid)]} + return {"relations_to_add": [], "relations_to_remove": [generate_group_member_relation_entry(group_uuid, principal_uuid)]} def find_relation_in_list(relation_list, relation_tuple): @@ -92,7 +92,6 @@ def find_relation_in_list(relation_list, relation_tuple): and r["subject"]["subject"]["id"] == relation_tuple["subject"]["subject"]["id"], ) - class GroupViewsetTests(IdentityRequest): """Test the group viewset.""" @@ -1021,7 +1020,7 @@ def test_add_group_principals_success(self, send_kafka_message, mock_request, mo actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_add_principals(str(test_group.uuid), str(principal.uuid)), + generate_replication_event_to_add_principals(str(test_group.uuid), "redhat.com:-448717"), actual_call_arg, ) @@ -1137,8 +1136,9 @@ def test_get_group_principals_nonempty_admin_only(self, mock_request): @patch("core.kafka.RBACProducer.send_kafka_message") def test_remove_group_principals_success(self, send_kafka_message, mock_request, mock_method): """Test that removing a principal to a group returns successfully.""" + self.maxDiff = None with self.settings(NOTIFICATIONS_ENABLED=True): - test_user = Principal.objects.create(username="test_user", tenant=self.tenant) + test_user = Principal.objects.create(username="test_user", tenant=self.tenant, user_id="123798") self.group.principals.add(test_user) url = reverse("group-principals", kwargs={"uuid": self.group.uuid}) @@ -1176,7 +1176,7 @@ def test_remove_group_principals_success(self, send_kafka_message, mock_request, actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_remove_principals(str(self.group.uuid), str(test_user.uuid)), + generate_replication_event_to_remove_principals(str(self.group.uuid), "redhat.com:123798"), actual_call_arg, ) @@ -4115,11 +4115,11 @@ def test_add_user_based_principal_in_group_with_User_Access_Admin_success(self, test_group = Group(name="test group", tenant=self.tenant) test_group.save() - test_principal = Principal(username="test-principal", tenant=self.tenant) + test_principal = Principal(username="test-principal", tenant=self.tenant, user_id="1234") test_principal.save() # Set the return value for the mock - mock_request.return_value["data"] = [{"username": test_principal.username}] + mock_request.return_value["data"] = [{"username": test_principal.username, "user_id": "1234"}] url = reverse("group-principals", kwargs={"uuid": test_group.uuid}) client = APIClient() @@ -4130,7 +4130,7 @@ def test_add_user_based_principal_in_group_with_User_Access_Admin_success(self, self.assertEqual(response.status_code, status.HTTP_200_OK) actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_add_principals(str(test_group.uuid), str(test_principal.uuid)), + generate_replication_event_to_add_principals(str(test_group.uuid), "redhat.com:1234"), actual_call_arg, ) @@ -4220,7 +4220,7 @@ def test_add_user_based_principal_in_group_with_User_Access_Admin_fail(self, moc # Role 'User Access administrator' added successfully into test group self.assertEqual(response.status_code, status.HTTP_200_OK) - test_principal = Principal(username="test-principal", tenant=self.tenant) + test_principal = Principal(username="test-principal", tenant=self.tenant, 
user_id="1234") test_principal.save() # Set the return value for the mock diff --git a/tests/management/principal/fake_proxy.py b/tests/management/principal/fake_proxy.py new file mode 100644 index 000000000..513860201 --- /dev/null +++ b/tests/management/principal/fake_proxy.py @@ -0,0 +1,2 @@ +class FakePrincipalProxy: + pass From af81cab26f62b13e00c511dda658cbb13cc28527 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 4 Oct 2024 15:39:18 -0400 Subject: [PATCH 37/55] Undo rename --- rbac/management/group/view.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 6968188af..40eadac4e 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -525,7 +525,7 @@ def add_service_accounts_to_group( return group, new_service_accounts - def remove_users(self, group, principals, org_id=None): + def remove_principals(self, group, principals, org_id=None): """Process list of principals and remove them from the group.""" req_id = getattr(self.request, "req_id", None) log_prefix = f"[Request_id:{req_id}]" @@ -923,7 +923,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): if USERNAMES_KEY in request.query_params: username = request.query_params.get(USERNAMES_KEY, "") principals = [name.strip() for name in username.split(",")] - resp, principals_to_remove = self.remove_users(group, principals, org_id=org_id) + resp, principals_to_remove = self.remove_principals(group, principals, org_id=org_id) if isinstance(resp, dict) and "errors" in resp: return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) response = Response(status=status.HTTP_204_NO_CONTENT) From e592de8853e54905ea2b6c4e55ceb1fa456c885f Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 4 Oct 2024 15:39:43 -0400 Subject: [PATCH 38/55] Undo another rename --- rbac/management/group/view.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 40eadac4e..aebbe1726 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -485,7 +485,7 @@ def raise_error_if_service_accounts_not_present_in_it_service( if len(invalid_service_accounts) > 0: raise ServiceAccountNotFoundError(f"Service account(s) {invalid_service_accounts} not found.") - def add_service_accounts_to_group( + def add_service_accounts( self, group: Group, service_accounts: Iterable[dict], @@ -705,7 +705,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): with transaction.atomic(): new_service_accounts = [] if len(service_accounts) > 0: - group, new_service_accounts = self.add_service_accounts_to_group( + group, new_service_accounts = self.add_service_accounts( group=group, service_accounts=service_accounts, org_id=org_id, From df538408964aed7e6018e0b09adca96547e2dea0 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Sat, 5 Oct 2024 15:07:04 -0400 Subject: [PATCH 39/55] Update dual write test asserts for new ID format --- tests/management/role/test_dual_write.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index abd066771..78d293ca4 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -303,7 +303,7 @@ def test_create_group_tuples(self): group, principals = self.given_group("g1", ["u1", "u2"]) tuples = 
self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) self.assertEquals(len(tuples), 2) - self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + self.assertEquals({t.subject_id for t in tuples}, {f"localhost:{p.user_id}" for p in principals}) def test_update_group_tuples(self): """Update a group by adding and removing users.""" @@ -313,14 +313,14 @@ def test_update_group_tuples(self): tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) self.assertEquals(len(tuples), 3) - self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + self.assertEquals({t.subject_id for t in tuples}, {f"localhost:{p.user_id}" for p in principals}) self.given_removed_group_members(group, ["u2"]) principals = [p for p in principals if p.username != "u2"] tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid), relation("member"))) self.assertEquals(len(tuples), 2) - self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) + self.assertEquals({t.subject_id for t in tuples}, {f"localhost:{p.user_id}" for p in principals}) class DualWriteGroupRolesTestCase(DualWriteTestCase): @@ -760,10 +760,18 @@ def add_members_to_group( ) -> list[Principal]: """Add members to the group.""" principals = [ - *[Principal.objects.get_or_create(username=username, tenant=principal_tenant)[0] for username in users], *[ - Principal.objects.get_or_create(username=username, tenant=principal_tenant, type="service-account")[0] - for username in service_accounts + Principal.objects.get_or_create(username=user_id, tenant=principal_tenant, user_id=user_id)[0] + for user_id in users + ], + *[ + Principal.objects.get_or_create( + username="service-account-" + user_id, + tenant=principal_tenant, + type="service-account", + user_id=user_id, + )[0] + for user_id in service_accounts ], ] From 9fda7c64217f93da799f53a57259ed1496c1737a Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Sat, 5 Oct 2024 15:20:50 -0400 Subject: [PATCH 40/55] Revert getting user_id in get_principal since principals not always verified --- rbac/management/utils.py | 44 +++++++++++++--------------------- tests/management/test_utils.py | 37 +++++++++++++++++++++++++++- 2 files changed, 52 insertions(+), 29 deletions(-) diff --git a/rbac/management/utils.py b/rbac/management/utils.py index 293237e40..148a1bac1 100644 --- a/rbac/management/utils.py +++ b/rbac/management/utils.py @@ -94,37 +94,27 @@ def get_principal( tenant: Tenant = request.tenant is_username_service_account = ITService.is_username_service_account(username) - if is_username_service_account: - try: - principal = Principal.objects.get(username__iexact=username, tenant=tenant) - except Principal.DoesNotExist: + try: + # If the username was provided through a query we must verify if it exists in the corresponding services first. + if from_query and not is_username_service_account: + verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) + + principal = Principal.objects.get(username__iexact=username, tenant=tenant) + except Principal.DoesNotExist: + # If the "from query" parameter was specified, the username was validated above, so there is no need to + # validate it again. 
+ if not from_query and not is_username_service_account: + verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) + + if is_username_service_account: client_id: uuid.UUID = ITService.extract_client_id_service_account_username(username) principal, _ = Principal.objects.get_or_create( username=username, tenant=tenant, type=SERVICE_ACCOUNT_KEY, service_account_id=client_id ) - else: - # If the username was provided through a query we must verify if it exists - # in the corresponding services first. - if from_query: - resp = verify_principal_with_proxy(username=username, request=request, verify_principal=verify_principal) - user_id = resp.get("data")[0].get("user_id") - - try: - principal = Principal.objects.get(username__iexact=username, tenant=tenant) - except Principal.DoesNotExist: - # If the "from query" parameter was specified, the username was validated above, - # so there is no need to validate it again. - if not from_query: - resp = verify_principal_with_proxy( - username=username, request=request, verify_principal=verify_principal - ) - user_id = resp.get("data")[0].get("user_id") - + else: # Avoid possible race condition if the user was created while checking BOP - principal, _ = Principal.objects.get_or_create( - username=username, tenant=tenant, defaults={"user_id": user_id} - ) + principal, _ = Principal.objects.get_or_create(username=username, tenant=tenant) return principal @@ -134,9 +124,7 @@ def verify_principal_with_proxy(username, request, verify_principal=True): org_id = request.user.org_id proxy = PrincipalProxy() if verify_principal: - resp = proxy.request_filtered_principals( - [username], org_id=org_id, options={"return_id": True, **(request.query_params)} - ) + resp = proxy.request_filtered_principals([username], org_id=org_id, options=request.query_params) if isinstance(resp, dict) and "errors" in resp: raise Exception("Dependency error: request to get users from dependent service failed.") diff --git a/tests/management/test_utils.py b/tests/management/test_utils.py index 50768f8ba..ba4e43ca0 100644 --- a/tests/management/test_utils.py +++ b/tests/management/test_utils.py @@ -21,13 +21,13 @@ from management.models import Access, Group, Permission, Principal, Policy, Role from management.utils import ( access_for_principal, + get_principal_from_request, groups_for_principal, policies_for_principal, roles_for_principal, account_id_for_tenant, get_principal, ) -from rest_framework.exceptions import ValidationError from tests.identity_request import IdentityRequest from unittest import mock @@ -229,3 +229,38 @@ def test_get_principal_service_account_created(self): self.assertEqual(created_service_account.service_account_id, str(client_id)) self.assertEqual(created_service_account.type, "service-account") self.assertEqual(created_service_account.username, service_account_username) + + @mock.patch( + "management.principal.proxy.PrincipalProxy.request_filtered_principals", + return_value={ + "status_code": 200, + "data": [ + { + "org_id": "100001", + "is_org_admin": False, + "is_internal": False, + "id": 52567473, + "username": "user_a", + "account_number": "1111111", + "is_active": True, + } + ], + }, + ) + def test_get_principal_from_request_created(self, mock_request_principals): + """Test that when a principal does not exist in the database, it gets created.""" + username = "abcde" + + request = mock.Mock() + request.tenant = self.tenant + request.user = User() + request.user.username = username + request.query_params = {} + + 
# Attempt to fetch the principal from the database. Since it does not exist, it should create one. + get_principal_from_request(request=request) + + # Assert that the principal was properly created in the database. + created_principal = Principal.objects.get(username=username) + self.assertEqual(created_principal.type, "user") + self.assertEqual(created_principal.username, username) From d45b83475a2e204ec4e86254979f25809d59d2ea Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Sat, 5 Oct 2024 15:49:15 -0400 Subject: [PATCH 41/55] Account for lazy ID load and also for service accounts Assumes eventual API change from IT API --- rbac/management/group/view.py | 52 +++++++++++++++-------------- tests/management/group/test_view.py | 21 +++++++++--- 2 files changed, 43 insertions(+), 30 deletions(-) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index aebbe1726..0fe1fedce 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -438,17 +438,11 @@ def add_principals(self, group, principals_from_response, org_id=None): username = item["username"] try: principal = Principal.objects.get(username__iexact=username, tenant=tenant) - if principal.user_id is None: - user_id = item.get("user_id") - # This should never happen, but just in case, we have the data here, so we can fix it. - logger.warning( - "User principal %s found without user_id. Setting user_id to %s.", - principal.username, - user_id, - ) - if user_id is not None: - principal.user_id = user_id - principal.save() + if principal.user_id is None and "user_id" in item: + # Some lazily created Principals may not have user_id. + user_id = item["user_id"] + principal.user_id = user_id + principal.save() except Principal.DoesNotExist: principal = Principal.objects.create(username=username, tenant=tenant, user_id=item["user_id"]) logger.info("Created new principal %s for org_id %s.", username, org_id) @@ -457,12 +451,12 @@ def add_principals(self, group, principals_from_response, org_id=None): group_principal_change_notification_handler(self.request.user, group, username, "added") return group, new_principals - def raise_error_if_service_accounts_not_present_in_it_service( + def ensure_id_for_service_accounts_exists( self, user: User, service_accounts: Iterable[dict], ): - """Validate service account in it service.""" + """Validate service account in it service and populate user IDs if needed.""" # Fetch all the user's service accounts from IT. If we are on a development or testing environment, we might # want to skip calling IT it_service = ITService() @@ -475,11 +469,15 @@ def raise_error_if_service_accounts_not_present_in_it_service( it_service_accounts_by_client_ids[it_sa["clientId"]] = it_sa # Make sure that the service accounts the user specified are visible by them. - it_sa_client_ids = it_service_accounts_by_client_ids.keys() invalid_service_accounts: set = set() for specified_sa in service_accounts: - if specified_sa["clientId"] not in it_sa_client_ids: - invalid_service_accounts.add(specified_sa["clientId"]) + client_id = specified_sa["clientId"] + it_sa = it_service_accounts_by_client_ids.get(client_id) + if it_sa is None: + invalid_service_accounts.add(client_id) + elif "userId" in it_sa: + # Service may not be returning userId's yet. + specified_sa["userId"] = it_sa["userId"] # If we have any invalid service accounts, notify the user. if len(invalid_service_accounts) > 0: @@ -499,14 +497,20 @@ def add_service_accounts( # it. 
for specified_sa in service_accounts: client_id = specified_sa["clientId"] + user_id = specified_sa.get("userId") try: principal = Principal.objects.get( username__iexact=SERVICE_ACCOUNT_USERNAME_FORMAT.format(clientId=client_id), tenant=tenant, ) + if principal.user_id is None and user_id is not None: + # May happen in case principal is lazily created without user ID. + principal.user_id = user_id + principal.save() except Principal.DoesNotExist: principal = Principal.objects.create( username=SERVICE_ACCOUNT_USERNAME_FORMAT.format(clientId=client_id), + user_id=user_id, service_account_id=client_id, type=Principal.Types.SERVICE_ACCOUNT, tenant=tenant, @@ -673,9 +677,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): additional_scopes_to_validate=set[ScopeClaims]([ScopeClaims.SERVICE_ACCOUNTS_CLAIM]), ) try: - self.raise_error_if_service_accounts_not_present_in_it_service( - user=request.user, service_accounts=service_accounts - ) + self.ensure_id_for_service_accounts_exists(user=request.user, service_accounts=service_accounts) except InsufficientPrivilegesError as ipe: return Response( status=status.HTTP_403_FORBIDDEN, @@ -710,14 +712,14 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): service_accounts=service_accounts, org_id=org_id, ) - new_principals = [] + new_users = [] if len(principals) > 0: - group, new_principals = self.add_principals(group, principals_from_response, org_id=org_id) + group, new_users = self.add_principals(group, principals_from_response, org_id=org_id) dual_write_handler = RelationApiDualWriteGroupHandler( group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP ) - dual_write_handler.replicate_new_principals(new_principals + new_service_accounts) + dual_write_handler.replicate_new_principals(new_users + new_service_accounts) # Serialize the group... output = GroupSerializer(group) @@ -918,12 +920,12 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): # removal generates. response = Response(status=status.HTTP_204_NO_CONTENT) - principals_to_remove = [] + users_to_remove = [] # Remove the users from the group too. 
if USERNAMES_KEY in request.query_params: username = request.query_params.get(USERNAMES_KEY, "") principals = [name.strip() for name in username.split(",")] - resp, principals_to_remove = self.remove_principals(group, principals, org_id=org_id) + resp, users_to_remove = self.remove_principals(group, principals, org_id=org_id) if isinstance(resp, dict) and "errors" in resp: return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) response = Response(status=status.HTTP_204_NO_CONTENT) @@ -932,7 +934,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): group, ReplicationEventType.REMOVE_PRINCIPALS_FROM_GROUP, ) - dual_write_handler.replicate_removed_principals(principals_to_remove + service_accounts_to_remove) + dual_write_handler.replicate_removed_principals(users_to_remove + service_accounts_to_remove) return response diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index 7ed59262f..a22340727 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -75,11 +75,17 @@ def replication_event(relations_to_add, relations_to_remove): def generate_replication_event_to_add_principals(group_uuid, principal_user_id): - return {"relations_to_add": [generate_group_member_relation_entry(group_uuid, principal_user_id)], "relations_to_remove": []} + return { + "relations_to_add": [generate_group_member_relation_entry(group_uuid, principal_user_id)], + "relations_to_remove": [], + } def generate_replication_event_to_remove_principals(group_uuid, principal_uuid): - return {"relations_to_add": [], "relations_to_remove": [generate_group_member_relation_entry(group_uuid, principal_uuid)]} + return { + "relations_to_add": [], + "relations_to_remove": [generate_group_member_relation_entry(group_uuid, principal_uuid)], + } def find_relation_in_list(relation_list, relation_tuple): @@ -92,6 +98,7 @@ def find_relation_in_list(relation_list, relation_tuple): and r["subject"]["subject"]["id"] == relation_tuple["subject"]["subject"]["id"], ) + class GroupViewsetTests(IdentityRequest): """Test the group viewset.""" @@ -4065,6 +4072,7 @@ def test_add_service_account_principal_in_group_without_User_Access_Admin_fail(s mocked_values = [ { "clientId": sa_uuid, + "userId": "2345", "name": f"Service Account name", "description": f"Service Account description", "owner": "jsmith", @@ -4094,7 +4102,7 @@ def test_add_service_account_principal_in_group_without_User_Access_Admin_fail(s actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_add_principals(str(test_group.uuid), str(sa_principal.uuid)), actual_call_arg + generate_replication_event_to_add_principals(str(test_group.uuid), "redhat.com:2345"), actual_call_arg ) @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") @@ -4167,6 +4175,7 @@ def test_add_service_account_principal_in_group_with_User_Access_Admin_success(s mocked_values = [ { "clientId": sa_uuid, + "userId": "1234", "name": f"Service Account name", "description": f"Service Account description", "owner": "jsmith", @@ -4187,7 +4196,7 @@ def test_add_service_account_principal_in_group_with_User_Access_Admin_success(s actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_add_principals(str(test_group.uuid), str(sa_principal.uuid)), + generate_replication_event_to_add_principals(str(test_group.uuid), "redhat.com:1234"), actual_call_arg, ) @@ -4417,6 +4426,8 @@ def 
test_remove_service_account_principal_from_group_with_User_Access_Admin_succ service_account_data = self._create_service_account_data() sa_principal = Principal( username=service_account_data["username"], + # Any Principal already added to group is expected to have user_id set + user_id="3456", tenant=self.tenant, type="service-account", service_account_id=service_account_data["client_id"], @@ -4436,7 +4447,7 @@ def test_remove_service_account_principal_from_group_with_User_Access_Admin_succ actual_call_arg = mock_method.call_args[0][0] self.assertEqual( - generate_replication_event_to_remove_principals(str(test_group.uuid), str(sa_principal.uuid)), + generate_replication_event_to_remove_principals(str(test_group.uuid), "redhat.com:3456"), actual_call_arg, ) From 07f27a8d367a6e06d10112691f149bdad76746bc Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Mon, 7 Oct 2024 14:02:56 -0400 Subject: [PATCH 42/55] Only update the mapping if we're not going to delete it --- .../management/group/relation_api_dual_write_group_handler.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index 8be954a10..c049b1032 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -196,12 +196,14 @@ def _update_mapping_for_role( ) .get() ) + update_mapping(mapping) - mapping.save(force_update=True) if mapping.is_unassigned(): self.group_relations_to_remove.extend(mapping.as_tuples()) mapping.delete() + else: + mapping.save(force_update=True) except BindingMapping.DoesNotExist: mapping = create_default_mapping_for_system_role() if mapping is not None: From 0204e6a9c671104a77fc20230872bd00367ecf4d Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Mon, 7 Oct 2024 14:08:17 -0400 Subject: [PATCH 43/55] Fix lint issue --- rbac/management/group/relation_api_dual_write_group_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index c049b1032..b8d541fe2 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -196,7 +196,7 @@ def _update_mapping_for_role( ) .get() ) - + update_mapping(mapping) if mapping.is_unassigned(): From 794bde8fb01ec1ef67d91c169c3180555e0883e6 Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Mon, 7 Oct 2024 14:35:43 -0400 Subject: [PATCH 44/55] Update group dual-writes to use default workspace for system roles --- .../group/relation_api_dual_write_group_handler.py | 9 +++++---- tests/management/group/test_view.py | 8 +++++++- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index d8c5ea997..d79db605b 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -21,6 +21,7 @@ from uuid import uuid4 from django.conf import settings +from management.models import Workspace from management.principal.model import Principal from management.role.model import BindingMapping, Role from management.role.relation_api_dual_write_handler import ( @@ -53,6 +54,8 @@ def __init__( self.group_relations_to_remove = [] 
self.principals = [] self.group = group + self.tenant = group.tenant + self.default_workspace = Workspace.objects.get(tenant=self.tenant, type=Workspace.Types.DEFAULT) self.event_type = event_type self._replicator = replicator if replicator else OutboxReplicator(group) except Exception as e: @@ -127,8 +130,7 @@ def create_default_mapping(): str(uuid4()), # Assumes same role UUID for V2 system role equivalent. V2role.for_system_role(str(role.uuid)), - # TODO: don't use org id once we have workspace built ins - V2boundresource(("rbac", "workspace"), self.group.tenant.org_id), + V2boundresource(("rbac", "workspace"), str(self.default_workspace.uuid)), groups=frozenset([str(self.group.uuid)]), ) mapping = BindingMapping.for_role_binding(binding, role) @@ -186,8 +188,7 @@ def _update_mapping_for_role( role=role, resource_type_namespace="rbac", resource_type_name="workspace", - # TODO: don't use org id once we have workspace built ins - resource_id=self.group.tenant.org_id, + resource_id=str(self.default_workspace.uuid), ) .get() ) diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index b5543297f..f25949890 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -2858,10 +2858,16 @@ def setUp(self): "Non org admin users are not allowed to add RBAC role with higher than 'read' permission into groups." ) + self.root_workspace = Workspace.objects.create( + type=Workspace.Types.ROOT, + name="Root", + tenant=self.tenant, + ) self.default_workspace = Workspace.objects.create( type=Workspace.Types.DEFAULT, name="Default", tenant=self.tenant, + parent=self.root_workspace, ) def tearDown(self): @@ -3700,7 +3706,7 @@ def test_add_and_remove_system_role_to_group(self, mock_method): response = client.post(url, request_body, format="json", **self.headers_org_admin) binding_mapping = BindingMapping.objects.filter( - role=user_access_admin_role, resource_id=user_access_admin_role.tenant.org_id + role=user_access_admin_role, resource_id=str(self.default_workspace.uuid) ).get() actual_call_arg = mock_method.call_args[0][0] From 4ed62307875f418092fb6f93f2c08ab703e42855 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Mon, 7 Oct 2024 15:15:20 -0400 Subject: [PATCH 45/55] Add some additional tests --- tests/management/role/test_dual_write.py | 135 +++++++++++++++++++++-- 1 file changed, 128 insertions(+), 7 deletions(-) diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index abd066771..0e74bcf7a 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -205,6 +205,17 @@ def given_roles_unassigned_from_group(self, group: Group, roles: list[Role]) -> dual_write_handler.replicate_removed_role(role) return policy + def given_group_removed(self, group: Group): + """Remove the given group.""" + dual_write_handler = RelationApiDualWriteGroupHandler( + group, + ReplicationEventType.DELETE_GROUP, + replicator=InMemoryRelationReplicator(self.tuples), + ) + dual_write_handler.prepare_to_delete_group() + group.delete() + dual_write_handler.replicate_deleted_group() + def expect_1_v2_role_with_permissions(self, permissions: list[str]) -> str: """Assert there is a role matching the given permissions and return its ID.""" return self.expect_v2_roles_with_permissions(1, permissions)[0] @@ -295,8 +306,8 @@ def expect_role_bindings_to_workspace( ) -class DualWriteGroupMembershipTestCase(DualWriteTestCase): - """Test dual write logic for group membership.""" +class 
DualWriteGroupTestCase(DualWriteTestCase): + """Test dual write logic for group modifications.""" def test_create_group_tuples(self): """Create a group and add users to it.""" @@ -322,10 +333,6 @@ def test_update_group_tuples(self): self.assertEquals(len(tuples), 2) self.assertEquals({t.subject_id for t in tuples}, {str(p.uuid) for p in principals}) - -class DualWriteGroupRolesTestCase(DualWriteTestCase): - """Test case for verifying the dual write functionality for group role assignments.""" - def test_custom_roles_group_assignments_tuples(self): role_1 = self.given_v1_role( "r1", @@ -381,6 +388,120 @@ def test_custom_roles_group_assignments_tuples(self): self.assertEquals(len(tuples), 0) + def test_delete_group_removes_group_from_role_bindings(self): + # Add two groups to two roles + role_1 = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + role_2 = self.given_v1_role( + "r2", + default=["app2:hosts:read", "inventory:systems:write"], + ws_2=["app2:hosts:read", "inventory:systems:write"], + ) + + group_1, _ = self.given_group("g1", ["u1"]) + group_2, _ = self.given_group("g2", ["u2"]) + + self.given_roles_assigned_to_group(group_1, roles=[role_1, role_2]) + self.given_roles_assigned_to_group(group_2, roles=[role_1, role_2]) + + # Delete the first group + self.given_group_removed(group_1) + + # Assert that the group is removed from the role bindings by querying the role binding subject tuples + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("subject"), + subject_type("rbac", "group", "member"), + ) + ) + + self.assertEquals({t.subject_id for t in tuples}, {str(group_2.uuid)}) + # 2 resources * 2 roles * 1 group = 4 role bindings + self.assertEquals(len(tuples), 4) + + def test_delete_group_removes_principals(self): + group, _ = self.given_group("g1", ["u1", "u2"]) + + self.given_group_removed(group) + + tuples = self.tuples.find_tuples(all_of(resource("rbac", "group", group.uuid))) + self.assertEquals(len(tuples), 0) + + def test_delete_group_removes_role_binding_for_system_roles_if_last_group(self): + role_1 = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + # Given system role + role_2 = self.given_v1_system_role("r2", ["app2:hosts:read", "inventory:systems:write"]) + + group_1, _ = self.given_group("g1", ["u1"]) + self.given_roles_assigned_to_group(group_1, roles=[role_1, role_2]) + + # Now remove the group + self.given_group_removed(group_1) + + # Assert no role binding tuples exist for the system role + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("role"), + subject("rbac", "role", str(role_2.uuid)), + ) + ) + + self.assertEquals(len(tuples), 0) + + # But the custom role remains + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("role"), + subject_type("rbac", "role"), + ) + ) + + # 2 resources * 1 role + self.assertEquals(len(tuples), 2) + + def test_delete_group_keeps_role_binding_for_system_roles_if_not_last_group(self): + """Keep the role binding if it still has other groups assigned to it.""" + role_1 = self.given_v1_role( + "r1", + default=["app1:hosts:read", "inventory:hosts:write"], + ws_2=["app1:hosts:read", "inventory:hosts:write"], + ) + + # Given system role + role_2 = self.given_v1_system_role("r2", ["app2:hosts:read", 
"inventory:systems:write"]) + + group_1, _ = self.given_group("g1", ["u1"]) + group_2, _ = self.given_group("g2", ["u2"]) + + self.given_roles_assigned_to_group(group_1, roles=[role_1, role_2]) + self.given_roles_assigned_to_group(group_2, roles=[role_1, role_2]) + + # Delete the first group + self.given_group_removed(group_1) + + # Check the system role binding remains + tuples = self.tuples.find_tuples( + all_of( + resource_type("rbac", "role_binding"), + relation("role"), + subject("rbac", "role", str(role_2.uuid)), + ) + ) + + self.assertEquals(len(tuples), 1) + class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" @@ -669,7 +790,7 @@ class RbacFixture: def __init__(self): """Initialize the RBAC fixture.""" - self.public_tenant = Tenant.objects.create(tenant_name="public") + self.public_tenant, _ = Tenant.objects.get_or_create(tenant_name="public") def new_tenant(self, name: str, org_id: str) -> Tenant: """Create a new tenant with the given name and organization ID.""" From 6a9581338a84df2c3d0f9cc4b730ad6b4370bfce Mon Sep 17 00:00:00 2001 From: Keith Walsh Date: Tue, 8 Oct 2024 08:15:36 -0400 Subject: [PATCH 46/55] Resolve failing tests needing built-in workspaces --- tests/management/group/test_definer.py | 17 ++++++++++++++++- tests/management/group/test_view.py | 21 +++++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/tests/management/group/test_definer.py b/tests/management/group/test_definer.py index 4f03b0c2d..51a105537 100644 --- a/tests/management/group/test_definer.py +++ b/tests/management/group/test_definer.py @@ -23,7 +23,7 @@ from management.role.definer import seed_roles from tests.identity_request import IdentityRequest from tests.core.test_kafka import copy_call_args -from management.models import Group, Role +from management.models import Group, Role, Workspace class GroupDefinerTests(IdentityRequest): @@ -33,9 +33,24 @@ def setUp(self): """Set up the group definer tests.""" super().setUp() self.public_tenant = Tenant.objects.get(tenant_name="public") + self.root_workspace = Workspace.objects.create( + type=Workspace.Types.ROOT, + name="Root", + tenant=self.public_tenant, + ) + self.default_workspace = Workspace.objects.create( + type=Workspace.Types.DEFAULT, + name="Default", + tenant=self.public_tenant, + parent=self.root_workspace, + ) seed_roles() seed_group() + def tearDown(self): + Workspace.objects.filter(parent__isnull=False).delete() + Workspace.objects.filter(parent__isnull=True).delete() + def test_default_group_seeding_properly(self): """Test that default group are seeded properly.""" group = Group.objects.get(platform_default=True) diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index f25949890..5a822ed8f 100644 --- a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -42,6 +42,7 @@ ExtTenant, Workspace, ) +from migration_tool.migrate import migrate_workspace from tests.core.test_kafka import copy_call_args from tests.identity_request import IdentityRequest from tests.management.role.test_view import find_in_list, relation_api_tuple @@ -210,12 +211,26 @@ def setUp(self): self.group.principals.add(*self.service_accounts) self.group.save() + self.root_workspace = Workspace.objects.create( + type=Workspace.Types.ROOT, + name="Root", + tenant=self.tenant, + ) + self.default_workspace = Workspace.objects.create( + type=Workspace.Types.DEFAULT, + name="Default", + tenant=self.tenant, + parent=self.root_workspace, + ) 
+ def tearDown(self): """Tear down group viewset tests.""" Group.objects.all().delete() Principal.objects.all().delete() Role.objects.all().delete() Policy.objects.all().delete() + Workspace.objects.filter(parent__isnull=False).delete() + Workspace.objects.filter(parent__isnull=True).delete() @patch( "management.principal.proxy.PrincipalProxy.request_filtered_principals", @@ -2878,6 +2893,8 @@ def tearDown(self): Access.objects.all().delete() Role.objects.all().delete() Policy.objects.all().delete() + Workspace.objects.filter(parent__isnull=False).delete() + Workspace.objects.filter(parent__isnull=True).delete() @staticmethod def _create_group_with_user_access_administrator_role(tenant: Tenant) -> Group: @@ -2997,6 +3014,7 @@ def test_group_service_account_with_user_administrator_role_add_principals( user_access_admin_tenant.ready = True user_access_admin_tenant.tenant_name = "new-tenant" user_access_admin_tenant.save() + migrate_workspace(user_access_admin_tenant, write_relationships=False) user_access_admin_group = self._create_group_with_user_access_administrator_role( tenant=user_access_admin_tenant @@ -3153,6 +3171,7 @@ def test_group_service_account_with_user_administrator_role_remove_principals(se user_access_admin_tenant.ready = True user_access_admin_tenant.tenant_name = "new-tenant" user_access_admin_tenant.save() + migrate_workspace(user_access_admin_tenant, write_relationships=False) user_access_admin_group = self._create_group_with_user_access_administrator_role( tenant=user_access_admin_tenant @@ -3291,6 +3310,7 @@ def test_group_user_with_user_administrator_role_add_principals(self, request_fi user_access_admin_tenant.ready = True user_access_admin_tenant.tenant_name = "new-tenant" user_access_admin_tenant.save() + migrate_workspace(user_access_admin_tenant, write_relationships=False) user_access_admin_group = self._create_group_with_user_access_administrator_role( tenant=user_access_admin_tenant @@ -3443,6 +3463,7 @@ def test_group_user_with_user_administrator_role_remove_principals(self): user_access_admin_tenant.ready = True user_access_admin_tenant.tenant_name = "new-tenant" user_access_admin_tenant.save() + migrate_workspace(user_access_admin_tenant, write_relationships=False) user_access_admin_group = self._create_group_with_user_access_administrator_role( tenant=user_access_admin_tenant From d97b639117e6975e96c5e31217340c427a490f29 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Tue, 8 Oct 2024 13:57:51 -0400 Subject: [PATCH 47/55] Fix & unskip tests post merge --- .../relation_api_dual_write_group_handler.py | 2 +- tests/management/group/test_view.py | 16 +++++++++------- tests/management/role/test_dual_write.py | 1 - 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index 32ef8aed0..aa4aef0a9 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py +++ b/rbac/management/group/relation_api_dual_write_group_handler.py @@ -261,7 +261,7 @@ def prepare_to_delete_group(self): pass # TODO: create default bindings, else: self.principals = self.group.principals.all() - self.group_relations_to_remove.extend(self._generate_relations()) + self.group_relations_to_remove.extend(self._generate_member_relations()) def replicate_deleted_group(self): """Prepare for delete.""" diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py index 767a391d6..87359bf55 100644 --- 
a/tests/management/group/test_view.py +++ b/tests/management/group/test_view.py @@ -143,9 +143,9 @@ def setUp(self): self.test_headers = test_request.META self.public_tenant = Tenant.objects.get(tenant_name="public") - self.principal = Principal(username=self.user_data["username"], tenant=self.tenant) + self.principal = Principal(username=self.user_data["username"], tenant=self.tenant, user_id="1") self.principal.save() - self.principalB = Principal(username="mock_user", tenant=self.tenant) + self.principalB = Principal(username="mock_user", tenant=self.tenant, user_id="2") self.principalB.save() self.principalC = Principal(username="user_not_attached_to_group_explicitly", tenant=self.tenant) self.principalC.save() @@ -210,6 +210,7 @@ def setUp(self): tenant=self.tenant, type="service-account", service_account_id=uuid, + user_id=f"sa_sub_{uuid}", ) self.service_accounts.append(principal) principal.save() @@ -861,13 +862,14 @@ def test_delete_group_success(self, send_kafka_message, mock_method): response = client.post(url, request_body, format="json", **self.headers) self.assertEqual(response.status_code, status.HTTP_200_OK) + default_workspace_uuid = str(self.default_workspace.uuid) role_binding_id = ( - BindingMapping.objects.filter(role=self.role, resource_id=self.role.tenant.org_id).get().mappings["id"] + BindingMapping.objects.filter(role=self.role, resource_id=default_workspace_uuid).get().mappings["id"] ) url = reverse("group-detail", kwargs={"uuid": self.group.uuid}) client = APIClient() - principals_uuids = self.group.principals.values_list("uuid", flat=True) + principals_user_ids = self.group.principals.values_list("user_id", flat=True) group_uuid = self.group.uuid response = client.delete(url, **self.headers) @@ -877,13 +879,13 @@ def test_delete_group_success(self, send_kafka_message, mock_method): self.assertEqual(8, len(to_remove)) def assert_group_tuples(tuples_to_replicate): - for principal_uuid in principals_uuids: + for user_id in principals_user_ids: relation_tuple = relation_api_tuple( "group", group_uuid, "member", "principal", - str(principal_uuid), + f"redhat.com:{user_id}", ) self.assertIsNotNone(find_relation_in_list(tuples_to_replicate, relation_tuple)) @@ -909,7 +911,7 @@ def assert_group_tuples(tuples_to_replicate): relation_tuple = relation_api_tuple( "workspace", - self.group.tenant.org_id, + default_workspace_uuid, "binding", "role_binding", str(role_binding_id), diff --git a/tests/management/role/test_dual_write.py b/tests/management/role/test_dual_write.py index 58aa26078..74e1ab93a 100644 --- a/tests/management/role/test_dual_write.py +++ b/tests/management/role/test_dual_write.py @@ -506,7 +506,6 @@ def test_delete_group_keeps_role_binding_for_system_roles_if_not_last_group(self self.assertEquals(len(tuples), 1) -@unittest.skip("deferring until RHCLOUD-35357 / RHCLOUD-35303 / RHCLOUD-34511") class DualWriteSystemRolesTestCase(DualWriteTestCase): """Test dual write logic for system roles.""" From 2fbc193443cda3fe5c51e6f151e58e67c0a85f16 Mon Sep 17 00:00:00 2001 From: Libor Pichler Date: Fri, 11 Oct 2024 14:33:05 +0200 Subject: [PATCH 48/55] Add guard condition in prepare_to_delete_group in group dual write handler --- rbac/management/group/relation_api_dual_write_group_handler.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py index aa4aef0a9..469b166be 100644 --- a/rbac/management/group/relation_api_dual_write_group_handler.py 
+++ b/rbac/management/group/relation_api_dual_write_group_handler.py
@@ -240,6 +240,8 @@ def _update_mapping_for_role(

     def prepare_to_delete_group(self):
         """Generate relations to delete."""
+        if not self.replication_enabled():
+            return
         roles = Role.objects.filter(policies__group=self.group)

         system_roles = roles.filter(tenant=Tenant.objects.get(tenant_name="public"))

From 2543055b2ffb49570dcb0cc002312851a7b177be Mon Sep 17 00:00:00 2001
From: Libor Pichler
Date: Fri, 11 Oct 2024 14:34:01 +0200
Subject: [PATCH 49/55] Remove unnecessary print and change type->event in replication event

---
 .../group/relation_api_dual_write_group_handler.py | 2 +-
 rbac/management/role/relation_api_dual_write_handler.py | 6 +++---
 tests/management/group/test_view.py | 1 -
 3 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/rbac/management/group/relation_api_dual_write_group_handler.py b/rbac/management/group/relation_api_dual_write_group_handler.py
index 469b166be..fb53d7a13 100644
--- a/rbac/management/group/relation_api_dual_write_group_handler.py
+++ b/rbac/management/group/relation_api_dual_write_group_handler.py
@@ -111,7 +111,7 @@ def _replicate(self):
         try:
             self._replicator.replicate(
                 ReplicationEvent(
-                    type=self.event_type,
+                    event_type=self.event_type,
                     # TODO: need to think about partitioning
                     # Maybe resource id
                     partition_key="rbactodo",
diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py
index 1a42caeb2..674fadbfd 100644
--- a/rbac/management/role/relation_api_dual_write_handler.py
+++ b/rbac/management/role/relation_api_dual_write_handler.py
@@ -71,14 +71,14 @@ class ReplicationEvent:

     def __init__(
         self,
-        type: ReplicationEventType,
+        event_type: ReplicationEventType,
         partition_key: str,
         add: list[common_pb2.Relationship] = [],
         remove: list[common_pb2.Relationship] = [],
     ):
         """Initialize ReplicationEvent."""
         self.partition_key = partition_key
-        self.event_type = type
+        self.event_type = event_type
         self.add = add
         self.remove = remove

@@ -276,7 +276,7 @@ def _replicate(self):
         try:
             self._replicator.replicate(
                 ReplicationEvent(
-                    type=self.event_type,
+                    event_type=self.event_type,
                     # TODO: need to think about partitioning
                     # Maybe resource id
                     partition_key="rbactodo",
diff --git a/tests/management/group/test_view.py b/tests/management/group/test_view.py
index 87359bf55..16c74f6ac 100644
--- a/tests/management/group/test_view.py
+++ b/tests/management/group/test_view.py
@@ -875,7 +875,6 @@ def test_delete_group_success(self, send_kafka_message, mock_method):

         actual_call_arg = mock_method.call_args[0][0]
         to_remove = actual_call_arg["relations_to_remove"]
-        print(to_remove)
         self.assertEqual(8, len(to_remove))

         def assert_group_tuples(tuples_to_replicate):

From 5cc27e3ae2d19cc7acc71f367ebd297c7c016073 Mon Sep 17 00:00:00 2001
From: Libor Pichler
Date: Fri, 11 Oct 2024 14:43:41 +0200
Subject: [PATCH 50/55] Add guard condition in replicate_new_system_role_permissions in role dual write handler

---
 rbac/management/role/relation_api_dual_write_handler.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/rbac/management/role/relation_api_dual_write_handler.py b/rbac/management/role/relation_api_dual_write_handler.py
index 674fadbfd..df393d8d5 100644
--- a/rbac/management/role/relation_api_dual_write_handler.py
+++ b/rbac/management/role/relation_api_dual_write_handler.py
@@ -323,6 +323,8 @@ def _generate_relations_and_mappings_for_role(self):
     # TODO: Remove/replace - placeholder for testing
    def 
replicate_new_system_role_permissions(self, role: Role): """Replicate system role permissions.""" + if not self.replication_enabled(): + return permissions = list() for access in role.access.all(): v1_perm = access.permission From 41cf9c729a24cdb6dd13aa5005e4adcf2f4372b8 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 11 Oct 2024 10:21:59 -0400 Subject: [PATCH 51/55] Remove unused method --- rbac/migration_tool/utils.py | 9 --------- 1 file changed, 9 deletions(-) diff --git a/rbac/migration_tool/utils.py b/rbac/migration_tool/utils.py index 2cbbe5153..4d3deb5da 100644 --- a/rbac/migration_tool/utils.py +++ b/rbac/migration_tool/utils.py @@ -100,15 +100,6 @@ def stringify_spicedb_relationship(rel: common_pb2.Relationship): ) -def relationship_to_json(rel): - """Convert a relationship to a JSON object.""" - return { - "resource": {"type": rel.resource.type.name, "id": rel.resource.id}, - "relation": rel.relation, - "subject": {"type": rel.subject.subject.type.name, "id": rel.subject.subject.id}, - } - - def output_relationships(relationships: list, write_db: bool): """Output relationships to the console and optionally write them to the database.""" for rel in relationships: From 83d77f1145b77579df3c3c9932410801ea9b907d Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 11 Oct 2024 10:24:31 -0400 Subject: [PATCH 52/55] Rename methods per prior discussion --- rbac/management/group/view.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rbac/management/group/view.py b/rbac/management/group/view.py index 63ffa049f..ed4beb984 100644 --- a/rbac/management/group/view.py +++ b/rbac/management/group/view.py @@ -436,7 +436,7 @@ def validate_principals_in_proxy_request(self, principals, org_id=None): } return resp - def add_principals(self, group, principals_from_response, org_id=None): + def add_users(self, group, principals_from_response, org_id=None): """Add principals to the group.""" tenant = self.request.tenant new_principals = [] @@ -536,7 +536,7 @@ def add_service_accounts( return group, new_service_accounts - def remove_principals(self, group, principals, org_id=None): + def remove_users(self, group, principals, org_id=None): """Process list of principals and remove them from the group.""" req_id = getattr(self.request, "req_id", None) log_prefix = f"[Request_id:{req_id}]" @@ -721,7 +721,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): ) new_users = [] if len(principals) > 0: - group, new_users = self.add_principals(group, principals_from_response, org_id=org_id) + group, new_users = self.add_users(group, principals_from_response, org_id=org_id) dual_write_handler = RelationApiDualWriteGroupHandler( group, ReplicationEventType.ADD_PRINCIPALS_TO_GROUP @@ -932,7 +932,7 @@ def principals(self, request: Request, uuid: Optional[UUID] = None): if USERNAMES_KEY in request.query_params: username = request.query_params.get(USERNAMES_KEY, "") principals = [name.strip() for name in username.split(",")] - resp, users_to_remove = self.remove_principals(group, principals, org_id=org_id) + resp, users_to_remove = self.remove_users(group, principals, org_id=org_id) if isinstance(resp, dict) and "errors" in resp: return Response(status=resp.get("status_code"), data={"errors": resp.get("errors")}) response = Response(status=status.HTTP_204_NO_CONTENT) From 9b3aea71bf89efdb0e6906a27f0cb226b84971d6 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 11 Oct 2024 13:17:00 -0400 Subject: [PATCH 53/55] Simplify how config works for excluding 
resource definition migration --- deploy/rbac-clowdapp.yml | 1868 ++++++++--------- ...sharedSystemRolesReplicatedRoleBindings.py | 10 +- rbac/rbac/settings.py | 4 +- tests/management/role/test_view.py | 4 +- tox.ini | 1 - 5 files changed, 941 insertions(+), 946 deletions(-) diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index cd88ad40d..c713e947d 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -4,950 +4,950 @@ kind: Template metadata: name: rbac objects: -- apiVersion: cloud.redhat.com/v1alpha1 - kind: ClowdApp - metadata: - name: rbac - annotations: - bonfire.dependencies: ${BONFIRE_DEPENDENCIES} - spec: - envName: ${ENV_NAME} - testing: - iqePlugin: rbac - database: + - apiVersion: cloud.redhat.com/v1alpha1 + kind: ClowdApp + metadata: name: rbac - dbVolumeSize: medium - inMemoryDb: true - kafkaTopics: - - topicName: ${NOTIFICATIONS_TOPIC} - partitions: 3 - replicas: 3 - - topicName: ${EXTERNAL_SYNC_TOPIC} - partitions: 1 - replicas: 3 - - topicName: ${EXTERNAL_CHROME_TOPIC} - partitions: 1 - replicas: 3 - deployments: - - name: worker-service - minReplicas: ${{MIN_WORKER_REPLICAS}} - metadata: - annotations: - ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - runs background processes from turnpike/weekly tasks" - podSpec: - image: ${IMAGE}:${IMAGE_TAG} - initContainers: - - env: - inheritEnv: true + annotations: + bonfire.dependencies: ${BONFIRE_DEPENDENCIES} + spec: + envName: ${ENV_NAME} + testing: + iqePlugin: rbac + database: + name: rbac + dbVolumeSize: medium + inMemoryDb: true + kafkaTopics: + - topicName: ${NOTIFICATIONS_TOPIC} + partitions: 3 + replicas: 3 + - topicName: ${EXTERNAL_SYNC_TOPIC} + partitions: 1 + replicas: 3 + - topicName: ${EXTERNAL_CHROME_TOPIC} + partitions: 1 + replicas: 3 + deployments: + - name: worker-service + minReplicas: ${{MIN_WORKER_REPLICAS}} + metadata: + annotations: + ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - runs background processes from turnpike/weekly tasks" + podSpec: + image: ${IMAGE}:${IMAGE_TAG} + initContainers: + - env: + inheritEnv: true + command: + - sh + - /opt/rbac/deploy/init-container-setup.sh + resources: + limits: + cpu: ${INIT_WORKER_CPU_LIMIT} + memory: ${INIT_WORKER_MEMORY_LIMIT} + requests: + cpu: ${INIT_WORKER_CPU_REQUEST} + memory: ${INIT_WORKER_MEMORY_REQUEST} command: - - sh - - /opt/rbac/deploy/init-container-setup.sh + - /bin/bash + - "-c" + - > + PYTHONPATH=${PWD}/rbac/ celery -A + rbac.celery worker -l $DJANGO_LOG_LEVEL + livenessProbe: + exec: + command: + - /bin/bash + - "-c" + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/bash + - "-c" + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + volumeMounts: + - mountPath: /opt/rbac/rbac/management/role/definitions + name: default-role-config + - mountPath: /opt/rbac/rbac/management/role/permissions + name: model-access-permissions + - mountPath: /opt/rbac/rbac/management/principal/umb_certs + name: umb-certs + volumes: + - configMap: + name: ${CONFIG_MAP_NAME} + name: default-role-config + - configMap: + name: ${MODEL_ACCESS_PERMISSIONS} + name: model-access-permissions + - name: umb-certs + secret: + secretName: 
service-accounts + items: + - key: umb-cert + path: cert.pem + - key: umb-key + path: key.pem resources: limits: - cpu: ${INIT_WORKER_CPU_LIMIT} - memory: ${INIT_WORKER_MEMORY_LIMIT} + cpu: ${CELERY_WORKER_CPU_LIMIT} + memory: ${CELERY_WORKER_MEMORY_LIMIT} requests: - cpu: ${INIT_WORKER_CPU_REQUEST} - memory: ${INIT_WORKER_MEMORY_REQUEST} - command: - - /bin/bash - - '-c' - - > - PYTHONPATH=${PWD}/rbac/ celery -A - rbac.celery worker -l $DJANGO_LOG_LEVEL - livenessProbe: - exec: + cpu: ${CELERY_WORKER_CPU_REQUEST} + memory: ${CELERY_WORKER_MEMORY_REQUEST} + env: + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: PERMISSION_SEEDING_ENABLED + value: "False" + - name: ROLE_SEEDING_ENABLED + value: "False" + - name: GROUP_SEEDING_ENABLED + value: "False" + - name: DJANGO_SECRET_KEY + valueFrom: + secretKeyRef: + key: django-secret-key + name: ${NAME}-secret + optional: false + - name: ENV_NAME + value: ${ENV_NAME} + - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL + valueFrom: + secretKeyRef: + key: principal-proxy-protocol + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_HOST + valueFrom: + secretKeyRef: + key: principal-proxy-host + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PORT + valueFrom: + secretKeyRef: + key: principal-proxy-port + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PATH + value: "" + - name: PRINCIPAL_PROXY_USER_ENV + valueFrom: + secretKeyRef: + key: principal-proxy-env + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_CLIENT_ID + valueFrom: + secretKeyRef: + key: client-id + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_API_TOKEN + valueFrom: + secretKeyRef: + key: token + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY + valueFrom: + secretKeyRef: + key: principal-proxy-ssl-verify + name: ${NAME}-secret + optional: true + - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT + valueFrom: + secretKeyRef: + key: principal-proxy-source-cert + name: ${NAME}-secret + optional: true + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGSSLMODE + value: ${PGSSLMODE} + - name: CLOWDER_ENABLED + value: ${CLOWDER_ENABLED} + - name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: MAX_SEED_THREADS + value: ${MAX_SEED_THREADS} + - name: ACCESS_CACHE_CONNECT_SIGNALS + value: "False" + - name: NOTIFICATIONS_ENABLED + value: ${NOTIFICATIONS_ENABLED} + - name: NOTIFICATIONS_RH_ENABLED + value: ${NOTIFICATIONS_RH_ENABLED} + - name: KAFKA_ENABLED + value: ${KAFKA_ENABLED} + - name: NOTIFICATIONS_TOPIC + value: ${NOTIFICATIONS_TOPIC} + - name: EXTERNAL_SYNC_TOPIC + value: ${EXTERNAL_SYNC_TOPIC} + - name: EXTERNAL_CHROME_TOPIC + value: ${EXTERNAL_CHROME_TOPIC} + - name: MIGRATE_AND_SEED_ON_INIT + value: ${WORKER_MIGRATE_AND_SEED_ON_INIT} + - name: UMB_HOST + value: ${UMB_HOST} + - name: UMB_PORT + value: ${UMB_PORT} + - name: SA_NAME + value: ${SA_NAME} + - name: RELATION_API_SERVER + value: ${RELATION_API_SERVER} + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} + - name: scheduler-service + minReplicas: ${{MIN_SCHEDULER_REPLICAS}} + metadata: + annotations: + ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - keeps the cron scheduled for the weekly tasks" + podSpec: + image: 
${IMAGE}:${IMAGE_TAG} command: - /bin/bash - - '-c' + - "-c" - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /bin/bash - - '-c' - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - volumeMounts: - - mountPath: /opt/rbac/rbac/management/role/definitions - name: default-role-config - - mountPath: /opt/rbac/rbac/management/role/permissions - name: model-access-permissions - - mountPath: /opt/rbac/rbac/management/principal/umb_certs - name: umb-certs - volumes: - - configMap: - name: ${CONFIG_MAP_NAME} - name: default-role-config - - configMap: - name: ${MODEL_ACCESS_PERMISSIONS} - name: model-access-permissions - - name: umb-certs - secret: - secretName: service-accounts - items: - - key: umb-cert - path: cert.pem - - key: umb-key - path: key.pem - resources: - limits: - cpu: ${CELERY_WORKER_CPU_LIMIT} - memory: ${CELERY_WORKER_MEMORY_LIMIT} - requests: - cpu: ${CELERY_WORKER_CPU_REQUEST} - memory: ${CELERY_WORKER_MEMORY_REQUEST} - env: - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: DJANGO_DEBUG - value: ${DJANGO_DEBUG} - - name: PERMISSION_SEEDING_ENABLED - value: 'False' - - name: ROLE_SEEDING_ENABLED - value: 'False' - - name: GROUP_SEEDING_ENABLED - value: 'False' - - name: DJANGO_SECRET_KEY - valueFrom: - secretKeyRef: - key: django-secret-key - name: ${NAME}-secret - optional: false - - name: ENV_NAME - value: ${ENV_NAME} - - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL - valueFrom: - secretKeyRef: - key: principal-proxy-protocol - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_HOST - valueFrom: - secretKeyRef: - key: principal-proxy-host - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PORT - valueFrom: - secretKeyRef: - key: principal-proxy-port - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PATH - value: '' - - name: PRINCIPAL_PROXY_USER_ENV - valueFrom: - secretKeyRef: - key: principal-proxy-env - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - key: client-id - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_API_TOKEN - valueFrom: - secretKeyRef: - key: token - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY - valueFrom: - secretKeyRef: - key: principal-proxy-ssl-verify - name: ${NAME}-secret - optional: true - - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT - valueFrom: - secretKeyRef: - key: principal-proxy-source-cert - name: ${NAME}-secret - optional: true - - name: APP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PGSSLMODE - value: ${PGSSLMODE} - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: MAX_SEED_THREADS - value: ${MAX_SEED_THREADS} - - name: ACCESS_CACHE_CONNECT_SIGNALS - value: 'False' - - name: NOTIFICATIONS_ENABLED - value: ${NOTIFICATIONS_ENABLED} - - name: NOTIFICATIONS_RH_ENABLED - value: ${NOTIFICATIONS_RH_ENABLED} - - name: KAFKA_ENABLED - value: ${KAFKA_ENABLED} - - name: NOTIFICATIONS_TOPIC - value: ${NOTIFICATIONS_TOPIC} 
- - name: EXTERNAL_SYNC_TOPIC - value: ${EXTERNAL_SYNC_TOPIC} - - name: EXTERNAL_CHROME_TOPIC - value: ${EXTERNAL_CHROME_TOPIC} - - name: MIGRATE_AND_SEED_ON_INIT - value: ${WORKER_MIGRATE_AND_SEED_ON_INIT} - - name: UMB_HOST - value: ${UMB_HOST} - - name: UMB_PORT - value: ${UMB_PORT} - - name: SA_NAME - value: ${SA_NAME} - - name: RELATION_API_SERVER - value: ${RELATION_API_SERVER} - - name: REPLICATION_TO_RELATION_ENABLED - value: ${REPLICATION_TO_RELATION_ENABLED} - - name: scheduler-service - minReplicas: ${{MIN_SCHEDULER_REPLICAS}} - metadata: - annotations: - ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - keeps the cron scheduled for the weekly tasks" - podSpec: - image: ${IMAGE}:${IMAGE_TAG} - command: - - /bin/bash - - '-c' - - > - PYTHONPATH=${PWD}/rbac/ celery -A - rbac.celery beat -l $DJANGO_LOG_LEVEL - livenessProbe: - exec: - command: - - /bin/bash - - '-c' - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /bin/bash - - '-c' - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - resources: - limits: - cpu: ${CELERY_SCHEDULER_CPU_LIMIT} - memory: ${CELERY_SCHEDULER_MEMORY_LIMIT} - requests: - cpu: ${CELERY_SCHEDULER_CPU_REQUEST} - memory: ${CELERY_SCHEDULER_MEMORY_REQUEST} - env: - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: DJANGO_DEBUG - value: ${DJANGO_DEBUG} - - name: APP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PERMISSION_SEEDING_ENABLED - value: 'False' - - name: ROLE_SEEDING_ENABLED - value: 'False' - - name: GROUP_SEEDING_ENABLED - value: 'False' - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: PRINCIPAL_USER_DOMAIN - value: ${PRINCIPAL_USER_DOMAIN} - - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB - value: ${PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB} - - name: UMB_JOB_ENABLED - value: ${UMB_JOB_ENABLED} + PYTHONPATH=${PWD}/rbac/ celery -A + rbac.celery beat -l $DJANGO_LOG_LEVEL + livenessProbe: + exec: + command: + - /bin/bash + - "-c" + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/bash + - "-c" + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + resources: + limits: + cpu: ${CELERY_SCHEDULER_CPU_LIMIT} + memory: ${CELERY_SCHEDULER_MEMORY_LIMIT} + requests: + cpu: ${CELERY_SCHEDULER_CPU_REQUEST} + memory: ${CELERY_SCHEDULER_MEMORY_REQUEST} + env: + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PERMISSION_SEEDING_ENABLED + value: "False" + - name: ROLE_SEEDING_ENABLED + value: "False" + - name: GROUP_SEEDING_ENABLED + value: "False" + - name: CLOWDER_ENABLED + value: ${CLOWDER_ENABLED} + - 
name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: PRINCIPAL_USER_DOMAIN + value: ${PRINCIPAL_USER_DOMAIN} + - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB + value: ${PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB} + - name: UMB_JOB_ENABLED + value: ${UMB_JOB_ENABLED} - - name: service - minReplicas: ${{MIN_REPLICAS}} - webServices: - public: - enabled: true - apiPath: rbac - podSpec: - image: ${IMAGE}:${IMAGE_TAG} - initContainers: - - env: - inheritEnv: true - command: - - sh - - /opt/rbac/deploy/init-container-setup.sh - livenessProbe: - httpGet: - path: /api/rbac/v1/status/ - port: 8000 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 3 - readinessProbe: - httpGet: - path: /api/rbac/v1/status/ - port: 8000 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 3 - volumes: - - configMap: - name: ${CONFIG_MAP_NAME} - name: default-role-config - - configMap: - name: ${MODEL_ACCESS_PERMISSIONS} - name: model-access-permissions - volumeMounts: - - mountPath: /opt/rbac/rbac/management/role/definitions - name: default-role-config - - mountPath: /opt/rbac/rbac/management/role/permissions - name: model-access-permissions - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - env: - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: SERVICE_PSKS - valueFrom: - secretKeyRef: - key: psks.json - name: ${RBAC_PSKS} - optional: false - - name: PGSSLMODE - value: ${PGSSLMODE} - - name: DJANGO_SECRET_KEY - valueFrom: - secretKeyRef: - key: django-secret-key - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL - valueFrom: - secretKeyRef: - key: principal-proxy-protocol - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_HOST - valueFrom: - secretKeyRef: - key: principal-proxy-host - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PORT - valueFrom: - secretKeyRef: - key: principal-proxy-port - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PATH - value: '' - - name: PRINCIPAL_PROXY_USER_ENV - valueFrom: - secretKeyRef: - key: principal-proxy-env - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - key: client-id - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_API_TOKEN - valueFrom: - secretKeyRef: - key: token - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY - valueFrom: - secretKeyRef: - key: principal-proxy-ssl-verify - name: rbac-secret - optional: true - - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT - valueFrom: - secretKeyRef: - key: principal-proxy-source-cert - name: rbac-secret - optional: true - - name: POD_CPU_LIMIT - valueFrom: - resourceFieldRef: - containerName: rbac-service - resource: limits.cpu - - name: ACCESS_CACHE_ENABLED - value: ${ACCESS_CACHE_ENABLED} - - name: APP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: DJANGO_DEBUG - value: ${DJANGO_DEBUG} - - name: API_PATH_PREFIX - value: ${API_PATH_PREFIX} - - name: DEVELOPMENT - value: ${DEVELOPMENT} - - name: RBAC_LOG_LEVEL - value: ${RBAC_LOG_LEVEL} - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: 
DJANGO_LOG_FORMATTER - value: ${DJANGO_LOG_FORMATTER} - - name: DJANGO_LOG_HANDLERS - value: ${DJANGO_LOG_HANDLERS} - - name: DJANGO_LOG_DIRECTORY - value: ${DJANGO_LOG_DIRECTORY} - - name: DJANGO_LOGGING_FILE - value: ${DJANGO_LOGGING_FILE} - - name: ENV_NAME - value: ${ENV_NAME} - - name: PERMISSION_SEEDING_ENABLED - value: ${PERMISSION_SEEDING_ENABLED} - - name: ROLE_SEEDING_ENABLED - value: ${ROLE_SEEDING_ENABLED} - - name: GROUP_SEEDING_ENABLED - value: ${GROUP_SEEDING_ENABLED} - - name: BYPASS_BOP_VERIFICATION - value: ${BYPASS_BOP_VERIFICATION} - - name: REPLICATION_TO_RELATION_ENABLED - value: ${REPLICATION_TO_RELATION_ENABLED} - - name: ROLE_CREATE_ALLOW_LIST - value: ${ROLE_CREATE_ALLOW_LIST} - - name: V2_MIGRATION_APP_EXCLUDE_LIST - value: ${V2_MIGRATION_APP_EXCLUDE_LIST} - - name: V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST - value: ${V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST} - - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL - value: ${RBAC_DESTRUCTIVE_API_ENABLED_UNTIL} - - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL - value: ${RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL} - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: APP_NAMESPACE - value: ${APP_NAMESPACE} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: REDIS_MAX_CONNECTIONS - value: ${REDIS_MAX_CONNECTIONS} - - name: REDIS_SOCKET_CONNECT_TIMEOUT - value: ${REDIS_SOCKET_CONNECT_TIMEOUT} - - name: REDIS_SOCKET_TIMEOUT - value: ${REDIS_SOCKET_TIMEOUT} - - name: NOTIFICATIONS_ENABLED - value: ${NOTIFICATIONS_ENABLED} - - name: GUNICORN_WORKER_MULTIPLIER - value: ${GUNICORN_WORKER_MULTIPLIER} - - name: GUNICORN_THREAD_LIMIT - value: ${GUNICORN_THREAD_LIMIT} - - name: NOTIFICATIONS_TOPIC - value: ${NOTIFICATIONS_TOPIC} - - name: KAFKA_ENABLED - value: ${KAFKA_ENABLED} - - name: EXTERNAL_SYNC_TOPIC - value: ${EXTERNAL_SYNC_TOPIC} - - name: EXTERNAL_CHROME_TOPIC - value: ${EXTERNAL_CHROME_TOPIC} - - name: MIGRATE_AND_SEED_ON_INIT - value: ${SERVICE_MIGRATE_AND_SEED_ON_INIT} - - name: USE_CLOWDER_CA_FOR_BOP - value: ${USE_CLOWDER_CA_FOR_BOP} - - name: IT_BYPASS_IT_CALLS - value: ${IT_BYPASS_IT_CALLS} - - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS - value: ${IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS} - - name: IT_BYPASS_TOKEN_VALIDATION - value: ${IT_BYPASS_TOKEN_VALIDATION} - - name: IT_SERVICE_BASE_PATH - value: ${IT_SERVICE_BASE_PATH} - - name: IT_SERVICE_HOST - value: ${IT_SERVICE_HOST} - - name: IT_SERVICE_PORT - value: ${IT_SERVICE_PORT} - - name: IT_SERVICE_PROTOCOL_SCHEME - value: ${IT_SERVICE_PROTOCOL_SCHEME} - - name: IT_SERVICE_TIMEOUT_SECONDS - value: ${IT_SERVICE_TIMEOUT_SECONDS} - - name: IT_TOKEN_JKWS_CACHE_LIFETIME - value: ${IT_TOKEN_JKWS_CACHE_LIFETIME} - - name: V2_APIS_ENABLED - value: ${V2_APIS_ENABLED} - - name: READ_ONLY_API_MODE - value: ${READ_ONLY_API_MODE} + - name: service + minReplicas: ${{MIN_REPLICAS}} + webServices: + public: + enabled: true + apiPath: rbac + podSpec: + image: ${IMAGE}:${IMAGE_TAG} + initContainers: + - env: + inheritEnv: true + command: + - sh + - /opt/rbac/deploy/init-container-setup.sh + livenessProbe: + httpGet: + path: /api/rbac/v1/status/ + port: 8000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /api/rbac/v1/status/ + port: 8000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 3 + volumes: + - configMap: + name: ${CONFIG_MAP_NAME} + name: default-role-config 
+ - configMap: + name: ${MODEL_ACCESS_PERMISSIONS} + name: model-access-permissions + volumeMounts: + - mountPath: /opt/rbac/rbac/management/role/definitions + name: default-role-config + - mountPath: /opt/rbac/rbac/management/role/permissions + name: model-access-permissions + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + env: + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: SERVICE_PSKS + valueFrom: + secretKeyRef: + key: psks.json + name: ${RBAC_PSKS} + optional: false + - name: PGSSLMODE + value: ${PGSSLMODE} + - name: DJANGO_SECRET_KEY + valueFrom: + secretKeyRef: + key: django-secret-key + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL + valueFrom: + secretKeyRef: + key: principal-proxy-protocol + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_HOST + valueFrom: + secretKeyRef: + key: principal-proxy-host + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PORT + valueFrom: + secretKeyRef: + key: principal-proxy-port + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PATH + value: "" + - name: PRINCIPAL_PROXY_USER_ENV + valueFrom: + secretKeyRef: + key: principal-proxy-env + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_CLIENT_ID + valueFrom: + secretKeyRef: + key: client-id + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_API_TOKEN + valueFrom: + secretKeyRef: + key: token + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY + valueFrom: + secretKeyRef: + key: principal-proxy-ssl-verify + name: rbac-secret + optional: true + - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT + valueFrom: + secretKeyRef: + key: principal-proxy-source-cert + name: rbac-secret + optional: true + - name: POD_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: rbac-service + resource: limits.cpu + - name: ACCESS_CACHE_ENABLED + value: ${ACCESS_CACHE_ENABLED} + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: API_PATH_PREFIX + value: ${API_PATH_PREFIX} + - name: DEVELOPMENT + value: ${DEVELOPMENT} + - name: RBAC_LOG_LEVEL + value: ${RBAC_LOG_LEVEL} + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_LOG_FORMATTER + value: ${DJANGO_LOG_FORMATTER} + - name: DJANGO_LOG_HANDLERS + value: ${DJANGO_LOG_HANDLERS} + - name: DJANGO_LOG_DIRECTORY + value: ${DJANGO_LOG_DIRECTORY} + - name: DJANGO_LOGGING_FILE + value: ${DJANGO_LOGGING_FILE} + - name: ENV_NAME + value: ${ENV_NAME} + - name: PERMISSION_SEEDING_ENABLED + value: ${PERMISSION_SEEDING_ENABLED} + - name: ROLE_SEEDING_ENABLED + value: ${ROLE_SEEDING_ENABLED} + - name: GROUP_SEEDING_ENABLED + value: ${GROUP_SEEDING_ENABLED} + - name: BYPASS_BOP_VERIFICATION + value: ${BYPASS_BOP_VERIFICATION} + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} + - name: ROLE_CREATE_ALLOW_LIST + value: ${ROLE_CREATE_ALLOW_LIST} + - name: V2_MIGRATION_APP_EXCLUDE_LIST + value: ${V2_MIGRATION_APP_EXCLUDE_LIST} + - name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST + value: ${V2_MIGRATION_RESOURCE_EXCLUDE_LIST} + - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL + value: ${RBAC_DESTRUCTIVE_API_ENABLED_UNTIL} + - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL + value: ${RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL} + - name: CLOWDER_ENABLED + value: 
${CLOWDER_ENABLED} + - name: APP_NAMESPACE + value: ${APP_NAMESPACE} + - name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: REDIS_MAX_CONNECTIONS + value: ${REDIS_MAX_CONNECTIONS} + - name: REDIS_SOCKET_CONNECT_TIMEOUT + value: ${REDIS_SOCKET_CONNECT_TIMEOUT} + - name: REDIS_SOCKET_TIMEOUT + value: ${REDIS_SOCKET_TIMEOUT} + - name: NOTIFICATIONS_ENABLED + value: ${NOTIFICATIONS_ENABLED} + - name: GUNICORN_WORKER_MULTIPLIER + value: ${GUNICORN_WORKER_MULTIPLIER} + - name: GUNICORN_THREAD_LIMIT + value: ${GUNICORN_THREAD_LIMIT} + - name: NOTIFICATIONS_TOPIC + value: ${NOTIFICATIONS_TOPIC} + - name: KAFKA_ENABLED + value: ${KAFKA_ENABLED} + - name: EXTERNAL_SYNC_TOPIC + value: ${EXTERNAL_SYNC_TOPIC} + - name: EXTERNAL_CHROME_TOPIC + value: ${EXTERNAL_CHROME_TOPIC} + - name: MIGRATE_AND_SEED_ON_INIT + value: ${SERVICE_MIGRATE_AND_SEED_ON_INIT} + - name: USE_CLOWDER_CA_FOR_BOP + value: ${USE_CLOWDER_CA_FOR_BOP} + - name: IT_BYPASS_IT_CALLS + value: ${IT_BYPASS_IT_CALLS} + - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS + value: ${IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS} + - name: IT_BYPASS_TOKEN_VALIDATION + value: ${IT_BYPASS_TOKEN_VALIDATION} + - name: IT_SERVICE_BASE_PATH + value: ${IT_SERVICE_BASE_PATH} + - name: IT_SERVICE_HOST + value: ${IT_SERVICE_HOST} + - name: IT_SERVICE_PORT + value: ${IT_SERVICE_PORT} + - name: IT_SERVICE_PROTOCOL_SCHEME + value: ${IT_SERVICE_PROTOCOL_SCHEME} + - name: IT_SERVICE_TIMEOUT_SECONDS + value: ${IT_SERVICE_TIMEOUT_SECONDS} + - name: IT_TOKEN_JKWS_CACHE_LIFETIME + value: ${IT_TOKEN_JKWS_CACHE_LIFETIME} + - name: V2_APIS_ENABLED + value: ${V2_APIS_ENABLED} + - name: READ_ONLY_API_MODE + value: ${READ_ONLY_API_MODE} - jobs: - - name: tenant-org-id-populator - podSpec: - image: quay.io/cloudservices/tenant-utils:latest - command: - - ./org-id-column-populator - - -C - - -a - - account_id - - -o - - org_id - - -t - - api_tenant - - --ean-translator-addr - - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} - - --batch-size - - "50" - env: - - name: LOG_FORMAT - value: ${LOG_FORMAT} - - name: LOG_BATCH_FREQUENCY - value: '1' - resources: - limits: - cpu: 300m - memory: 1Gi - requests: - cpu: 50m - memory: 512Mi - - name: cross-account-request-target-org-populator - podSpec: - image: quay.io/cloudservices/tenant-utils:latest - command: - - ./org-id-column-populator - - -C - - -a - - target_account - - -o - - target_org - - -t - - api_crossaccountrequest - - --ean-translator-addr - - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} - - --batch-size - - "50" - env: - - name: LOG_FORMAT - value: ${LOG_FORMAT} - - name: LOG_BATCH_FREQUENCY - value: '1' - resources: - limits: - cpu: 300m - memory: 1Gi - requests: - cpu: 50m - memory: 512Mi -- apiVersion: v1 - kind: ConfigMap - metadata: - name: rbac-env - data: - api-path-prefix: /api/rbac - app-config: /opt/rbac/rbac/gunicorn.py - app-domain: ${APP_DOMAIN} - app-home: /opt/rbac/rbac - app-module: rbac.wsgi - app-namespace: rbac-stage - database-engine: postgresql - database-name: rbac - database-service-name: POSTGRES_SQL - development: "False" - django-debug: "False" - django-log-directory: "" - django-log-formatter: simple - django-log-handlers: console - django-log-level: INFO - django-logging-file: "" - postgres-sql-service-host: rbac-pgsql.rbac-stage.svc - postgres-sql-service-port: "5432" - rbac-log-level: INFO + jobs: + - name: tenant-org-id-populator + podSpec: + image: quay.io/cloudservices/tenant-utils:latest + command: + - ./org-id-column-populator + - -C + - 
-a + - account_id + - -o + - org_id + - -t + - api_tenant + - --ean-translator-addr + - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} + - --batch-size + - "50" + env: + - name: LOG_FORMAT + value: ${LOG_FORMAT} + - name: LOG_BATCH_FREQUENCY + value: "1" + resources: + limits: + cpu: 300m + memory: 1Gi + requests: + cpu: 50m + memory: 512Mi + - name: cross-account-request-target-org-populator + podSpec: + image: quay.io/cloudservices/tenant-utils:latest + command: + - ./org-id-column-populator + - -C + - -a + - target_account + - -o + - target_org + - -t + - api_crossaccountrequest + - --ean-translator-addr + - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} + - --batch-size + - "50" + env: + - name: LOG_FORMAT + value: ${LOG_FORMAT} + - name: LOG_BATCH_FREQUENCY + value: "1" + resources: + limits: + cpu: 300m + memory: 1Gi + requests: + cpu: 50m + memory: 512Mi + - apiVersion: v1 + kind: ConfigMap + metadata: + name: rbac-env + data: + api-path-prefix: /api/rbac + app-config: /opt/rbac/rbac/gunicorn.py + app-domain: ${APP_DOMAIN} + app-home: /opt/rbac/rbac + app-module: rbac.wsgi + app-namespace: rbac-stage + database-engine: postgresql + database-name: rbac + database-service-name: POSTGRES_SQL + development: "False" + django-debug: "False" + django-log-directory: "" + django-log-formatter: simple + django-log-handlers: console + django-log-level: INFO + django-logging-file: "" + postgres-sql-service-host: rbac-pgsql.rbac-stage.svc + postgres-sql-service-port: "5432" + rbac-log-level: INFO -- apiVersion: v1 - kind: Secret - metadata: - name: rbac-psks - data: - psks.json: >- - ewogICJhZHZpc29yIjogewogICAgImFsdC1zZWNyZXQiOiAiMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEiCiAgfSwKICAiYXBwcm92YWwiOiB7CiAgICAiYWx0LXNlY3JldCI6ICIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiIKICB9LAogICJub3RpZmljYXRpb25zIjogewogICAgImFsdC1zZWNyZXQiOiAiMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMiCiAgfQp9 - type: Opaque -- apiVersion: v1 - kind: Secret - metadata: - name: insights-rbac - data: - client-id: aW5zaWdodHMtcmJhYw== - token: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== - type: Opaque -- apiVersion: v1 - kind: Secret - metadata: - name: rbac-secret - data: - django-secret-key: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE= - principal-proxy-api-token: >- - MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== - principal-proxy-client-id: aW5zaWdodHMtcmJhYw== - principal-proxy-env: c3RhZ2U= - principal-proxy-host: bWJvcA== - principal-proxy-port: ODA5MA== - principal-proxy-protocol: aHR0cA== - principal-proxy-source-cert: RmFsc2U= - principal-proxy-ssl-verify: RmFsc2U= - sentry-dsn: '' - type: Opaque -- apiVersion: v1 - kind: Secret - metadata: - name: service-accounts - data: - cert: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE= - key: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== - type: Opaque + - apiVersion: v1 + kind: Secret + metadata: + name: rbac-psks + data: + psks.json: >- + ewogICJhZHZpc29yIjogewogICAgImFsdC1zZWNyZXQiOiAiMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEiCiAgfSwKICAiYXBwcm92YWwiOiB7CiAgICAiYWx0LXNlY3JldCI6ICIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiIKICB9LAogICJub3RpZmljYXRpb25zIjogewogICAgImFsdC1zZWNyZXQiOiAiMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMiCiAgfQp9 + type: Opaque + - apiVersion: v1 + kind: Secret + metadata: + name: insights-rbac + data: + client-id: aW5zaWdodHMtcmJhYw== + token: 
MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== + type: Opaque + - apiVersion: v1 + kind: Secret + metadata: + name: rbac-secret + data: + django-secret-key: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE= + principal-proxy-api-token: >- + MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== + principal-proxy-client-id: aW5zaWdodHMtcmJhYw== + principal-proxy-env: c3RhZ2U= + principal-proxy-host: bWJvcA== + principal-proxy-port: ODA5MA== + principal-proxy-protocol: aHR0cA== + principal-proxy-source-cert: RmFsc2U= + principal-proxy-ssl-verify: RmFsc2U= + sentry-dsn: "" + type: Opaque + - apiVersion: v1 + kind: Secret + metadata: + name: service-accounts + data: + cert: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE= + key: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg== + type: Opaque parameters: -- description: Image name - name: IMAGE - value: quay.io/cloudservices/rbac -- description: Determines Clowder deployment - name: CLOWDER_ENABLED - value: "true" -- description: ClowdEnv Name - name: ENV_NAME - required: true -- description: Initial amount of memory the Django container will request. - displayName: Memory Request - name: MEMORY_REQUEST - value: 512Mi -- description: Maximum amount of memory the Django container can use. - displayName: Memory Limit - name: MEMORY_LIMIT - value: 1Gi -- description: Initial amount of cpu the Django container will request. - displayName: CPU Request - name: CPU_REQUEST - value: 200m -- description: Maximum amount of cpu the Django container can use. - displayName: CPU Limit - name: CPU_LIMIT - value: 700m -- displayName: RBAC PSKs - name: RBAC_PSKS - value: rbac-psks -- displayName: Service Dependency Name - name: SERVICE_DEPENDENCY_NAME - value: rbac-pgsql -- displayName: API Prefix Path - name: API_PATH_PREFIX - value: /api/rbac -- displayName: Development - name: DEVELOPMENT - value: 'false' -- displayName: Rbac log level - name: RBAC_LOG_LEVEL - value: INFO -- displayName: Django log level - name: DJANGO_LOG_LEVEL - value: INFO -- displayName: Django log formatter - name: DJANGO_LOG_FORMATTER - value: simple -- displayName: Django log handlers - name: DJANGO_LOG_HANDLERS - value: console,ecs -- displayName: Django log directory - name: DJANGO_LOG_DIRECTORY -- displayName: Django logging file - name: DJANGO_LOGGING_FILE -- description: Name of the rbac-config config map - name: CONFIG_MAP_NAME - value: rbac-config -- description: Name of the predefined access permissions config map - name: MODEL_ACCESS_PERMISSIONS - value: model-access-permissions -- description: minimum number of pods to use when autoscaling is enabled - name: MIN_REPLICAS - value: '1' -- description: maximum number of pods to use when autoscaling is enabled - name: MAX_REPLICAS - value: '1' -- description: minimum number of pods to use when autoscaling is enabled for worker service - name: MIN_WORKER_REPLICAS - value: '1' -- description: minimum number of pods to use when autoscaling is enabled for scheduler service - name: MIN_SCHEDULER_REPLICAS - value: '1' -- description: target CPU utilization for the service - name: TARGET_CPU_UTILIZATION - value: '90' -- description: 'Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS' - displayName: Postgres SSL mode - name: PGSSLMODE - value: prefer -- description: Python boolean value to enable/disable permission 
seeding on app boot - name: PERMISSION_SEEDING_ENABLED - required: true -- description: Python boolean value to enable/disable role seeding on app boot - name: ROLE_SEEDING_ENABLED - required: true -- description: Python boolean value to enable/disable group seeding on app boot - name: GROUP_SEEDING_ENABLED - required: true -- description: Enable the RBAC access cache - name: ACCESS_CACHE_ENABLED - value: 'True' -- description: Bypass interaction with the BOP service - name: BYPASS_BOP_VERIFICATION - value: 'False' -- description: Application allow list for role creation in RBAC - name: ROLE_CREATE_ALLOW_LIST - value: cost-management,remediations,inventory,drift,policies,advisor,vulnerability,compliance,automation-analytics,notifications,patch,integrations,ros,staleness,config-manager,idmsvc -- description: Application exclude list for v2 migration (all permissions) - name: V2_MIGRATION_APP_EXCLUDE_LIST - value: approval -- description: Application exclude list for v2 migration (resource definitions only) - name: V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST - value: cost-management,playbook-dispatcher -- description: Timestamp expiration allowance on destructive actions through the internal RBAC API - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL - value: '' -- description: Timestamp expiration allowance on destructive actions through the seeding job - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL - value: '' -- description: Image tag - name: IMAGE_TAG - required: true -- description: Name of DB secret - name: DB_SECRET_NAME - value: rbac-db -- description: The name assigned to all frontend objects defined in this template. - displayName: Name - name: NAME - value: rbac -- description: Initial amount of CPU the Flower container will request. - displayName: Celery scheduler CPU Resource Request - name: CELERY_SCHEDULER_CPU_REQUEST - value: 100m -- description: Maximum amount of CPU the scheduler container can use. - displayName: CPU Limit - name: CELERY_SCHEDULER_CPU_LIMIT - value: 300m -- description: Initial amount of memory the scheduler container will request. - displayName: Celery scheduler Memory Resource Request - name: CELERY_SCHEDULER_MEMORY_REQUEST - value: 256Mi -- description: Maximum amount of memory the scheduler container can use. - displayName: Memory Limit - name: CELERY_SCHEDULER_MEMORY_LIMIT - value: 512Mi -- description: Initial amount of CPU the worker container will request. - displayName: Celery worker CPU Resource Request - name: CELERY_WORKER_CPU_REQUEST - value: 100m -- description: Maximum amount of CPU the worker container can use. - displayName: CPU Limit - name: CELERY_WORKER_CPU_LIMIT - value: 300m -- description: Initial amount of memory the worker container will request. - displayName: Celery worker Memory Resource Request - name: CELERY_WORKER_MEMORY_REQUEST - value: 256Mi -- description: Maximum amount of memory the worker container can use. - displayName: Memory Limit - name: CELERY_WORKER_MEMORY_LIMIT - value: 512Mi -- description: Initial amount of CPU the init worker container will request. - displayName: RBAC worker init container CPU Resource Request - name: INIT_WORKER_CPU_REQUEST - value: 500m -- description: Maximum amount of CPU the init worker container can use. - displayName: RBAC worker init container CPU Resource Limit - name: INIT_WORKER_CPU_LIMIT - value: 2000m -- description: Initial amount of memory the init worker container will request. 
- displayName: RBAC worker init container Memory Resource Request - name: INIT_WORKER_MEMORY_REQUEST - value: 512Mi -- description: Maximum amount of memory the init worker container can use. - displayName: RBAC worker init container Memory Resource Limit - name: INIT_WORKER_MEMORY_LIMIT - value: 3Gi -- displayName: Django Debug - name: DJANGO_DEBUG - value: 'False' -- displayName: Django log level - name: DJANGO_LOG_LEVEL - value: INFO -- description: 'Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS' - displayName: Postgres SSL mode - name: PGSSLMODE - value: prefer -- description: Name of the rbac-config config map - name: CONFIG_MAP_NAME - value: rbac-config -- description: Name of the predefined access permissions config map - name: MODEL_ACCESS_PERMISSIONS - value: model-access-permissions -- description: Name of DB secret - name: DB_SECRET_NAME - value: rbac-db -- name: APP_NAMESPACE - value: 'rbac' -- name: CW_NULL_WORKAROUND - value: 'true' -- name: CELERY_INITIAL_DELAY_SEC - value: "30" -- name: CELERY_PERIOD_SEC - value: "300" -- description: Default number of threads to use for seeding - name: MAX_SEED_THREADS - value: "2" -- description: max_connections for redis client - name: REDIS_MAX_CONNECTIONS - value: "10" -- description: socket connect timeout for redis - name: REDIS_SOCKET_CONNECT_TIMEOUT - value: "0.1" -- description: socket timeout for redis - name: REDIS_SOCKET_TIMEOUT - value: "0.1" -- description: Enable sending out notification events - name: NOTIFICATIONS_ENABLED - value: 'False' -- description: Enable sending out notification events of Red Hat changes - name: NOTIFICATIONS_RH_ENABLED - value: 'False' -- name: TENANT_TRANSLATOR_HOST - required: true -- name: TENANT_TRANSLATOR_PORT - value: '8892' -- name: GUNICORN_WORKER_MULTIPLIER - value: '2' -- name: GUNICORN_THREAD_LIMIT - value: '10' -- name: NOTIFICATIONS_TOPIC - value: 'platform.notifications.ingress' -- description: Enable kafka - name: KAFKA_ENABLED - value: 'False' -- name: EXTERNAL_SYNC_TOPIC - value: 'platform.rbac.sync' -- name: EXTERNAL_CHROME_TOPIC - value: 'platform.chrome' -- name: SERVICE_MIGRATE_AND_SEED_ON_INIT - value: 'True' -- name: WORKER_MIGRATE_AND_SEED_ON_INIT - value: 'False' -- name: GLITCHTIP_SECRET - value: 'rbac-secret' -- name: USE_CLOWDER_CA_FOR_BOP - value: 'False' -- name: IT_BYPASS_IT_CALLS - description: Bypass calling IT for fetching real service accounts and to use mocked responses instead? - value: 'False' -- name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS - description: Bypass the permissions check for when a user wants to add or remove a service account from a group? - value: 'False' -- name: IT_BYPASS_TOKEN_VALIDATION - description: Bypass validating the token that the user must provide in the Authorization header for making IT calls? 
- value: 'False' -- name: IT_SERVICE_BASE_PATH - description: Path of the IT service's API - value: '/auth/realms/redhat-external/apis' -- name: IT_SERVICE_HOST - description: Host of the IT service - required: true -- name: IT_SERVICE_PORT - description: Port of the IT service - value: '443' -- name: IT_SERVICE_PROTOCOL_SCHEME - description: Protocol scheme of the IT service - value: 'https' -- name: IT_SERVICE_TIMEOUT_SECONDS - description: Number of seconds to wait for a response from IT before timing out and failing the request - value: '10' -- name: IT_TOKEN_JKWS_CACHE_LIFETIME - value: '28800' -- name: PRINCIPAL_USER_DOMAIN - description: > - Kessel requires principal IDs to be qualified by a domain, - in order to future proof integration of identities from multiple issuers. - RBAC currently expects all principals to either come from itself (cross-account), - or from a single identity infrastructure domain (identity header, SSO, BOP). - This defines that single domain. - value: 'redhat.com' -- name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB - description: Allow cleanup job to delete principals via messages from UMB - value: 'False' -- name: UMB_JOB_ENABLE - description: Temp env to enable the UMB job - value: 'True' -- name: UMB_HOST - description: Host of the UMB service - value: 'localhost' -- name: UMB_PORT - description: Port of the UMB service - value: '61612' -- name: SA_NAME - description: Name of the rbac service account - value: 'nonprod-hcc-rbac' -- name: BONFIRE_DEPENDENCIES - description: A comma separated list of non ClowdApp dependencies for bonfire to deploy - value: "model-access-permissions-yml-stage,rbac-config-yml-stage" -- name: RELATION_API_SERVER - description: The gRPC API server to use for the relation - value: "localhost:9000" -- name: REPLICATION_TO_RELATION_ENABLED - description: Enable replication to Relation API - value: "False" -- name: V2_APIS_ENABLED - description: Flag to explicitly enable v2 API endpoints -- name: READ_ONLY_API_MODE - description: Enforce GET only on RBAC APIs - value: 'False' + - description: Image name + name: IMAGE + value: quay.io/cloudservices/rbac + - description: Determines Clowder deployment + name: CLOWDER_ENABLED + value: "true" + - description: ClowdEnv Name + name: ENV_NAME + required: true + - description: Initial amount of memory the Django container will request. + displayName: Memory Request + name: MEMORY_REQUEST + value: 512Mi + - description: Maximum amount of memory the Django container can use. + displayName: Memory Limit + name: MEMORY_LIMIT + value: 1Gi + - description: Initial amount of cpu the Django container will request. + displayName: CPU Request + name: CPU_REQUEST + value: 200m + - description: Maximum amount of cpu the Django container can use. 
+ displayName: CPU Limit + name: CPU_LIMIT + value: 700m + - displayName: RBAC PSKs + name: RBAC_PSKS + value: rbac-psks + - displayName: Service Dependency Name + name: SERVICE_DEPENDENCY_NAME + value: rbac-pgsql + - displayName: API Prefix Path + name: API_PATH_PREFIX + value: /api/rbac + - displayName: Development + name: DEVELOPMENT + value: "false" + - displayName: Rbac log level + name: RBAC_LOG_LEVEL + value: INFO + - displayName: Django log level + name: DJANGO_LOG_LEVEL + value: INFO + - displayName: Django log formatter + name: DJANGO_LOG_FORMATTER + value: simple + - displayName: Django log handlers + name: DJANGO_LOG_HANDLERS + value: console,ecs + - displayName: Django log directory + name: DJANGO_LOG_DIRECTORY + - displayName: Django logging file + name: DJANGO_LOGGING_FILE + - description: Name of the rbac-config config map + name: CONFIG_MAP_NAME + value: rbac-config + - description: Name of the predefined access permissions config map + name: MODEL_ACCESS_PERMISSIONS + value: model-access-permissions + - description: minimum number of pods to use when autoscaling is enabled + name: MIN_REPLICAS + value: "1" + - description: maximum number of pods to use when autoscaling is enabled + name: MAX_REPLICAS + value: "1" + - description: minimum number of pods to use when autoscaling is enabled for worker service + name: MIN_WORKER_REPLICAS + value: "1" + - description: minimum number of pods to use when autoscaling is enabled for scheduler service + name: MIN_SCHEDULER_REPLICAS + value: "1" + - description: target CPU utilization for the service + name: TARGET_CPU_UTILIZATION + value: "90" + - description: "Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS" + displayName: Postgres SSL mode + name: PGSSLMODE + value: prefer + - description: Python boolean value to enable/disable permission seeding on app boot + name: PERMISSION_SEEDING_ENABLED + required: true + - description: Python boolean value to enable/disable role seeding on app boot + name: ROLE_SEEDING_ENABLED + required: true + - description: Python boolean value to enable/disable group seeding on app boot + name: GROUP_SEEDING_ENABLED + required: true + - description: Enable the RBAC access cache + name: ACCESS_CACHE_ENABLED + value: "True" + - description: Bypass interaction with the BOP service + name: BYPASS_BOP_VERIFICATION + value: "False" + - description: Application allow list for role creation in RBAC + name: ROLE_CREATE_ALLOW_LIST + value: cost-management,remediations,inventory,drift,policies,advisor,vulnerability,compliance,automation-analytics,notifications,patch,integrations,ros,staleness,config-manager,idmsvc + - description: Application exclude list for v2 migration (all permissions) + name: V2_MIGRATION_APP_EXCLUDE_LIST + value: approval + - description: Application exclude list for v2 migration (resource definitions only) + name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST + value: rbac:workspace + - description: Timestamp expiration allowance on destructive actions through the internal RBAC API + name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL + value: "" + - description: Timestamp expiration allowance on destructive actions through the seeding job + name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL + value: "" + - description: Image tag + name: IMAGE_TAG + required: true + - description: Name of DB secret + name: DB_SECRET_NAME + value: rbac-db + - description: The name assigned to all frontend objects defined in this template. 
+ displayName: Name + name: NAME + value: rbac + - description: Initial amount of CPU the Flower container will request. + displayName: Celery scheduler CPU Resource Request + name: CELERY_SCHEDULER_CPU_REQUEST + value: 100m + - description: Maximum amount of CPU the scheduler container can use. + displayName: CPU Limit + name: CELERY_SCHEDULER_CPU_LIMIT + value: 300m + - description: Initial amount of memory the scheduler container will request. + displayName: Celery scheduler Memory Resource Request + name: CELERY_SCHEDULER_MEMORY_REQUEST + value: 256Mi + - description: Maximum amount of memory the scheduler container can use. + displayName: Memory Limit + name: CELERY_SCHEDULER_MEMORY_LIMIT + value: 512Mi + - description: Initial amount of CPU the worker container will request. + displayName: Celery worker CPU Resource Request + name: CELERY_WORKER_CPU_REQUEST + value: 100m + - description: Maximum amount of CPU the worker container can use. + displayName: CPU Limit + name: CELERY_WORKER_CPU_LIMIT + value: 300m + - description: Initial amount of memory the worker container will request. + displayName: Celery worker Memory Resource Request + name: CELERY_WORKER_MEMORY_REQUEST + value: 256Mi + - description: Maximum amount of memory the worker container can use. + displayName: Memory Limit + name: CELERY_WORKER_MEMORY_LIMIT + value: 512Mi + - description: Initial amount of CPU the init worker container will request. + displayName: RBAC worker init container CPU Resource Request + name: INIT_WORKER_CPU_REQUEST + value: 500m + - description: Maximum amount of CPU the init worker container can use. + displayName: RBAC worker init container CPU Resource Limit + name: INIT_WORKER_CPU_LIMIT + value: 2000m + - description: Initial amount of memory the init worker container will request. + displayName: RBAC worker init container Memory Resource Request + name: INIT_WORKER_MEMORY_REQUEST + value: 512Mi + - description: Maximum amount of memory the init worker container can use. 
+ displayName: RBAC worker init container Memory Resource Limit + name: INIT_WORKER_MEMORY_LIMIT + value: 3Gi + - displayName: Django Debug + name: DJANGO_DEBUG + value: "False" + - displayName: Django log level + name: DJANGO_LOG_LEVEL + value: INFO + - description: "Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS" + displayName: Postgres SSL mode + name: PGSSLMODE + value: prefer + - description: Name of the rbac-config config map + name: CONFIG_MAP_NAME + value: rbac-config + - description: Name of the predefined access permissions config map + name: MODEL_ACCESS_PERMISSIONS + value: model-access-permissions + - description: Name of DB secret + name: DB_SECRET_NAME + value: rbac-db + - name: APP_NAMESPACE + value: "rbac" + - name: CW_NULL_WORKAROUND + value: "true" + - name: CELERY_INITIAL_DELAY_SEC + value: "30" + - name: CELERY_PERIOD_SEC + value: "300" + - description: Default number of threads to use for seeding + name: MAX_SEED_THREADS + value: "2" + - description: max_connections for redis client + name: REDIS_MAX_CONNECTIONS + value: "10" + - description: socket connect timeout for redis + name: REDIS_SOCKET_CONNECT_TIMEOUT + value: "0.1" + - description: socket timeout for redis + name: REDIS_SOCKET_TIMEOUT + value: "0.1" + - description: Enable sending out notification events + name: NOTIFICATIONS_ENABLED + value: "False" + - description: Enable sending out notification events of Red Hat changes + name: NOTIFICATIONS_RH_ENABLED + value: "False" + - name: TENANT_TRANSLATOR_HOST + required: true + - name: TENANT_TRANSLATOR_PORT + value: "8892" + - name: GUNICORN_WORKER_MULTIPLIER + value: "2" + - name: GUNICORN_THREAD_LIMIT + value: "10" + - name: NOTIFICATIONS_TOPIC + value: "platform.notifications.ingress" + - description: Enable kafka + name: KAFKA_ENABLED + value: "False" + - name: EXTERNAL_SYNC_TOPIC + value: "platform.rbac.sync" + - name: EXTERNAL_CHROME_TOPIC + value: "platform.chrome" + - name: SERVICE_MIGRATE_AND_SEED_ON_INIT + value: "True" + - name: WORKER_MIGRATE_AND_SEED_ON_INIT + value: "False" + - name: GLITCHTIP_SECRET + value: "rbac-secret" + - name: USE_CLOWDER_CA_FOR_BOP + value: "False" + - name: IT_BYPASS_IT_CALLS + description: Bypass calling IT for fetching real service accounts and to use mocked responses instead? + value: "False" + - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS + description: Bypass the permissions check for when a user wants to add or remove a service account from a group? + value: "False" + - name: IT_BYPASS_TOKEN_VALIDATION + description: Bypass validating the token that the user must provide in the Authorization header for making IT calls? + value: "False" + - name: IT_SERVICE_BASE_PATH + description: Path of the IT service's API + value: "/auth/realms/redhat-external/apis" + - name: IT_SERVICE_HOST + description: Host of the IT service + required: true + - name: IT_SERVICE_PORT + description: Port of the IT service + value: "443" + - name: IT_SERVICE_PROTOCOL_SCHEME + description: Protocol scheme of the IT service + value: "https" + - name: IT_SERVICE_TIMEOUT_SECONDS + description: Number of seconds to wait for a response from IT before timing out and failing the request + value: "10" + - name: IT_TOKEN_JKWS_CACHE_LIFETIME + value: "28800" + - name: PRINCIPAL_USER_DOMAIN + description: > + Kessel requires principal IDs to be qualified by a domain, + in order to future proof integration of identities from multiple issuers. 
+ RBAC currently expects all principals to either come from itself (cross-account), + or from a single identity infrastructure domain (identity header, SSO, BOP). + This defines that single domain. + value: "redhat.com" + - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB + description: Allow cleanup job to delete principals via messages from UMB + value: "False" + - name: UMB_JOB_ENABLE + description: Temp env to enable the UMB job + value: "True" + - name: UMB_HOST + description: Host of the UMB service + value: "localhost" + - name: UMB_PORT + description: Port of the UMB service + value: "61612" + - name: SA_NAME + description: Name of the rbac service account + value: "nonprod-hcc-rbac" + - name: BONFIRE_DEPENDENCIES + description: A comma separated list of non ClowdApp dependencies for bonfire to deploy + value: "model-access-permissions-yml-stage,rbac-config-yml-stage" + - name: RELATION_API_SERVER + description: The gRPC API server to use for the relation + value: "localhost:9000" + - name: REPLICATION_TO_RELATION_ENABLED + description: Enable replication to Relation API + value: "False" + - name: V2_APIS_ENABLED + description: Flag to explicitly enable v2 API endpoints + - name: READ_ONLY_API_MODE + description: Enforce GET only on RBAC APIs + value: "False" diff --git a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py index c1a0d0cfd..08f579807 100644 --- a/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py +++ b/rbac/migration_tool/sharedSystemRolesReplicatedRoleBindings.py @@ -103,10 +103,6 @@ def v1_role_to_v2_bindings( default = True for resource_def in access.resourceDefinitions.all(): - if not is_for_enabled_resource(v1_perm): - default = False - continue - default = False attri_filter = resource_def.attributeFilter @@ -123,6 +119,8 @@ def v1_role_to_v2_bindings( if resource_type is None: # Resource type not mapped to v2 continue + if not is_for_enabled_resource(resource_type): + continue for resource_id in values_from_attribute_filter(attri_filter): # TODO: Need to bind against "ungrouped hosts" for inventory if resource_id is None: @@ -199,7 +197,7 @@ def is_for_enabled_app(perm: Permission): return perm.application not in settings.V2_MIGRATION_APP_EXCLUDE_LIST -def is_for_enabled_resource(perm: Permission): +def is_for_enabled_resource(resource: Tuple[str, str]): """ Return true if the resource is for an app that should migrate. @@ -210,7 +208,7 @@ def is_for_enabled_resource(perm: Permission): Once the resource model is finalized, we should no longer exclude that app, and should instead update the migration code to account for migrating those resources in whatever form they should migrate. 
""" - return perm.application not in settings.V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST + return f"{resource[0]}:{resource[1]}" not in settings.V2_MIGRATION_RESOURCE_EXCLUDE_LIST def values_from_attribute_filter(attribute_filter: dict[str, Any]) -> list[str]: diff --git a/rbac/rbac/settings.py b/rbac/rbac/settings.py index 5fc0ddfe0..30f8b0e27 100644 --- a/rbac/rbac/settings.py +++ b/rbac/rbac/settings.py @@ -350,9 +350,7 @@ # Dual write migration configuration REPLICATION_TO_RELATION_ENABLED = ENVIRONMENT.bool("REPLICATION_TO_RELATION_ENABLED", default=False) V2_MIGRATION_APP_EXCLUDE_LIST = ENVIRONMENT.get_value("V2_MIGRATION_APP_EXCLUDE_LIST", default="").split(",") -V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST = ENVIRONMENT.get_value( - "V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST", default="" -).split(",") +V2_MIGRATION_RESOURCE_EXCLUDE_LIST = ENVIRONMENT.get_value("V2_MIGRATION_RESOURCE_EXCLUDE_LIST", default="").split(",") # Migration Setup TENANT_PARALLEL_MIGRATION_MAX_PROCESSES = ENVIRONMENT.int("TENANT_PARALLEL_MIGRATION_MAX_PROCESSES", default=2) diff --git a/tests/management/role/test_view.py b/tests/management/role/test_view.py index 958d2f609..ee79e033e 100644 --- a/tests/management/role/test_view.py +++ b/tests/management/role/test_view.py @@ -360,7 +360,7 @@ def test_create_role_success(self, send_kafka_message): ANY, ) - @override_settings(V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST=["app"]) + @override_settings(V2_MIGRATION_RESOURCE_EXCLUDE_LIST=["rbac:workspace"]) @patch("management.role.relation_api_dual_write_handler.OutboxReplicator._save_replication_event") def test_role_replication_exluded_resource(self, mock_method): """Test that excluded resources do not replicate via dual write.""" @@ -370,7 +370,7 @@ def test_role_replication_exluded_resource(self, mock_method): { "permission": "app:*:*", "resourceDefinitions": [ - {"attributeFilter": {"key": "keyA.id", "operation": "equal", "value": "valueA"}} + {"attributeFilter": {"key": "group.id", "operation": "equal", "value": "valueA"}} ], }, {"permission": "app:*:read", "resourceDefinitions": []}, diff --git a/tox.ini b/tox.ini index 796ba3bb6..490dfb711 100644 --- a/tox.ini +++ b/tox.ini @@ -31,7 +31,6 @@ setenv = TESTING_APPLICATION=app ROLE_CREATE_ALLOW_LIST=cost-management V2_MIGRATION_APP_EXCLUDE_LIST=approval - V2_MIGRATION_RESOURCE_APP_EXCLUDE_LIST=cost-management,playbook-dispatcher NOTIFICATIONS_TOPIC=platform.notifications.ingress EXTERNAL_SYNC_TOPIC=platform.rbac.sync EXTERNAL_CHROME_TOPIC=platform.chrome From 9bec5379ddbf66588a1fb2a09055d694eb811d1c Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 11 Oct 2024 13:18:39 -0400 Subject: [PATCH 54/55] Undo formatting changes --- deploy/rbac-clowdapp.yml | 1868 +++++++++++++++++++------------------- 1 file changed, 934 insertions(+), 934 deletions(-) diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index c713e947d..17f71c2d8 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -4,950 +4,950 @@ kind: Template metadata: name: rbac objects: - - apiVersion: cloud.redhat.com/v1alpha1 - kind: ClowdApp - metadata: +- apiVersion: cloud.redhat.com/v1alpha1 + kind: ClowdApp + metadata: + name: rbac + annotations: + bonfire.dependencies: ${BONFIRE_DEPENDENCIES} + spec: + envName: ${ENV_NAME} + testing: + iqePlugin: rbac + database: name: rbac - annotations: - bonfire.dependencies: ${BONFIRE_DEPENDENCIES} - spec: - envName: ${ENV_NAME} - testing: - iqePlugin: rbac - database: - name: rbac - dbVolumeSize: medium - inMemoryDb: true - kafkaTopics: - - 
topicName: ${NOTIFICATIONS_TOPIC} - partitions: 3 - replicas: 3 - - topicName: ${EXTERNAL_SYNC_TOPIC} - partitions: 1 - replicas: 3 - - topicName: ${EXTERNAL_CHROME_TOPIC} - partitions: 1 - replicas: 3 - deployments: - - name: worker-service - minReplicas: ${{MIN_WORKER_REPLICAS}} - metadata: - annotations: - ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - runs background processes from turnpike/weekly tasks" - podSpec: - image: ${IMAGE}:${IMAGE_TAG} - initContainers: - - env: - inheritEnv: true - command: - - sh - - /opt/rbac/deploy/init-container-setup.sh - resources: - limits: - cpu: ${INIT_WORKER_CPU_LIMIT} - memory: ${INIT_WORKER_MEMORY_LIMIT} - requests: - cpu: ${INIT_WORKER_CPU_REQUEST} - memory: ${INIT_WORKER_MEMORY_REQUEST} + dbVolumeSize: medium + inMemoryDb: true + kafkaTopics: + - topicName: ${NOTIFICATIONS_TOPIC} + partitions: 3 + replicas: 3 + - topicName: ${EXTERNAL_SYNC_TOPIC} + partitions: 1 + replicas: 3 + - topicName: ${EXTERNAL_CHROME_TOPIC} + partitions: 1 + replicas: 3 + deployments: + - name: worker-service + minReplicas: ${{MIN_WORKER_REPLICAS}} + metadata: + annotations: + ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - runs background processes from turnpike/weekly tasks" + podSpec: + image: ${IMAGE}:${IMAGE_TAG} + initContainers: + - env: + inheritEnv: true command: - - /bin/bash - - "-c" - - > - PYTHONPATH=${PWD}/rbac/ celery -A - rbac.celery worker -l $DJANGO_LOG_LEVEL - livenessProbe: - exec: - command: - - /bin/bash - - "-c" - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /bin/bash - - "-c" - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - volumeMounts: - - mountPath: /opt/rbac/rbac/management/role/definitions - name: default-role-config - - mountPath: /opt/rbac/rbac/management/role/permissions - name: model-access-permissions - - mountPath: /opt/rbac/rbac/management/principal/umb_certs - name: umb-certs - volumes: - - configMap: - name: ${CONFIG_MAP_NAME} - name: default-role-config - - configMap: - name: ${MODEL_ACCESS_PERMISSIONS} - name: model-access-permissions - - name: umb-certs - secret: - secretName: service-accounts - items: - - key: umb-cert - path: cert.pem - - key: umb-key - path: key.pem + - sh + - /opt/rbac/deploy/init-container-setup.sh resources: limits: - cpu: ${CELERY_WORKER_CPU_LIMIT} - memory: ${CELERY_WORKER_MEMORY_LIMIT} + cpu: ${INIT_WORKER_CPU_LIMIT} + memory: ${INIT_WORKER_MEMORY_LIMIT} requests: - cpu: ${CELERY_WORKER_CPU_REQUEST} - memory: ${CELERY_WORKER_MEMORY_REQUEST} - env: - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: DJANGO_DEBUG - value: ${DJANGO_DEBUG} - - name: PERMISSION_SEEDING_ENABLED - value: "False" - - name: ROLE_SEEDING_ENABLED - value: "False" - - name: GROUP_SEEDING_ENABLED - value: "False" - - name: DJANGO_SECRET_KEY - valueFrom: - secretKeyRef: - key: django-secret-key - name: ${NAME}-secret - optional: false - - name: ENV_NAME - value: ${ENV_NAME} - - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL - valueFrom: - secretKeyRef: - key: principal-proxy-protocol - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_HOST - valueFrom: - secretKeyRef: - key: principal-proxy-host - name: 
${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PORT - valueFrom: - secretKeyRef: - key: principal-proxy-port - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PATH - value: "" - - name: PRINCIPAL_PROXY_USER_ENV - valueFrom: - secretKeyRef: - key: principal-proxy-env - name: ${NAME}-secret - optional: false - - name: PRINCIPAL_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - key: client-id - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_API_TOKEN - valueFrom: - secretKeyRef: - key: token - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY - valueFrom: - secretKeyRef: - key: principal-proxy-ssl-verify - name: ${NAME}-secret - optional: true - - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT - valueFrom: - secretKeyRef: - key: principal-proxy-source-cert - name: ${NAME}-secret - optional: true - - name: APP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: PGSSLMODE - value: ${PGSSLMODE} - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: MAX_SEED_THREADS - value: ${MAX_SEED_THREADS} - - name: ACCESS_CACHE_CONNECT_SIGNALS - value: "False" - - name: NOTIFICATIONS_ENABLED - value: ${NOTIFICATIONS_ENABLED} - - name: NOTIFICATIONS_RH_ENABLED - value: ${NOTIFICATIONS_RH_ENABLED} - - name: KAFKA_ENABLED - value: ${KAFKA_ENABLED} - - name: NOTIFICATIONS_TOPIC - value: ${NOTIFICATIONS_TOPIC} - - name: EXTERNAL_SYNC_TOPIC - value: ${EXTERNAL_SYNC_TOPIC} - - name: EXTERNAL_CHROME_TOPIC - value: ${EXTERNAL_CHROME_TOPIC} - - name: MIGRATE_AND_SEED_ON_INIT - value: ${WORKER_MIGRATE_AND_SEED_ON_INIT} - - name: UMB_HOST - value: ${UMB_HOST} - - name: UMB_PORT - value: ${UMB_PORT} - - name: SA_NAME - value: ${SA_NAME} - - name: RELATION_API_SERVER - value: ${RELATION_API_SERVER} - - name: REPLICATION_TO_RELATION_ENABLED - value: ${REPLICATION_TO_RELATION_ENABLED} - - name: scheduler-service - minReplicas: ${{MIN_SCHEDULER_REPLICAS}} - metadata: - annotations: - ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - keeps the cron scheduled for the weekly tasks" - podSpec: - image: ${IMAGE}:${IMAGE_TAG} + cpu: ${INIT_WORKER_CPU_REQUEST} + memory: ${INIT_WORKER_MEMORY_REQUEST} + command: + - /bin/bash + - '-c' + - > + PYTHONPATH=${PWD}/rbac/ celery -A + rbac.celery worker -l $DJANGO_LOG_LEVEL + livenessProbe: + exec: command: - /bin/bash - - "-c" + - '-c' - > - PYTHONPATH=${PWD}/rbac/ celery -A - rbac.celery beat -l $DJANGO_LOG_LEVEL - livenessProbe: - exec: - command: - - /bin/bash - - "-c" - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - readinessProbe: - exec: - command: - - /bin/bash - - "-c" - - > - PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery - inspect ping - failureThreshold: 3 - periodSeconds: ${{CELERY_PERIOD_SEC}} - successThreshold: 1 - timeoutSeconds: 10 - resources: - limits: - cpu: ${CELERY_SCHEDULER_CPU_LIMIT} - memory: ${CELERY_SCHEDULER_MEMORY_LIMIT} - requests: - cpu: ${CELERY_SCHEDULER_CPU_REQUEST} - memory: ${CELERY_SCHEDULER_MEMORY_REQUEST} - env: - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: DJANGO_DEBUG - value: ${DJANGO_DEBUG} - - name: APP_NAMESPACE - valueFrom: - fieldRef: - 
fieldPath: metadata.namespace - - name: PERMISSION_SEEDING_ENABLED - value: "False" - - name: ROLE_SEEDING_ENABLED - value: "False" - - name: GROUP_SEEDING_ENABLED - value: "False" - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: PRINCIPAL_USER_DOMAIN - value: ${PRINCIPAL_USER_DOMAIN} - - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB - value: ${PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB} - - name: UMB_JOB_ENABLED - value: ${UMB_JOB_ENABLED} - - - name: service - minReplicas: ${{MIN_REPLICAS}} - webServices: - public: - enabled: true - apiPath: rbac - podSpec: - image: ${IMAGE}:${IMAGE_TAG} - initContainers: - - env: - inheritEnv: true - command: - - sh - - /opt/rbac/deploy/init-container-setup.sh - livenessProbe: - httpGet: - path: /api/rbac/v1/status/ - port: 8000 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 3 - readinessProbe: - httpGet: - path: /api/rbac/v1/status/ - port: 8000 - scheme: HTTP - initialDelaySeconds: 60 - periodSeconds: 10 - successThreshold: 1 - failureThreshold: 3 - timeoutSeconds: 3 - volumes: - - configMap: - name: ${CONFIG_MAP_NAME} - name: default-role-config - - configMap: - name: ${MODEL_ACCESS_PERMISSIONS} - name: model-access-permissions - volumeMounts: - - mountPath: /opt/rbac/rbac/management/role/definitions - name: default-role-config - - mountPath: /opt/rbac/rbac/management/role/permissions - name: model-access-permissions - resources: - limits: - cpu: ${CPU_LIMIT} - memory: ${MEMORY_LIMIT} - requests: - cpu: ${CPU_REQUEST} - memory: ${MEMORY_REQUEST} - env: - - name: GLITCHTIP_DSN - valueFrom: - secretKeyRef: - name: ${GLITCHTIP_SECRET} - key: dsn - optional: true - - name: SERVICE_PSKS - valueFrom: - secretKeyRef: - key: psks.json - name: ${RBAC_PSKS} - optional: false - - name: PGSSLMODE - value: ${PGSSLMODE} - - name: DJANGO_SECRET_KEY - valueFrom: - secretKeyRef: - key: django-secret-key - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL - valueFrom: - secretKeyRef: - key: principal-proxy-protocol - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_HOST - valueFrom: - secretKeyRef: - key: principal-proxy-host - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PORT - valueFrom: - secretKeyRef: - key: principal-proxy-port - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_SERVICE_PATH - value: "" - - name: PRINCIPAL_PROXY_USER_ENV - valueFrom: - secretKeyRef: - key: principal-proxy-env - name: rbac-secret - optional: false - - name: PRINCIPAL_PROXY_CLIENT_ID - valueFrom: - secretKeyRef: - key: client-id - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_API_TOKEN - valueFrom: - secretKeyRef: - key: token - name: insights-rbac - optional: false - - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY - valueFrom: - secretKeyRef: - key: principal-proxy-ssl-verify - name: rbac-secret - optional: true - - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT - valueFrom: - secretKeyRef: - key: principal-proxy-source-cert - name: rbac-secret - optional: true - - name: POD_CPU_LIMIT - valueFrom: - resourceFieldRef: - containerName: rbac-service - resource: limits.cpu - - name: ACCESS_CACHE_ENABLED - value: ${ACCESS_CACHE_ENABLED} - - name: APP_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: DJANGO_DEBUG - 
value: ${DJANGO_DEBUG} - - name: API_PATH_PREFIX - value: ${API_PATH_PREFIX} - - name: DEVELOPMENT - value: ${DEVELOPMENT} - - name: RBAC_LOG_LEVEL - value: ${RBAC_LOG_LEVEL} - - name: DJANGO_LOG_LEVEL - value: ${DJANGO_LOG_LEVEL} - - name: DJANGO_LOG_FORMATTER - value: ${DJANGO_LOG_FORMATTER} - - name: DJANGO_LOG_HANDLERS - value: ${DJANGO_LOG_HANDLERS} - - name: DJANGO_LOG_DIRECTORY - value: ${DJANGO_LOG_DIRECTORY} - - name: DJANGO_LOGGING_FILE - value: ${DJANGO_LOGGING_FILE} - - name: ENV_NAME - value: ${ENV_NAME} - - name: PERMISSION_SEEDING_ENABLED - value: ${PERMISSION_SEEDING_ENABLED} - - name: ROLE_SEEDING_ENABLED - value: ${ROLE_SEEDING_ENABLED} - - name: GROUP_SEEDING_ENABLED - value: ${GROUP_SEEDING_ENABLED} - - name: BYPASS_BOP_VERIFICATION - value: ${BYPASS_BOP_VERIFICATION} - - name: REPLICATION_TO_RELATION_ENABLED - value: ${REPLICATION_TO_RELATION_ENABLED} - - name: ROLE_CREATE_ALLOW_LIST - value: ${ROLE_CREATE_ALLOW_LIST} - - name: V2_MIGRATION_APP_EXCLUDE_LIST - value: ${V2_MIGRATION_APP_EXCLUDE_LIST} - - name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST - value: ${V2_MIGRATION_RESOURCE_EXCLUDE_LIST} - - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL - value: ${RBAC_DESTRUCTIVE_API_ENABLED_UNTIL} - - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL - value: ${RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL} - - name: CLOWDER_ENABLED - value: ${CLOWDER_ENABLED} - - name: APP_NAMESPACE - value: ${APP_NAMESPACE} - - name: CW_NULL_WORKAROUND - value: ${CW_NULL_WORKAROUND} - - name: REDIS_MAX_CONNECTIONS - value: ${REDIS_MAX_CONNECTIONS} - - name: REDIS_SOCKET_CONNECT_TIMEOUT - value: ${REDIS_SOCKET_CONNECT_TIMEOUT} - - name: REDIS_SOCKET_TIMEOUT - value: ${REDIS_SOCKET_TIMEOUT} - - name: NOTIFICATIONS_ENABLED - value: ${NOTIFICATIONS_ENABLED} - - name: GUNICORN_WORKER_MULTIPLIER - value: ${GUNICORN_WORKER_MULTIPLIER} - - name: GUNICORN_THREAD_LIMIT - value: ${GUNICORN_THREAD_LIMIT} - - name: NOTIFICATIONS_TOPIC - value: ${NOTIFICATIONS_TOPIC} - - name: KAFKA_ENABLED - value: ${KAFKA_ENABLED} - - name: EXTERNAL_SYNC_TOPIC - value: ${EXTERNAL_SYNC_TOPIC} - - name: EXTERNAL_CHROME_TOPIC - value: ${EXTERNAL_CHROME_TOPIC} - - name: MIGRATE_AND_SEED_ON_INIT - value: ${SERVICE_MIGRATE_AND_SEED_ON_INIT} - - name: USE_CLOWDER_CA_FOR_BOP - value: ${USE_CLOWDER_CA_FOR_BOP} - - name: IT_BYPASS_IT_CALLS - value: ${IT_BYPASS_IT_CALLS} - - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS - value: ${IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS} - - name: IT_BYPASS_TOKEN_VALIDATION - value: ${IT_BYPASS_TOKEN_VALIDATION} - - name: IT_SERVICE_BASE_PATH - value: ${IT_SERVICE_BASE_PATH} - - name: IT_SERVICE_HOST - value: ${IT_SERVICE_HOST} - - name: IT_SERVICE_PORT - value: ${IT_SERVICE_PORT} - - name: IT_SERVICE_PROTOCOL_SCHEME - value: ${IT_SERVICE_PROTOCOL_SCHEME} - - name: IT_SERVICE_TIMEOUT_SECONDS - value: ${IT_SERVICE_TIMEOUT_SECONDS} - - name: IT_TOKEN_JKWS_CACHE_LIFETIME - value: ${IT_TOKEN_JKWS_CACHE_LIFETIME} - - name: V2_APIS_ENABLED - value: ${V2_APIS_ENABLED} - - name: READ_ONLY_API_MODE - value: ${READ_ONLY_API_MODE} - - jobs: - - name: tenant-org-id-populator - podSpec: - image: quay.io/cloudservices/tenant-utils:latest + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + exec: command: - - ./org-id-column-populator - - -C - - -a - - account_id - - -o - - org_id - - -t - - api_tenant - - --ean-translator-addr - - 
http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} - - --batch-size - - "50" - env: - - name: LOG_FORMAT - value: ${LOG_FORMAT} - - name: LOG_BATCH_FREQUENCY - value: "1" - resources: - limits: - cpu: 300m - memory: 1Gi - requests: - cpu: 50m - memory: 512Mi - - name: cross-account-request-target-org-populator - podSpec: - image: quay.io/cloudservices/tenant-utils:latest + - /bin/bash + - '-c' + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + volumeMounts: + - mountPath: /opt/rbac/rbac/management/role/definitions + name: default-role-config + - mountPath: /opt/rbac/rbac/management/role/permissions + name: model-access-permissions + - mountPath: /opt/rbac/rbac/management/principal/umb_certs + name: umb-certs + volumes: + - configMap: + name: ${CONFIG_MAP_NAME} + name: default-role-config + - configMap: + name: ${MODEL_ACCESS_PERMISSIONS} + name: model-access-permissions + - name: umb-certs + secret: + secretName: service-accounts + items: + - key: umb-cert + path: cert.pem + - key: umb-key + path: key.pem + resources: + limits: + cpu: ${CELERY_WORKER_CPU_LIMIT} + memory: ${CELERY_WORKER_MEMORY_LIMIT} + requests: + cpu: ${CELERY_WORKER_CPU_REQUEST} + memory: ${CELERY_WORKER_MEMORY_REQUEST} + env: + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: PERMISSION_SEEDING_ENABLED + value: 'False' + - name: ROLE_SEEDING_ENABLED + value: 'False' + - name: GROUP_SEEDING_ENABLED + value: 'False' + - name: DJANGO_SECRET_KEY + valueFrom: + secretKeyRef: + key: django-secret-key + name: ${NAME}-secret + optional: false + - name: ENV_NAME + value: ${ENV_NAME} + - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL + valueFrom: + secretKeyRef: + key: principal-proxy-protocol + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_HOST + valueFrom: + secretKeyRef: + key: principal-proxy-host + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PORT + valueFrom: + secretKeyRef: + key: principal-proxy-port + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PATH + value: '' + - name: PRINCIPAL_PROXY_USER_ENV + valueFrom: + secretKeyRef: + key: principal-proxy-env + name: ${NAME}-secret + optional: false + - name: PRINCIPAL_PROXY_CLIENT_ID + valueFrom: + secretKeyRef: + key: client-id + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_API_TOKEN + valueFrom: + secretKeyRef: + key: token + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY + valueFrom: + secretKeyRef: + key: principal-proxy-ssl-verify + name: ${NAME}-secret + optional: true + - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT + valueFrom: + secretKeyRef: + key: principal-proxy-source-cert + name: ${NAME}-secret + optional: true + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PGSSLMODE + value: ${PGSSLMODE} + - name: CLOWDER_ENABLED + value: ${CLOWDER_ENABLED} + - name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: MAX_SEED_THREADS + value: ${MAX_SEED_THREADS} + - name: ACCESS_CACHE_CONNECT_SIGNALS + value: 'False' + - name: NOTIFICATIONS_ENABLED + value: ${NOTIFICATIONS_ENABLED} + - name: NOTIFICATIONS_RH_ENABLED + value: ${NOTIFICATIONS_RH_ENABLED} + - name: KAFKA_ENABLED + value: ${KAFKA_ENABLED} + 
- name: NOTIFICATIONS_TOPIC + value: ${NOTIFICATIONS_TOPIC} + - name: EXTERNAL_SYNC_TOPIC + value: ${EXTERNAL_SYNC_TOPIC} + - name: EXTERNAL_CHROME_TOPIC + value: ${EXTERNAL_CHROME_TOPIC} + - name: MIGRATE_AND_SEED_ON_INIT + value: ${WORKER_MIGRATE_AND_SEED_ON_INIT} + - name: UMB_HOST + value: ${UMB_HOST} + - name: UMB_PORT + value: ${UMB_PORT} + - name: SA_NAME + value: ${SA_NAME} + - name: RELATION_API_SERVER + value: ${RELATION_API_SERVER} + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} + - name: scheduler-service + minReplicas: ${{MIN_SCHEDULER_REPLICAS}} + metadata: + annotations: + ignore-check.kube-linter.io/minimum-three-replicas: "dont need 3 replicas - keeps the cron scheduled for the weekly tasks" + podSpec: + image: ${IMAGE}:${IMAGE_TAG} + command: + - /bin/bash + - '-c' + - > + PYTHONPATH=${PWD}/rbac/ celery -A + rbac.celery beat -l $DJANGO_LOG_LEVEL + livenessProbe: + exec: command: - - ./org-id-column-populator - - -C - - -a - - target_account - - -o - - target_org - - -t - - api_crossaccountrequest - - --ean-translator-addr - - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} - - --batch-size - - "50" - env: - - name: LOG_FORMAT - value: ${LOG_FORMAT} - - name: LOG_BATCH_FREQUENCY - value: "1" - resources: - limits: - cpu: 300m - memory: 1Gi - requests: - cpu: 50m - memory: 512Mi - - apiVersion: v1 - kind: ConfigMap - metadata: - name: rbac-env - data: - api-path-prefix: /api/rbac - app-config: /opt/rbac/rbac/gunicorn.py - app-domain: ${APP_DOMAIN} - app-home: /opt/rbac/rbac - app-module: rbac.wsgi - app-namespace: rbac-stage - database-engine: postgresql - database-name: rbac - database-service-name: POSTGRES_SQL - development: "False" - django-debug: "False" - django-log-directory: "" - django-log-formatter: simple - django-log-handlers: console - django-log-level: INFO - django-logging-file: "" - postgres-sql-service-host: rbac-pgsql.rbac-stage.svc - postgres-sql-service-port: "5432" - rbac-log-level: INFO + - /bin/bash + - '-c' + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + initialDelaySeconds: ${{CELERY_INITIAL_DELAY_SEC}} + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/bash + - '-c' + - > + PYTHONPATH=${PWD}/rbac/ celery -A rbac.celery + inspect ping + failureThreshold: 3 + periodSeconds: ${{CELERY_PERIOD_SEC}} + successThreshold: 1 + timeoutSeconds: 10 + resources: + limits: + cpu: ${CELERY_SCHEDULER_CPU_LIMIT} + memory: ${CELERY_SCHEDULER_MEMORY_LIMIT} + requests: + cpu: ${CELERY_SCHEDULER_CPU_REQUEST} + memory: ${CELERY_SCHEDULER_MEMORY_REQUEST} + env: + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: PERMISSION_SEEDING_ENABLED + value: 'False' + - name: ROLE_SEEDING_ENABLED + value: 'False' + - name: GROUP_SEEDING_ENABLED + value: 'False' + - name: CLOWDER_ENABLED + value: ${CLOWDER_ENABLED} + - name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: PRINCIPAL_USER_DOMAIN + value: ${PRINCIPAL_USER_DOMAIN} + - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB + value: ${PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB} + - name: UMB_JOB_ENABLED + value: ${UMB_JOB_ENABLED} + + - name: service + minReplicas: ${{MIN_REPLICAS}} + webServices: + public: 
+ enabled: true + apiPath: rbac + podSpec: + image: ${IMAGE}:${IMAGE_TAG} + initContainers: + - env: + inheritEnv: true + command: + - sh + - /opt/rbac/deploy/init-container-setup.sh + livenessProbe: + httpGet: + path: /api/rbac/v1/status/ + port: 8000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 3 + readinessProbe: + httpGet: + path: /api/rbac/v1/status/ + port: 8000 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + failureThreshold: 3 + timeoutSeconds: 3 + volumes: + - configMap: + name: ${CONFIG_MAP_NAME} + name: default-role-config + - configMap: + name: ${MODEL_ACCESS_PERMISSIONS} + name: model-access-permissions + volumeMounts: + - mountPath: /opt/rbac/rbac/management/role/definitions + name: default-role-config + - mountPath: /opt/rbac/rbac/management/role/permissions + name: model-access-permissions + resources: + limits: + cpu: ${CPU_LIMIT} + memory: ${MEMORY_LIMIT} + requests: + cpu: ${CPU_REQUEST} + memory: ${MEMORY_REQUEST} + env: + - name: GLITCHTIP_DSN + valueFrom: + secretKeyRef: + name: ${GLITCHTIP_SECRET} + key: dsn + optional: true + - name: SERVICE_PSKS + valueFrom: + secretKeyRef: + key: psks.json + name: ${RBAC_PSKS} + optional: false + - name: PGSSLMODE + value: ${PGSSLMODE} + - name: DJANGO_SECRET_KEY + valueFrom: + secretKeyRef: + key: django-secret-key + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PROTOCOL + valueFrom: + secretKeyRef: + key: principal-proxy-protocol + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_HOST + valueFrom: + secretKeyRef: + key: principal-proxy-host + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PORT + valueFrom: + secretKeyRef: + key: principal-proxy-port + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_SERVICE_PATH + value: '' + - name: PRINCIPAL_PROXY_USER_ENV + valueFrom: + secretKeyRef: + key: principal-proxy-env + name: rbac-secret + optional: false + - name: PRINCIPAL_PROXY_CLIENT_ID + valueFrom: + secretKeyRef: + key: client-id + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_API_TOKEN + valueFrom: + secretKeyRef: + key: token + name: insights-rbac + optional: false + - name: PRINCIPAL_PROXY_SERVICE_SSL_VERIFY + valueFrom: + secretKeyRef: + key: principal-proxy-ssl-verify + name: rbac-secret + optional: true + - name: PRINCIPAL_PROXY_SERVICE_SOURCE_CERT + valueFrom: + secretKeyRef: + key: principal-proxy-source-cert + name: rbac-secret + optional: true + - name: POD_CPU_LIMIT + valueFrom: + resourceFieldRef: + containerName: rbac-service + resource: limits.cpu + - name: ACCESS_CACHE_ENABLED + value: ${ACCESS_CACHE_ENABLED} + - name: APP_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: DJANGO_DEBUG + value: ${DJANGO_DEBUG} + - name: API_PATH_PREFIX + value: ${API_PATH_PREFIX} + - name: DEVELOPMENT + value: ${DEVELOPMENT} + - name: RBAC_LOG_LEVEL + value: ${RBAC_LOG_LEVEL} + - name: DJANGO_LOG_LEVEL + value: ${DJANGO_LOG_LEVEL} + - name: DJANGO_LOG_FORMATTER + value: ${DJANGO_LOG_FORMATTER} + - name: DJANGO_LOG_HANDLERS + value: ${DJANGO_LOG_HANDLERS} + - name: DJANGO_LOG_DIRECTORY + value: ${DJANGO_LOG_DIRECTORY} + - name: DJANGO_LOGGING_FILE + value: ${DJANGO_LOGGING_FILE} + - name: ENV_NAME + value: ${ENV_NAME} + - name: PERMISSION_SEEDING_ENABLED + value: ${PERMISSION_SEEDING_ENABLED} + - name: ROLE_SEEDING_ENABLED + value: ${ROLE_SEEDING_ENABLED} + - name: GROUP_SEEDING_ENABLED + value: 
${GROUP_SEEDING_ENABLED} + - name: BYPASS_BOP_VERIFICATION + value: ${BYPASS_BOP_VERIFICATION} + - name: REPLICATION_TO_RELATION_ENABLED + value: ${REPLICATION_TO_RELATION_ENABLED} + - name: ROLE_CREATE_ALLOW_LIST + value: ${ROLE_CREATE_ALLOW_LIST} + - name: V2_MIGRATION_APP_EXCLUDE_LIST + value: ${V2_MIGRATION_APP_EXCLUDE_LIST} + - name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST + value: ${V2_MIGRATION_RESOURCE_EXCLUDE_LIST} + - name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL + value: ${RBAC_DESTRUCTIVE_API_ENABLED_UNTIL} + - name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL + value: ${RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL} + - name: CLOWDER_ENABLED + value: ${CLOWDER_ENABLED} + - name: APP_NAMESPACE + value: ${APP_NAMESPACE} + - name: CW_NULL_WORKAROUND + value: ${CW_NULL_WORKAROUND} + - name: REDIS_MAX_CONNECTIONS + value: ${REDIS_MAX_CONNECTIONS} + - name: REDIS_SOCKET_CONNECT_TIMEOUT + value: ${REDIS_SOCKET_CONNECT_TIMEOUT} + - name: REDIS_SOCKET_TIMEOUT + value: ${REDIS_SOCKET_TIMEOUT} + - name: NOTIFICATIONS_ENABLED + value: ${NOTIFICATIONS_ENABLED} + - name: GUNICORN_WORKER_MULTIPLIER + value: ${GUNICORN_WORKER_MULTIPLIER} + - name: GUNICORN_THREAD_LIMIT + value: ${GUNICORN_THREAD_LIMIT} + - name: NOTIFICATIONS_TOPIC + value: ${NOTIFICATIONS_TOPIC} + - name: KAFKA_ENABLED + value: ${KAFKA_ENABLED} + - name: EXTERNAL_SYNC_TOPIC + value: ${EXTERNAL_SYNC_TOPIC} + - name: EXTERNAL_CHROME_TOPIC + value: ${EXTERNAL_CHROME_TOPIC} + - name: MIGRATE_AND_SEED_ON_INIT + value: ${SERVICE_MIGRATE_AND_SEED_ON_INIT} + - name: USE_CLOWDER_CA_FOR_BOP + value: ${USE_CLOWDER_CA_FOR_BOP} + - name: IT_BYPASS_IT_CALLS + value: ${IT_BYPASS_IT_CALLS} + - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS + value: ${IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS} + - name: IT_BYPASS_TOKEN_VALIDATION + value: ${IT_BYPASS_TOKEN_VALIDATION} + - name: IT_SERVICE_BASE_PATH + value: ${IT_SERVICE_BASE_PATH} + - name: IT_SERVICE_HOST + value: ${IT_SERVICE_HOST} + - name: IT_SERVICE_PORT + value: ${IT_SERVICE_PORT} + - name: IT_SERVICE_PROTOCOL_SCHEME + value: ${IT_SERVICE_PROTOCOL_SCHEME} + - name: IT_SERVICE_TIMEOUT_SECONDS + value: ${IT_SERVICE_TIMEOUT_SECONDS} + - name: IT_TOKEN_JKWS_CACHE_LIFETIME + value: ${IT_TOKEN_JKWS_CACHE_LIFETIME} + - name: V2_APIS_ENABLED + value: ${V2_APIS_ENABLED} + - name: READ_ONLY_API_MODE + value: ${READ_ONLY_API_MODE} + + jobs: + - name: tenant-org-id-populator + podSpec: + image: quay.io/cloudservices/tenant-utils:latest + command: + - ./org-id-column-populator + - -C + - -a + - account_id + - -o + - org_id + - -t + - api_tenant + - --ean-translator-addr + - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} + - --batch-size + - "50" + env: + - name: LOG_FORMAT + value: ${LOG_FORMAT} + - name: LOG_BATCH_FREQUENCY + value: '1' + resources: + limits: + cpu: 300m + memory: 1Gi + requests: + cpu: 50m + memory: 512Mi + - name: cross-account-request-target-org-populator + podSpec: + image: quay.io/cloudservices/tenant-utils:latest + command: + - ./org-id-column-populator + - -C + - -a + - target_account + - -o + - target_org + - -t + - api_crossaccountrequest + - --ean-translator-addr + - http://${TENANT_TRANSLATOR_HOST}:${TENANT_TRANSLATOR_PORT} + - --batch-size + - "50" + env: + - name: LOG_FORMAT + value: ${LOG_FORMAT} + - name: LOG_BATCH_FREQUENCY + value: '1' + resources: + limits: + cpu: 300m + memory: 1Gi + requests: + cpu: 50m + memory: 512Mi +- apiVersion: v1 + kind: ConfigMap + metadata: + name: rbac-env + data: + api-path-prefix: /api/rbac + app-config: /opt/rbac/rbac/gunicorn.py + 
app-domain: ${APP_DOMAIN}
+ app-home: /opt/rbac/rbac
+ app-module: rbac.wsgi
+ app-namespace: rbac-stage
+ database-engine: postgresql
+ database-name: rbac
+ database-service-name: POSTGRES_SQL
+ development: "False"
+ django-debug: "False"
+ django-log-directory: ""
+ django-log-formatter: simple
+ django-log-handlers: console
+ django-log-level: INFO
+ django-logging-file: ""
+ postgres-sql-service-host: rbac-pgsql.rbac-stage.svc
+ postgres-sql-service-port: "5432"
+ rbac-log-level: INFO
- - apiVersion: v1
- kind: Secret
- metadata:
- name: rbac-psks
- data:
- psks.json: >-
- ewogICJhZHZpc29yIjogewogICAgImFsdC1zZWNyZXQiOiAiMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEiCiAgfSwKICAiYXBwcm92YWwiOiB7CiAgICAiYWx0LXNlY3JldCI6ICIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiIKICB9LAogICJub3RpZmljYXRpb25zIjogewogICAgImFsdC1zZWNyZXQiOiAiMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMiCiAgfQp9
- type: Opaque
- - apiVersion: v1
- kind: Secret
- metadata:
- name: insights-rbac
- data:
- client-id: aW5zaWdodHMtcmJhYw==
- token: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
- type: Opaque
- - apiVersion: v1
- kind: Secret
- metadata:
- name: rbac-secret
- data:
- django-secret-key: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE=
- principal-proxy-api-token: >-
- MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
- principal-proxy-client-id: aW5zaWdodHMtcmJhYw==
- principal-proxy-env: c3RhZ2U=
- principal-proxy-host: bWJvcA==
- principal-proxy-port: ODA5MA==
- principal-proxy-protocol: aHR0cA==
- principal-proxy-source-cert: RmFsc2U=
- principal-proxy-ssl-verify: RmFsc2U=
- sentry-dsn: ""
- type: Opaque
- - apiVersion: v1
- kind: Secret
- metadata:
- name: service-accounts
- data:
- cert: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE=
- key: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
- type: Opaque
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: rbac-psks
+ data:
+ psks.json: >-
+ ewogICJhZHZpc29yIjogewogICAgImFsdC1zZWNyZXQiOiAiMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTEiCiAgfSwKICAiYXBwcm92YWwiOiB7CiAgICAiYWx0LXNlY3JldCI6ICIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMiIKICB9LAogICJub3RpZmljYXRpb25zIjogewogICAgImFsdC1zZWNyZXQiOiAiMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMiCiAgfQp9
+ type: Opaque
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: insights-rbac
+ data:
+ client-id: aW5zaWdodHMtcmJhYw==
+ token: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
+ type: Opaque
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: rbac-secret
+ data:
+ django-secret-key: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE=
+ principal-proxy-api-token: >-
+ MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
+ principal-proxy-client-id: aW5zaWdodHMtcmJhYw==
+ principal-proxy-env: c3RhZ2U=
+ principal-proxy-host: bWJvcA==
+ principal-proxy-port: ODA5MA==
+ principal-proxy-protocol: aHR0cA==
+ principal-proxy-source-cert: RmFsc2U=
+ principal-proxy-ssl-verify: RmFsc2U=
+ sentry-dsn: ''
+ type: Opaque
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: service-accounts
+ data:
+ cert: MTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTExMTE=
+ key: MjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMg==
+ type: Opaque
 parameters:
- - description: Image name
- name: IMAGE
- value: quay.io/cloudservices/rbac
- - description: Determines Clowder deployment
- name: CLOWDER_ENABLED
- value: "true"
- - description: ClowdEnv Name
- name: ENV_NAME
- required: true
- - description: Initial amount of memory the Django container will request.
- displayName: Memory Request
- name: MEMORY_REQUEST
- value: 512Mi
- - description: Maximum amount of memory the Django container can use.
- displayName: Memory Limit
- name: MEMORY_LIMIT
- value: 1Gi
- - description: Initial amount of cpu the Django container will request.
- displayName: CPU Request
- name: CPU_REQUEST
- value: 200m
- - description: Maximum amount of cpu the Django container can use.
- displayName: CPU Limit
- name: CPU_LIMIT
- value: 700m
- - displayName: RBAC PSKs
- name: RBAC_PSKS
- value: rbac-psks
- - displayName: Service Dependency Name
- name: SERVICE_DEPENDENCY_NAME
- value: rbac-pgsql
- - displayName: API Prefix Path
- name: API_PATH_PREFIX
- value: /api/rbac
- - displayName: Development
- name: DEVELOPMENT
- value: "false"
- - displayName: Rbac log level
- name: RBAC_LOG_LEVEL
- value: INFO
- - displayName: Django log level
- name: DJANGO_LOG_LEVEL
- value: INFO
- - displayName: Django log formatter
- name: DJANGO_LOG_FORMATTER
- value: simple
- - displayName: Django log handlers
- name: DJANGO_LOG_HANDLERS
- value: console,ecs
- - displayName: Django log directory
- name: DJANGO_LOG_DIRECTORY
- - displayName: Django logging file
- name: DJANGO_LOGGING_FILE
- - description: Name of the rbac-config config map
- name: CONFIG_MAP_NAME
- value: rbac-config
- - description: Name of the predefined access permissions config map
- name: MODEL_ACCESS_PERMISSIONS
- value: model-access-permissions
- - description: minimum number of pods to use when autoscaling is enabled
- name: MIN_REPLICAS
- value: "1"
- - description: maximum number of pods to use when autoscaling is enabled
- name: MAX_REPLICAS
- value: "1"
- - description: minimum number of pods to use when autoscaling is enabled for worker service
- name: MIN_WORKER_REPLICAS
- value: "1"
- - description: minimum number of pods to use when autoscaling is enabled for scheduler service
- name: MIN_SCHEDULER_REPLICAS
- value: "1"
- - description: target CPU utilization for the service
- name: TARGET_CPU_UTILIZATION
- value: "90"
- - description: "Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS"
- displayName: Postgres SSL mode
- name: PGSSLMODE
- value: prefer
- - description: Python boolean value to enable/disable permission seeding on app boot
- name: PERMISSION_SEEDING_ENABLED
- required: true
- - description: Python boolean value to enable/disable role seeding on app boot
- name: ROLE_SEEDING_ENABLED
- required: true
- - description: Python boolean value to enable/disable group seeding on app boot
- name: GROUP_SEEDING_ENABLED
- required: true
- - description: Enable the RBAC access cache
- name: ACCESS_CACHE_ENABLED
- value: "True"
- - description: Bypass interaction with the BOP service
- name: BYPASS_BOP_VERIFICATION
- value: "False"
- - description: Application allow list for role creation in RBAC
- name: ROLE_CREATE_ALLOW_LIST
- value: cost-management,remediations,inventory,drift,policies,advisor,vulnerability,compliance,automation-analytics,notifications,patch,integrations,ros,staleness,config-manager,idmsvc
- - description: Application exclude list for v2 migration (all permissions)
- name: V2_MIGRATION_APP_EXCLUDE_LIST
- value: approval
- - description: Application exclude list for v2 migration (resource definitions only)
- name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST
- value: rbac:workspace
- - description: Timestamp expiration allowance on destructive actions through the internal RBAC API
- name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL
- value: ""
- - description: Timestamp expiration allowance on destructive actions through the seeding job
- name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL
- value: ""
- - description: Image tag
- name: IMAGE_TAG
- required: true
- - description: Name of DB secret
- name: DB_SECRET_NAME
- value: rbac-db
- - description: The name assigned to all frontend objects defined in this template.
- displayName: Name
- name: NAME
- value: rbac
- - description: Initial amount of CPU the Flower container will request.
- displayName: Celery scheduler CPU Resource Request
- name: CELERY_SCHEDULER_CPU_REQUEST
- value: 100m
- - description: Maximum amount of CPU the scheduler container can use.
- displayName: CPU Limit
- name: CELERY_SCHEDULER_CPU_LIMIT
- value: 300m
- - description: Initial amount of memory the scheduler container will request.
- displayName: Celery scheduler Memory Resource Request
- name: CELERY_SCHEDULER_MEMORY_REQUEST
- value: 256Mi
- - description: Maximum amount of memory the scheduler container can use.
- displayName: Memory Limit
- name: CELERY_SCHEDULER_MEMORY_LIMIT
- value: 512Mi
- - description: Initial amount of CPU the worker container will request.
- displayName: Celery worker CPU Resource Request
- name: CELERY_WORKER_CPU_REQUEST
- value: 100m
- - description: Maximum amount of CPU the worker container can use.
- displayName: CPU Limit
- name: CELERY_WORKER_CPU_LIMIT
- value: 300m
- - description: Initial amount of memory the worker container will request.
- displayName: Celery worker Memory Resource Request
- name: CELERY_WORKER_MEMORY_REQUEST
- value: 256Mi
- - description: Maximum amount of memory the worker container can use.
- displayName: Memory Limit
- name: CELERY_WORKER_MEMORY_LIMIT
- value: 512Mi
- - description: Initial amount of CPU the init worker container will request.
- displayName: RBAC worker init container CPU Resource Request
- name: INIT_WORKER_CPU_REQUEST
- value: 500m
- - description: Maximum amount of CPU the init worker container can use.
- displayName: RBAC worker init container CPU Resource Limit
- name: INIT_WORKER_CPU_LIMIT
- value: 2000m
- - description: Initial amount of memory the init worker container will request.
- displayName: RBAC worker init container Memory Resource Request
- name: INIT_WORKER_MEMORY_REQUEST
- value: 512Mi
- - description: Maximum amount of memory the init worker container can use.
- displayName: RBAC worker init container Memory Resource Limit
- name: INIT_WORKER_MEMORY_LIMIT
- value: 3Gi
- - displayName: Django Debug
- name: DJANGO_DEBUG
- value: "False"
- - displayName: Django log level
- name: DJANGO_LOG_LEVEL
- value: INFO
- - description: "Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS"
- displayName: Postgres SSL mode
- name: PGSSLMODE
- value: prefer
- - description: Name of the rbac-config config map
- name: CONFIG_MAP_NAME
- value: rbac-config
- - description: Name of the predefined access permissions config map
- name: MODEL_ACCESS_PERMISSIONS
- value: model-access-permissions
- - description: Name of DB secret
- name: DB_SECRET_NAME
- value: rbac-db
- - name: APP_NAMESPACE
- value: "rbac"
- - name: CW_NULL_WORKAROUND
- value: "true"
- - name: CELERY_INITIAL_DELAY_SEC
- value: "30"
- - name: CELERY_PERIOD_SEC
- value: "300"
- - description: Default number of threads to use for seeding
- name: MAX_SEED_THREADS
- value: "2"
- - description: max_connections for redis client
- name: REDIS_MAX_CONNECTIONS
- value: "10"
- - description: socket connect timeout for redis
- name: REDIS_SOCKET_CONNECT_TIMEOUT
- value: "0.1"
- - description: socket timeout for redis
- name: REDIS_SOCKET_TIMEOUT
- value: "0.1"
- - description: Enable sending out notification events
- name: NOTIFICATIONS_ENABLED
- value: "False"
- - description: Enable sending out notification events of Red Hat changes
- name: NOTIFICATIONS_RH_ENABLED
- value: "False"
- - name: TENANT_TRANSLATOR_HOST
- required: true
- - name: TENANT_TRANSLATOR_PORT
- value: "8892"
- - name: GUNICORN_WORKER_MULTIPLIER
- value: "2"
- - name: GUNICORN_THREAD_LIMIT
- value: "10"
- - name: NOTIFICATIONS_TOPIC
- value: "platform.notifications.ingress"
- - description: Enable kafka
- name: KAFKA_ENABLED
- value: "False"
- - name: EXTERNAL_SYNC_TOPIC
- value: "platform.rbac.sync"
- - name: EXTERNAL_CHROME_TOPIC
- value: "platform.chrome"
- - name: SERVICE_MIGRATE_AND_SEED_ON_INIT
- value: "True"
- - name: WORKER_MIGRATE_AND_SEED_ON_INIT
- value: "False"
- - name: GLITCHTIP_SECRET
- value: "rbac-secret"
- - name: USE_CLOWDER_CA_FOR_BOP
- value: "False"
- - name: IT_BYPASS_IT_CALLS
- description: Bypass calling IT for fetching real service accounts and to use mocked responses instead?
- value: "False"
- - name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS
- description: Bypass the permissions check for when a user wants to add or remove a service account from a group?
- value: "False"
- - name: IT_BYPASS_TOKEN_VALIDATION
- description: Bypass validating the token that the user must provide in the Authorization header for making IT calls?
- value: "False"
- - name: IT_SERVICE_BASE_PATH
- description: Path of the IT service's API
- value: "/auth/realms/redhat-external/apis"
- - name: IT_SERVICE_HOST
- description: Host of the IT service
- required: true
- - name: IT_SERVICE_PORT
- description: Port of the IT service
- value: "443"
- - name: IT_SERVICE_PROTOCOL_SCHEME
- description: Protocol scheme of the IT service
- value: "https"
- - name: IT_SERVICE_TIMEOUT_SECONDS
- description: Number of seconds to wait for a response from IT before timing out and failing the request
- value: "10"
- - name: IT_TOKEN_JKWS_CACHE_LIFETIME
- value: "28800"
- - name: PRINCIPAL_USER_DOMAIN
- description: >
- Kessel requires principal IDs to be qualified by a domain,
- in order to future proof integration of identities from multiple issuers.
- RBAC currently expects all principals to either come from itself (cross-account),
- or from a single identity infrastructure domain (identity header, SSO, BOP).
- This defines that single domain.
- value: "redhat.com"
- - name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB
- description: Allow cleanup job to delete principals via messages from UMB
- value: "False"
- - name: UMB_JOB_ENABLE
- description: Temp env to enable the UMB job
- value: "True"
- - name: UMB_HOST
- description: Host of the UMB service
- value: "localhost"
- - name: UMB_PORT
- description: Port of the UMB service
- value: "61612"
- - name: SA_NAME
- description: Name of the rbac service account
- value: "nonprod-hcc-rbac"
- - name: BONFIRE_DEPENDENCIES
- description: A comma separated list of non ClowdApp dependencies for bonfire to deploy
- value: "model-access-permissions-yml-stage,rbac-config-yml-stage"
- - name: RELATION_API_SERVER
- description: The gRPC API server to use for the relation
- value: "localhost:9000"
- - name: REPLICATION_TO_RELATION_ENABLED
- description: Enable replication to Relation API
- value: "False"
- - name: V2_APIS_ENABLED
- description: Flag to explicitly enable v2 API endpoints
- - name: READ_ONLY_API_MODE
- description: Enforce GET only on RBAC APIs
- value: "False"
+- description: Image name
+ name: IMAGE
+ value: quay.io/cloudservices/rbac
+- description: Determines Clowder deployment
+ name: CLOWDER_ENABLED
+ value: "true"
+- description: ClowdEnv Name
+ name: ENV_NAME
+ required: true
+- description: Initial amount of memory the Django container will request.
+ displayName: Memory Request
+ name: MEMORY_REQUEST
+ value: 512Mi
+- description: Maximum amount of memory the Django container can use.
+ displayName: Memory Limit
+ name: MEMORY_LIMIT
+ value: 1Gi
+- description: Initial amount of cpu the Django container will request.
+ displayName: CPU Request
+ name: CPU_REQUEST
+ value: 200m
+- description: Maximum amount of cpu the Django container can use.
+ displayName: CPU Limit + name: CPU_LIMIT + value: 700m +- displayName: RBAC PSKs + name: RBAC_PSKS + value: rbac-psks +- displayName: Service Dependency Name + name: SERVICE_DEPENDENCY_NAME + value: rbac-pgsql +- displayName: API Prefix Path + name: API_PATH_PREFIX + value: /api/rbac +- displayName: Development + name: DEVELOPMENT + value: 'false' +- displayName: Rbac log level + name: RBAC_LOG_LEVEL + value: INFO +- displayName: Django log level + name: DJANGO_LOG_LEVEL + value: INFO +- displayName: Django log formatter + name: DJANGO_LOG_FORMATTER + value: simple +- displayName: Django log handlers + name: DJANGO_LOG_HANDLERS + value: console,ecs +- displayName: Django log directory + name: DJANGO_LOG_DIRECTORY +- displayName: Django logging file + name: DJANGO_LOGGING_FILE +- description: Name of the rbac-config config map + name: CONFIG_MAP_NAME + value: rbac-config +- description: Name of the predefined access permissions config map + name: MODEL_ACCESS_PERMISSIONS + value: model-access-permissions +- description: minimum number of pods to use when autoscaling is enabled + name: MIN_REPLICAS + value: '1' +- description: maximum number of pods to use when autoscaling is enabled + name: MAX_REPLICAS + value: '1' +- description: minimum number of pods to use when autoscaling is enabled for worker service + name: MIN_WORKER_REPLICAS + value: '1' +- description: minimum number of pods to use when autoscaling is enabled for scheduler service + name: MIN_SCHEDULER_REPLICAS + value: '1' +- description: target CPU utilization for the service + name: TARGET_CPU_UTILIZATION + value: '90' +- description: 'Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS' + displayName: Postgres SSL mode + name: PGSSLMODE + value: prefer +- description: Python boolean value to enable/disable permission seeding on app boot + name: PERMISSION_SEEDING_ENABLED + required: true +- description: Python boolean value to enable/disable role seeding on app boot + name: ROLE_SEEDING_ENABLED + required: true +- description: Python boolean value to enable/disable group seeding on app boot + name: GROUP_SEEDING_ENABLED + required: true +- description: Enable the RBAC access cache + name: ACCESS_CACHE_ENABLED + value: 'True' +- description: Bypass interaction with the BOP service + name: BYPASS_BOP_VERIFICATION + value: 'False' +- description: Application allow list for role creation in RBAC + name: ROLE_CREATE_ALLOW_LIST + value: cost-management,remediations,inventory,drift,policies,advisor,vulnerability,compliance,automation-analytics,notifications,patch,integrations,ros,staleness,config-manager,idmsvc +- description: Application exclude list for v2 migration (all permissions) + name: V2_MIGRATION_APP_EXCLUDE_LIST + value: approval +- description: Application exclude list for v2 migration (resource definitions only) + name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST + value: rbac:workspace +- description: Timestamp expiration allowance on destructive actions through the internal RBAC API + name: RBAC_DESTRUCTIVE_API_ENABLED_UNTIL + value: '' +- description: Timestamp expiration allowance on destructive actions through the seeding job + name: RBAC_DESTRUCTIVE_SEEDING_ENABLED_UNTIL + value: '' +- description: Image tag + name: IMAGE_TAG + required: true +- description: Name of DB secret + name: DB_SECRET_NAME + value: rbac-db +- description: The name assigned to all frontend objects defined in this template. 
+ displayName: Name + name: NAME + value: rbac +- description: Initial amount of CPU the Flower container will request. + displayName: Celery scheduler CPU Resource Request + name: CELERY_SCHEDULER_CPU_REQUEST + value: 100m +- description: Maximum amount of CPU the scheduler container can use. + displayName: CPU Limit + name: CELERY_SCHEDULER_CPU_LIMIT + value: 300m +- description: Initial amount of memory the scheduler container will request. + displayName: Celery scheduler Memory Resource Request + name: CELERY_SCHEDULER_MEMORY_REQUEST + value: 256Mi +- description: Maximum amount of memory the scheduler container can use. + displayName: Memory Limit + name: CELERY_SCHEDULER_MEMORY_LIMIT + value: 512Mi +- description: Initial amount of CPU the worker container will request. + displayName: Celery worker CPU Resource Request + name: CELERY_WORKER_CPU_REQUEST + value: 100m +- description: Maximum amount of CPU the worker container can use. + displayName: CPU Limit + name: CELERY_WORKER_CPU_LIMIT + value: 300m +- description: Initial amount of memory the worker container will request. + displayName: Celery worker Memory Resource Request + name: CELERY_WORKER_MEMORY_REQUEST + value: 256Mi +- description: Maximum amount of memory the worker container can use. + displayName: Memory Limit + name: CELERY_WORKER_MEMORY_LIMIT + value: 512Mi +- description: Initial amount of CPU the init worker container will request. + displayName: RBAC worker init container CPU Resource Request + name: INIT_WORKER_CPU_REQUEST + value: 500m +- description: Maximum amount of CPU the init worker container can use. + displayName: RBAC worker init container CPU Resource Limit + name: INIT_WORKER_CPU_LIMIT + value: 2000m +- description: Initial amount of memory the init worker container will request. + displayName: RBAC worker init container Memory Resource Request + name: INIT_WORKER_MEMORY_REQUEST + value: 512Mi +- description: Maximum amount of memory the init worker container can use. 
+ displayName: RBAC worker init container Memory Resource Limit + name: INIT_WORKER_MEMORY_LIMIT + value: 3Gi +- displayName: Django Debug + name: DJANGO_DEBUG + value: 'False' +- displayName: Django log level + name: DJANGO_LOG_LEVEL + value: INFO +- description: 'Options can be found in the doc: https://www.postgresql.org/docs/current/libpq-ssl.html#LIBPQ-SSL-SSLMODE-STATEMENTS' + displayName: Postgres SSL mode + name: PGSSLMODE + value: prefer +- description: Name of the rbac-config config map + name: CONFIG_MAP_NAME + value: rbac-config +- description: Name of the predefined access permissions config map + name: MODEL_ACCESS_PERMISSIONS + value: model-access-permissions +- description: Name of DB secret + name: DB_SECRET_NAME + value: rbac-db +- name: APP_NAMESPACE + value: 'rbac' +- name: CW_NULL_WORKAROUND + value: 'true' +- name: CELERY_INITIAL_DELAY_SEC + value: "30" +- name: CELERY_PERIOD_SEC + value: "300" +- description: Default number of threads to use for seeding + name: MAX_SEED_THREADS + value: "2" +- description: max_connections for redis client + name: REDIS_MAX_CONNECTIONS + value: "10" +- description: socket connect timeout for redis + name: REDIS_SOCKET_CONNECT_TIMEOUT + value: "0.1" +- description: socket timeout for redis + name: REDIS_SOCKET_TIMEOUT + value: "0.1" +- description: Enable sending out notification events + name: NOTIFICATIONS_ENABLED + value: 'False' +- description: Enable sending out notification events of Red Hat changes + name: NOTIFICATIONS_RH_ENABLED + value: 'False' +- name: TENANT_TRANSLATOR_HOST + required: true +- name: TENANT_TRANSLATOR_PORT + value: '8892' +- name: GUNICORN_WORKER_MULTIPLIER + value: '2' +- name: GUNICORN_THREAD_LIMIT + value: '10' +- name: NOTIFICATIONS_TOPIC + value: 'platform.notifications.ingress' +- description: Enable kafka + name: KAFKA_ENABLED + value: 'False' +- name: EXTERNAL_SYNC_TOPIC + value: 'platform.rbac.sync' +- name: EXTERNAL_CHROME_TOPIC + value: 'platform.chrome' +- name: SERVICE_MIGRATE_AND_SEED_ON_INIT + value: 'True' +- name: WORKER_MIGRATE_AND_SEED_ON_INIT + value: 'False' +- name: GLITCHTIP_SECRET + value: 'rbac-secret' +- name: USE_CLOWDER_CA_FOR_BOP + value: 'False' +- name: IT_BYPASS_IT_CALLS + description: Bypass calling IT for fetching real service accounts and to use mocked responses instead? + value: 'False' +- name: IT_BYPASS_PERMISSIONS_MODIFY_SERVICE_ACCOUNTS + description: Bypass the permissions check for when a user wants to add or remove a service account from a group? + value: 'False' +- name: IT_BYPASS_TOKEN_VALIDATION + description: Bypass validating the token that the user must provide in the Authorization header for making IT calls? + value: 'False' +- name: IT_SERVICE_BASE_PATH + description: Path of the IT service's API + value: '/auth/realms/redhat-external/apis' +- name: IT_SERVICE_HOST + description: Host of the IT service + required: true +- name: IT_SERVICE_PORT + description: Port of the IT service + value: '443' +- name: IT_SERVICE_PROTOCOL_SCHEME + description: Protocol scheme of the IT service + value: 'https' +- name: IT_SERVICE_TIMEOUT_SECONDS + description: Number of seconds to wait for a response from IT before timing out and failing the request + value: '10' +- name: IT_TOKEN_JKWS_CACHE_LIFETIME + value: '28800' +- name: PRINCIPAL_USER_DOMAIN + description: > + Kessel requires principal IDs to be qualified by a domain, + in order to future proof integration of identities from multiple issuers. 
+ RBAC currently expects all principals to either come from itself (cross-account), + or from a single identity infrastructure domain (identity header, SSO, BOP). + This defines that single domain. + value: 'redhat.com' +- name: PRINCIPAL_CLEANUP_DELETION_ENABLED_UMB + description: Allow cleanup job to delete principals via messages from UMB + value: 'False' +- name: UMB_JOB_ENABLE + description: Temp env to enable the UMB job + value: 'True' +- name: UMB_HOST + description: Host of the UMB service + value: 'localhost' +- name: UMB_PORT + description: Port of the UMB service + value: '61612' +- name: SA_NAME + description: Name of the rbac service account + value: 'nonprod-hcc-rbac' +- name: BONFIRE_DEPENDENCIES + description: A comma separated list of non ClowdApp dependencies for bonfire to deploy + value: "model-access-permissions-yml-stage,rbac-config-yml-stage" +- name: RELATION_API_SERVER + description: The gRPC API server to use for the relation + value: "localhost:9000" +- name: REPLICATION_TO_RELATION_ENABLED + description: Enable replication to Relation API + value: "False" +- name: V2_APIS_ENABLED + description: Flag to explicitly enable v2 API endpoints +- name: READ_ONLY_API_MODE + description: Enforce GET only on RBAC APIs + value: 'False' From 09cb3d4bd8f9b3b1afd396ef670ab6cb2e000b96 Mon Sep 17 00:00:00 2001 From: Alec Henninger Date: Fri, 11 Oct 2024 13:22:46 -0400 Subject: [PATCH 55/55] Update description --- deploy/rbac-clowdapp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deploy/rbac-clowdapp.yml b/deploy/rbac-clowdapp.yml index 17f71c2d8..3f037bb91 100644 --- a/deploy/rbac-clowdapp.yml +++ b/deploy/rbac-clowdapp.yml @@ -751,7 +751,7 @@ parameters: - description: Application exclude list for v2 migration (all permissions) name: V2_MIGRATION_APP_EXCLUDE_LIST value: approval -- description: Application exclude list for v2 migration (resource definitions only) +- description: Resources (by namespace:name) exclude list for v2 migration (resource definitions only) name: V2_MIGRATION_RESOURCE_EXCLUDE_LIST value: rbac:workspace - description: Timestamp expiration allowance on destructive actions through the internal RBAC API