diff --git a/rust/feature-flags/src/api/endpoint.rs b/rust/feature-flags/src/api/endpoint.rs index e995cfd5dc15e..b083ee573c9e2 100644 --- a/rust/feature-flags/src/api/endpoint.rs +++ b/rust/feature-flags/src/api/endpoint.rs @@ -46,7 +46,6 @@ pub async fn flags( let context = RequestContext { state, ip, - meta: meta.0, headers, body, }; diff --git a/rust/feature-flags/src/api/handler.rs b/rust/feature-flags/src/api/handler.rs index 0fdade8d95128..7a6bef7eed098 100644 --- a/rust/feature-flags/src/api/handler.rs +++ b/rust/feature-flags/src/api/handler.rs @@ -58,7 +58,6 @@ pub struct FlagsQueryParams { pub struct RequestContext { pub state: State, pub ip: IpAddr, - pub meta: FlagsQueryParams, pub headers: HeaderMap, pub body: Bytes, } @@ -82,11 +81,24 @@ pub struct FeatureFlagEvaluationContext { hash_key_override: Option, } +/// Process a feature flag request and return the evaluated flags +/// +/// ## Flow +/// 1. Decodes and validates the request +/// 2. Extracts and verifies the authentication token +/// 3. Retrieves team information +/// 4. Processes person and group properties +/// 5. Retrieves feature flags +/// 6. 
Evaluates flags based on the context +/// +/// ## Error Handling +/// - Returns early if any step fails +/// - Maintains error context through the FlagError enum +/// - Individual flag evaluation failures don't fail the entire request pub async fn process_request(context: RequestContext) -> Result { let RequestContext { state, ip, - meta: _, // TODO use this headers, body, } = context; @@ -95,12 +107,12 @@ pub async fn process_request(context: RequestContext) -> Result Result>, + existing_overrides: Option>>, +) -> Option>> { + match groups { + Some(groups) => { + let group_key_overrides: HashMap> = groups + .into_iter() + .map(|(group_type, group_key)| { + let mut properties = existing_overrides + .as_ref() + .and_then(|g| g.get(&group_type)) + .cloned() + .unwrap_or_default(); + + properties.insert("$group_key".to_string(), group_key); + + (group_type, properties) + }) + .collect(); + + let mut result = existing_overrides.unwrap_or_default(); + result.extend(group_key_overrides); + Some(result) + } + None => existing_overrides, + } +} + /// Decode a request into a `FlagRequest` /// - Currently only supports JSON requests // TODO support all supported content types @@ -738,4 +787,61 @@ mod tests { assert!(!result.error_while_computing_flags); assert_eq!(result.feature_flags["test_flag"], FlagValue::Boolean(true)); } + + #[test] + fn test_process_group_property_overrides() { + // Test case 1: Both groups and existing overrides + let groups = HashMap::from([ + ("project".to_string(), json!("project_123")), + ("organization".to_string(), json!("org_456")), + ]); + + let mut existing_overrides = HashMap::new(); + let mut project_props = HashMap::new(); + project_props.insert("industry".to_string(), json!("tech")); + existing_overrides.insert("project".to_string(), project_props); + + let result = + process_group_property_overrides(Some(groups.clone()), Some(existing_overrides)); + + assert!(result.is_some()); + let result = result.unwrap(); + + // Check project 
properties + let project_props = result.get("project").expect("Project properties missing"); + assert_eq!(project_props.get("industry"), Some(&json!("tech"))); + assert_eq!(project_props.get("$group_key"), Some(&json!("project_123"))); + + // Check organization properties + let org_props = result + .get("organization") + .expect("Organization properties missing"); + assert_eq!(org_props.get("$group_key"), Some(&json!("org_456"))); + + // Test case 2: Only groups, no existing overrides + let result = process_group_property_overrides(Some(groups.clone()), None); + + assert!(result.is_some()); + let result = result.unwrap(); + assert_eq!(result.len(), 2); + assert_eq!( + result.get("project").unwrap().get("$group_key"), + Some(&json!("project_123")) + ); + + // Test case 3: No groups, only existing overrides + let mut existing_overrides = HashMap::new(); + let mut project_props = HashMap::new(); + project_props.insert("industry".to_string(), json!("tech")); + existing_overrides.insert("project".to_string(), project_props); + + let result = process_group_property_overrides(None, Some(existing_overrides.clone())); + + assert!(result.is_some()); + assert_eq!(result.unwrap(), existing_overrides); + + // Test case 4: Neither groups nor existing overrides + let result = process_group_property_overrides(None, None); + assert!(result.is_none()); + } } diff --git a/rust/feature-flags/src/api/types.rs b/rust/feature-flags/src/api/types.rs index 3eb81b7d1adad..0f04f2a5b40a5 100644 --- a/rust/feature-flags/src/api/types.rs +++ b/rust/feature-flags/src/api/types.rs @@ -1,4 +1,5 @@ use serde::{Deserialize, Serialize}; +use serde_json::Value; use std::collections::HashMap; #[derive(Debug, PartialEq, Eq, Deserialize, Serialize)] @@ -18,4 +19,5 @@ pub enum FlagValue { pub struct FlagsResponse { pub error_while_computing_flags: bool, pub feature_flags: HashMap, + pub feature_flag_payloads: HashMap, // flag key -> payload } diff --git a/rust/feature-flags/src/cohort/cohort_operations.rs 
b/rust/feature-flags/src/cohort/cohort_operations.rs index 60afc7ca30f1c..b992e8f66473f 100644 --- a/rust/feature-flags/src/cohort/cohort_operations.rs +++ b/rust/feature-flags/src/cohort/cohort_operations.rs @@ -302,8 +302,6 @@ mod tests { .find(|c| c.id == main_cohort.id) .expect("Failed to find main cohort"); - println!("fetched_main_cohort: {:?}", fetched_main_cohort); - let dependencies = fetched_main_cohort.extract_dependencies().unwrap(); let expected_dependencies: HashSet = [dependent_cohort.id].iter().cloned().collect(); diff --git a/rust/feature-flags/src/flags/flag_matching.rs b/rust/feature-flags/src/flags/flag_matching.rs index 04b9dc67c7939..09287fde4fef0 100644 --- a/rust/feature-flags/src/flags/flag_matching.rs +++ b/rust/feature-flags/src/flags/flag_matching.rs @@ -308,9 +308,25 @@ impl FeatureFlagMatcher { error_while_computing_flags: initial_error || flags_response.error_while_computing_flags, feature_flags: flags_response.feature_flags, + feature_flag_payloads: flags_response.feature_flag_payloads, } } + /// Processes hash key overrides for feature flags with experience continuity enabled. + /// + /// This method handles the logic for managing hash key overrides, which are used to ensure + /// consistent feature flag experiences across different distinct IDs (e.g., when a user logs in). + /// It performs the following steps: + /// + /// 1. Checks if a hash key override needs to be written by comparing the current distinct ID + /// with the provided hash key + /// 2. If needed, writes the hash key override to the database using the writer connection + /// 3. Increments metrics to track successful/failed hash key override writes + /// 4. 
Retrieves and returns the current hash key overrides for the target distinct IDs + /// + /// Returns a tuple containing: + /// - Option>: The hash key overrides if successfully retrieved, None if there was an error + /// - bool: Whether there was an error during processing (true = error occurred) async fn process_hash_key_override( &self, hash_key: String, @@ -397,15 +413,22 @@ impl FeatureFlagMatcher { } } - async fn evaluate_flags_with_overrides( + /// Evaluates feature flags with property and hash key overrides. + /// + /// This function evaluates feature flags in two steps: + /// 1. First, it evaluates flags that can be computed using only the provided property overrides + /// 2. Then, for remaining flags that need database properties, it fetches and caches those properties + /// before evaluating those flags + pub async fn evaluate_flags_with_overrides( &mut self, feature_flags: FeatureFlagList, person_property_overrides: Option>, group_property_overrides: Option>>, hash_key_overrides: Option>, ) -> FlagsResponse { - let mut result = HashMap::new(); let mut error_while_computing_flags = false; + let mut feature_flags_map = HashMap::new(); + let mut feature_flag_payloads_map = HashMap::new(); let mut flags_needing_db_properties = Vec::new(); // Step 1: Evaluate flags with locally computable property overrides first @@ -425,7 +448,11 @@ impl FeatureFlagMatcher { { Ok(Some(flag_match)) => { let flag_value = self.flag_match_to_value(&flag_match); - result.insert(flag.key.clone(), flag_value); + feature_flags_map.insert(flag.key.clone(), flag_value); + + if let Some(payload) = flag_match.payload { + feature_flag_payloads_map.insert(flag.key.clone(), payload); + } } Ok(None) => { flags_needing_db_properties.push(flag.clone()); @@ -448,21 +475,51 @@ impl FeatureFlagMatcher { // Step 2: Fetch and cache properties for remaining flags (just one DB lookup for all of relevant properties) if !flags_needing_db_properties.is_empty() { - let group_type_indexes: HashSet = 
flags_needing_db_properties + let group_type_indexes_required: HashSet = flags_needing_db_properties .iter() .filter_map(|flag| flag.get_group_type_index()) .collect(); + // Map group names to group_type_index and group_keys + let group_type_to_key_map: HashMap = self + .groups + .iter() + .filter_map(|(group_type, group_key_value)| { + let group_key = group_key_value.as_str()?.to_string(); + self.group_type_mapping_cache + .group_types_to_indexes + .get(group_type) + .cloned() + .map(|group_type_index| (group_type_index, group_key)) + }) + .collect(); + + // Extract group_keys that are relevant to the required group_type_indexes + let group_keys: HashSet = group_type_to_key_map + .iter() + .filter_map(|(group_type_index, group_key)| { + if group_type_indexes_required.contains(group_type_index) { + Some(group_key.clone()) + } else { + None + } + }) + .collect(); + + // Extract group_type_indexes for the required flags + let group_type_indexes: HashSet = group_type_indexes_required.clone(); + let reader = self.reader.clone(); let distinct_id = self.distinct_id.clone(); let team_id = self.team_id; - match fetch_and_locally_cache_all_properties( + match fetch_and_locally_cache_all_relevant_properties( &mut self.properties_cache, reader, distinct_id, team_id, &group_type_indexes, + &group_keys, ) .await { @@ -487,9 +544,6 @@ impl FeatureFlagMatcher { } // Step 3: Evaluate remaining flags with cached properties - // At this point we've already done a round of flag evaluations with locally computable property overrides - // This step is for flags that couldn't be evaluated locally due to missing property values, - // so we do a single query to fetch all of the remaining properties, and then proceed with flag evaluations for flag in flags_needing_db_properties { match self .get_match(&flag, None, hash_key_overrides.clone()) @@ -497,7 +551,11 @@ impl FeatureFlagMatcher { { Ok(flag_match) => { let flag_value = self.flag_match_to_value(&flag_match); - 
result.insert(flag.key.clone(), flag_value); + feature_flags_map.insert(flag.key.clone(), flag_value); + + if let Some(payload) = flag_match.payload { + feature_flag_payloads_map.insert(flag.key.clone(), payload); + } } Err(e) => { error_while_computing_flags = true; @@ -519,7 +577,8 @@ impl FeatureFlagMatcher { FlagsResponse { error_while_computing_flags, - feature_flags: result, + feature_flags: feature_flags_map, + feature_flag_payloads: feature_flag_payloads_map, } } @@ -1061,8 +1124,49 @@ impl FeatureFlagMatcher { let reader = self.reader.clone(); let team_id = self.team_id; + // groups looks like this {"project": "project_123"} + // and then the group type index looks like this {"project": 1} + // so I want my group keys to look like this ["project_123"], + // but they need to be aware of the different group types + // Retrieve group_type_name using group_type_index from the cache + let group_type_mapping = self + .group_type_mapping_cache + .group_type_index_to_group_type_map() + .await?; + let group_type_name = match group_type_mapping.get(&group_type_index) { + Some(name) => name.clone(), + None => { + error!( + "No group_type_name found for group_type_index {}", + group_type_index + ); + return Err(FlagError::NoGroupTypeMappings); + } + }; + + // Retrieve the corresponding group_key from self.groups using group_type_name + let group_key = match self.groups.get(&group_type_name) { + Some(Value::String(key)) => key.clone(), + Some(_) => { + error!( + "Group key for group_type_name '{}' is not a string", + group_type_name + ); + return Err(FlagError::NoGroupTypeMappings); + } + None => { + // If there's no group_key provided for this group_type_name, 
we consider that there are no properties to fetch + return Ok(HashMap::new()); + } + }; let db_properties = - fetch_group_properties_from_db(reader, team_id, group_type_index).await?; + fetch_group_properties_from_db(reader, team_id, group_type_index, group_key).await?; inc( DB_GROUP_PROPERTIES_READS_COUNTER, @@ -1140,10 +1244,13 @@ impl FeatureFlagMatcher { .await? .get(&group_type_index) .and_then(|group_type_name| self.groups.get(group_type_name)) - .cloned() - .unwrap_or_default(); + .and_then(|v| v.as_str()) + // NB: we currently use empty string ("") as the hashed identifier for group flags without a group key, + // and I don't want to break parity with the old service since I don't want the hash values to change + .unwrap_or("") + .to_string(); - Ok(group_key.to_string()) + Ok(group_key) } else { // Person-based flag // Use hash key overrides for experience continuity @@ -1172,9 +1279,9 @@ impl FeatureFlagMatcher { .hashed_identifier(feature_flag, hash_key_overrides) .await?; if hashed_identifier.is_empty() { - // Return a hash value that will make the flag evaluate to false - // TODO make this cleaner – we should have a way to return a default value - return Ok(0.0); + // Return a hash value that will make the flag evaluate to false; since we + // can't evaluate a flag without an identifier. + return Ok(0.0); // NB: A flag with 0.0 hash will always evaluate to false } let hash_key = format!("{}.{}{}", feature_flag.key, hashed_identifier, salt); let mut hasher = Sha1::new(); @@ -1248,7 +1355,7 @@ impl FeatureFlagMatcher { /// Evaluate static cohort filters by checking if the person is in each cohort. 
async fn evaluate_static_cohorts( reader: PostgresReader, - person_id: i32, // Change this parameter from distinct_id to person_id + person_id: i32, cohort_ids: Vec, ) -> Result, FlagError> { let mut conn = reader.get_connection().await?; @@ -1268,7 +1375,7 @@ async fn evaluate_static_cohorts( let rows = sqlx::query(query) .bind(&cohort_ids) - .bind(person_id) // Bind person_id directly + .bind(person_id) .fetch_all(&mut *conn) .await?; @@ -1448,7 +1555,6 @@ fn build_cohort_dependency_graph( } } - // Check for cycles, this is an directed acyclic graph so we use is_cyclic_directed if is_cyclic_directed(&graph) { return Err(FlagError::CohortDependencyCycle(format!( "Cyclic dependency detected starting at cohort {}", @@ -1463,52 +1569,62 @@ fn build_cohort_dependency_graph( /// /// This function fetches both person and group properties for a specified distinct ID and team ID. /// It updates the properties cache with the fetched properties and returns the result. -async fn fetch_and_locally_cache_all_properties( +async fn fetch_and_locally_cache_all_relevant_properties( properties_cache: &mut PropertiesCache, reader: PostgresReader, distinct_id: String, team_id: TeamId, group_type_indexes: &HashSet, + group_keys: &HashSet, ) -> Result<(), FlagError> { let mut conn = reader.as_ref().get_connection().await?; let query = r#" - SELECT - person.person_id, - person.person_properties, - group_properties.group_properties - FROM ( - SELECT - "posthog_person"."id" AS person_id, - "posthog_person"."properties" AS person_properties - FROM "posthog_person" - INNER JOIN "posthog_persondistinctid" - ON "posthog_person"."id" = "posthog_persondistinctid"."person_id" - WHERE - "posthog_persondistinctid"."distinct_id" = $1 - AND "posthog_persondistinctid"."team_id" = $2 - AND "posthog_person"."team_id" = $2 - LIMIT 1 - ) AS person, - ( - SELECT - json_object_agg( - "posthog_group"."group_type_index", - "posthog_group"."group_properties" - ) AS group_properties - FROM "posthog_group" - 
WHERE - "posthog_group"."team_id" = $2 - AND "posthog_group"."group_type_index" = ANY($3) - ) AS group_properties + SELECT + ( + SELECT "posthog_person"."id" + FROM "posthog_person" + INNER JOIN "posthog_persondistinctid" + ON "posthog_person"."id" = "posthog_persondistinctid"."person_id" + WHERE + "posthog_persondistinctid"."distinct_id" = $1 + AND "posthog_persondistinctid"."team_id" = $2 + AND "posthog_person"."team_id" = $2 + LIMIT 1 + ) AS person_id, + ( + SELECT "posthog_person"."properties" + FROM "posthog_person" + INNER JOIN "posthog_persondistinctid" + ON "posthog_person"."id" = "posthog_persondistinctid"."person_id" + WHERE + "posthog_persondistinctid"."distinct_id" = $1 + AND "posthog_persondistinctid"."team_id" = $2 + AND "posthog_person"."team_id" = $2 + LIMIT 1 + ) AS person_properties, + ( + SELECT + json_object_agg( + "posthog_group"."group_type_index", + "posthog_group"."group_properties" + ) + FROM "posthog_group" + WHERE + "posthog_group"."team_id" = $2 + AND "posthog_group"."group_type_index" = ANY($3) + AND "posthog_group"."group_key" = ANY($4) + ) AS group_properties "#; let group_type_indexes_vec: Vec = group_type_indexes.iter().cloned().collect(); + let group_keys_vec: Vec = group_keys.iter().cloned().collect(); let row: (Option, Option, Option) = sqlx::query_as(query) .bind(&distinct_id) .bind(team_id) .bind(&group_type_indexes_vec) + .bind(&group_keys_vec) // Bind group_keys_vec to $4 .fetch_optional(&mut *conn) .await? 
.unwrap_or((None, None, None)); @@ -1602,6 +1718,7 @@ async fn fetch_group_properties_from_db( reader: PostgresReader, team_id: TeamId, group_type_index: GroupTypeIndex, + group_key: String, ) -> Result, FlagError> { let mut conn = reader.as_ref().get_connection().await?; @@ -1609,13 +1726,15 @@ SELECT "posthog_group"."group_properties" FROM "posthog_group" WHERE ("posthog_group"."team_id" = $1 - AND "posthog_group"."group_type_index" = $2) + AND "posthog_group"."group_type_index" = $2 + AND "posthog_group"."group_key" = $3) LIMIT 1 "#; let row: Option = sqlx::query_scalar(query) .bind(team_id) .bind(group_type_index) + .bind(group_key) .fetch_optional(&mut *conn) .await?; diff --git a/rust/feature-flags/src/utils/test_utils.rs b/rust/feature-flags/src/utils/test_utils.rs index 1c6cf2b1caafe..99501fcaa8b78 100644 --- a/rust/feature-flags/src/utils/test_utils.rs +++ b/rust/feature-flags/src/utils/test_utils.rs @@ -261,6 +261,29 @@ pub async fn insert_new_team_in_pg( Ok(team) } +pub async fn insert_group_type_mapping_in_pg( + client: Arc, + team_id: i32, + group_type: &str, + group_type_index: i32, +) -> Result<(), Error> { + let mut conn = client.get_connection().await?; + let res = sqlx::query( + r#"INSERT INTO posthog_grouptypemapping + (team_id, project_id, group_type, group_type_index, name_singular, name_plural) + VALUES + ($1, $1, $2, $3, NULL, NULL) + ON CONFLICT (team_id, group_type) DO NOTHING"#, + ) + .bind(team_id) + .bind(group_type) + .bind(group_type_index) + .execute(&mut *conn) + .await?; + assert!(res.rows_affected() <= 1); + Ok(()) +} + pub async fn insert_flag_for_team_in_pg( client: Arc, team_id: i32, @@ -454,3 +477,55 @@ pub async fn add_person_to_cohort( Ok(()) } + +#[derive(Debug)] +pub struct Group { + pub id: i32, + pub team_id: i32, + pub group_type_index: i32, + pub group_key: String, + pub group_properties: Value, +} + +pub async fn create_group_in_pg( + client: Arc, + team_id: i32, + group_type: 
&str, + group_key: &str, + group_properties: Value, +) -> Result { + // First, retrieve the group_type_index from grouptypemapping + let mut conn = client.get_connection().await?; + let row = sqlx::query( + r#"SELECT group_type_index FROM posthog_grouptypemapping + WHERE team_id = $1 AND group_type = $2"#, + ) + .bind(team_id) + .bind(group_type) + .fetch_one(&mut *conn) + .await?; + let group_type_index: i32 = row.get("group_type_index"); + + // Insert the group with all non-nullable fields + let res = sqlx::query( + r#"INSERT INTO posthog_group + (team_id, group_type_index, group_key, group_properties, created_at, properties_last_updated_at, properties_last_operation, version) + VALUES ($1, $2, $3, $4, '2024-06-17', '{}'::jsonb, '{}'::jsonb, 0) + RETURNING id"#, + ) + .bind(team_id) + .bind(group_type_index) + .bind(group_key) + .bind(group_properties.clone()) + .fetch_one(&mut *conn) + .await?; + let group_id: i32 = res.get("id"); + + Ok(Group { + id: group_id, + team_id, + group_type_index, + group_key: group_key.to_string(), + group_properties, + }) +} diff --git a/rust/feature-flags/tests/test_flags.rs b/rust/feature-flags/tests/test_flags.rs index 918a73ede6faa..9ee793596c0b1 100644 --- a/rust/feature-flags/tests/test_flags.rs +++ b/rust/feature-flags/tests/test_flags.rs @@ -1,6 +1,7 @@ use anyhow::Result; use assert_json_diff::assert_json_include; +use rand::Rng; use reqwest::StatusCode; use serde_json::{json, Value}; @@ -8,8 +9,9 @@ use crate::common::*; use feature_flags::config::DEFAULT_TEST_CONFIG; use feature_flags::utils::test_utils::{ - insert_flags_for_team_in_redis, insert_new_team_in_pg, insert_new_team_in_redis, - setup_pg_reader_client, setup_redis_client, + create_group_in_pg, insert_flags_for_team_in_redis, insert_new_team_in_pg, + insert_new_team_in_redis, insert_person_for_team_in_pg, setup_pg_reader_client, + setup_redis_client, }; pub mod common; @@ -91,7 +93,7 @@ async fn it_rejects_invalid_headers_flag_request() -> Result<()> { .await; 
assert_eq!(StatusCode::BAD_REQUEST, res.status()); - // We don't want to deserialize the data into a flagResponse struct here, + // We don't want to deserialize the data into a FlagsResponse struct here, // because we want to assert the shape of the raw json data. let response_text = res.text().await?; @@ -193,7 +195,6 @@ async fn it_handles_malformed_json() -> Result<()> { assert_eq!(StatusCode::BAD_REQUEST, res.status()); let response_text = res.text().await?; - println!("Response text: {:?}", response_text); assert!( response_text.contains("Failed to decode request: invalid JSON"), @@ -400,7 +401,7 @@ async fn it_handles_flag_with_property_filter() -> Result<()> { } #[tokio::test] -async fn it_handles_flag_with_group_properties() -> Result<()> { +async fn it_matches_flags_to_a_request_with_group_property_overrides() -> Result<()> { let config = DEFAULT_TEST_CONFIG.clone(); let distinct_id = "user_distinct_id".to_string(); @@ -500,3 +501,244 @@ async fn it_handles_flag_with_group_properties() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_feature_flags_with_json_payloads() -> Result<()> { + let config = DEFAULT_TEST_CONFIG.clone(); + let distinct_id = "example_id".to_string(); + let redis_client = setup_redis_client(Some(config.redis_url.clone())); + let pg_client = setup_pg_reader_client(None).await; + + // Insert a new team into Redis and retrieve the team details + let team = insert_new_team_in_redis(redis_client.clone()) + .await + .unwrap(); + let token = team.api_token; + + insert_new_team_in_pg(pg_client.clone(), Some(team.id)) + .await + .unwrap(); + + insert_person_for_team_in_pg( + pg_client.clone(), + team.id, + distinct_id.clone(), + Some(json!({"email": "tim@posthog.com"})), + ) + .await?; + + let flag_json = json!([{ + "id": 1, + "key": "filter-by-property", + "name": "Filter by property", + "active": true, + "deleted": false, + "team_id": team.id, + "filters": { + "groups": [ + { + "properties": [ + { + "key": "email", + "value": 
"tim@posthog.com", + "operator": "exact", + "type": "person", + } + ], + "rollout_percentage": null, + } + ], + "payloads": { + "true": { + "color": "blue" + } + }, + }, + }]); + + insert_flags_for_team_in_redis(redis_client, team.id, Some(flag_json.to_string())).await?; + + let server = ServerHandle::for_config(config).await; + + let payload = json!({ + "token": token, + "distinct_id": distinct_id, + }); + + let res = server.send_flags_request(payload.to_string()).await; + + assert_eq!(StatusCode::OK, res.status()); + + let json_data = res.json::().await?; + + assert_json_include!( + actual: json_data, + expected: json!({ + "featureFlagPayloads": { + "filter-by-property": { "color": "blue" } + } + }) + ); + + Ok(()) +} + +#[tokio::test] +async fn test_feature_flags_with_group_relationships() -> Result<()> { + let config = DEFAULT_TEST_CONFIG.clone(); + let distinct_id = "example_id".to_string(); + let redis_client = setup_redis_client(Some(config.redis_url.clone())); + let pg_client = setup_pg_reader_client(None).await; + let team_id = rand::thread_rng().gen_range(1..10_000_000); + let team = insert_new_team_in_pg(pg_client.clone(), Some(team_id)) + .await + .unwrap(); + + let token = team.api_token; + + // Create a group of type "organization" (group_type_index 1) with group_key "foo" and specific properties + create_group_in_pg( + pg_client.clone(), + team.id, + "organization", + "foo", + json!({"email": "posthog@example.com"}), + ) + .await?; + + // Create a group of type "project" (group_type_index 0) with group_key "bar" and specific properties + create_group_in_pg( + pg_client.clone(), + team.id, + "project", + "bar", + json!({"name": "Project Bar"}), + ) + .await?; + + // Define feature flags + let flags_json = json!([ + { + "id": 1, + "key": "default-no-prop-group-flag", + "name": "This is a feature flag with default params, no filters.", + "active": true, + "deleted": false, + "team_id": team.id, + "filters": { + "aggregation_group_type_index": 0, + 
"groups": [{"rollout_percentage": null}] + } + }, + { + "id": 2, + "key": "groups-flag", + "name": "This is a group-based flag", + "active": true, + "deleted": false, + "team_id": team.id, + "filters": { + "aggregation_group_type_index": 1, + "groups": [ + { + "properties": [ + { + "key": "email", + "value": "posthog", + "operator": "icontains", + "type": "group", + "group_type_index": 1 + } + ], + "rollout_percentage": null + } + ] + } + } + ]); + + // Insert the feature flags into Redis + insert_flags_for_team_in_redis(redis_client.clone(), team.id, Some(flags_json.to_string())) + .await?; + + let server = ServerHandle::for_config(config).await; + + // First Decision: Without specifying any groups + { + let payload = json!({ + "token": token, + "distinct_id": distinct_id + }); + + let res = server.send_flags_request(payload.to_string()).await; + assert_eq!(res.status(), StatusCode::OK); + + let json_data = res.json::().await?; + assert_json_include!( + actual: json_data, + expected: json!({ + "errorWhileComputingFlags": false, + "featureFlags": { + "default-no-prop-group-flag": false, // if we don't specify any groups in the request, the flags should be false + "groups-flag": false + } + }) + ); + } + + // Second Decision: With non-matching group overrides + { + let payload = json!({ + "token": token, + "distinct_id": distinct_id, + "groups": { + "organization": "foo2", // Does not match existing group_key "foo" + "project": "bar" // Matches existing project group + } + }); + + let res = server.send_flags_request(payload.to_string()).await; + assert_eq!(res.status(), StatusCode::OK); + + let json_data = res.json::().await?; + assert_json_include!( + actual: json_data, + expected: json!({ + "errorWhileComputingFlags": false, + "featureFlags": { + "default-no-prop-group-flag": true, + "groups-flag": false + } + }) + ); + } + + // Third Decision: With matching group + { + let payload = json!({ + "token": token, + "distinct_id": distinct_id, + "groups": { + 
"organization": "foo", // Matches existing group_key for organization "foo" + "project": "bar" // Matches existing group_key for project "bar" + } + }); + + let res = server.send_flags_request(payload.to_string()).await; + assert_eq!(res.status(), StatusCode::OK); + + let json_data = res.json::().await?; + assert_json_include!( + actual: json_data, + expected: json!({ + "errorWhileComputingFlags": false, + "featureFlags": { + "default-no-prop-group-flag": true, + "groups-flag": true + } + }) + ); + } + + Ok(()) +}