From b892cafb2195ddff4b1df8858f7f4f73ec597f0a Mon Sep 17 00:00:00 2001 From: Benjamin Lee Date: Fri, 3 May 2024 15:56:27 -0700 Subject: [PATCH] implement room.timeline.(not_)rooms filter on /sync I asked in #matrix-spec:matrix.org and got clarification that we should be omitting the timeline field completely for rooms that are filtered out by the timeline.(not_)rooms filter. Ruma's skip_serializing_if attribute on the timeline field will currently cause it to be omitted when events is empty. If [this fix][1] is merged, it will be omitted only when events is empty, prev_batch is None, and limited is false. [1]: https://github.com/ruma/ruma/pull/1796 --- src/api/client_server/sync.rs | 47 ++++++++++++++++++++++++----------- src/utils/filter.rs | 6 ++++- 2 files changed, 38 insertions(+), 15 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index b38211810..869fe471e 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -184,6 +184,7 @@ pub(crate) async fn sync_events_route( lazy_load_enabled, lazy_load_send_redundant, full_state, + &compiled_filter, &mut device_list_updates, &mut left_encrypted_users, ) @@ -225,6 +226,7 @@ pub(crate) async fn sync_events_route( &next_batch_string, full_state, lazy_load_enabled, + &compiled_filter, ) .instrument(Span::current()) .await?; @@ -379,9 +381,10 @@ pub(crate) async fn sync_events_route( } #[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id))] +#[allow(clippy::too_many_arguments)] async fn handle_left_room( since: u64, room_id: &RoomId, sender_user: &UserId, left_rooms: &mut BTreeMap, - next_batch_string: &str, full_state: bool, lazy_load_enabled: bool, + next_batch_string: &str, full_state: bool, lazy_load_enabled: bool, filter: &CompiledFilterDefinition<'_>, ) -> Result<()> { { // Get and drop the lock to wait for remaining operations to finish @@ -408,6 +411,20 @@ async fn handle_left_room( return Ok(()); } + let timeline = if 
filter.room.timeline.room_allowed(room_id) { + Timeline { + limited: false, + prev_batch: Some(next_batch_string.to_owned()), + events: vec![], + } + } else { + Timeline { + limited: false, + prev_batch: None, + events: vec![], + } + }; + if !services().rooms.metadata.exists(room_id)? { // This is just a rejected invite, not a room we know // Insert a leave event anyways @@ -440,11 +457,7 @@ async fn handle_left_room( account_data: RoomAccountData { events: Vec::new(), }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.to_owned()), - events: Vec::new(), - }, + timeline, state: State { events: vec![event.to_sync_state_event()], }, @@ -529,11 +542,7 @@ async fn handle_left_room( account_data: RoomAccountData { events: Vec::new(), }, - timeline: Timeline { - limited: false, - prev_batch: Some(next_batch_string.to_owned()), - events: Vec::new(), - }, + timeline, state: State { events: left_state_events, }, @@ -592,8 +601,10 @@ async fn process_presence_updates( async fn load_joined_room( sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64, sincecount: PduCount, next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool, lazy_load_send_redundant: bool, - full_state: bool, device_list_updates: &mut HashSet, left_encrypted_users: &mut HashSet, + full_state: bool, filter: &CompiledFilterDefinition<'_>, device_list_updates: &mut HashSet, + left_encrypted_users: &mut HashSet, ) -> Result { + // TODO: can we skip this when the room is filtered out? 
{ // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch @@ -610,7 +621,7 @@ async fn load_joined_room( drop(insert_lock); }; - let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?; + let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10, Some(filter))?; let send_notification_counts = !timeline_pdus.is_empty() || services() @@ -1083,9 +1094,17 @@ async fn load_joined_room( fn load_timeline( sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64, + filter: Option<&CompiledFilterDefinition<'_>>, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { let timeline_pdus; let limited; + + if let Some(filter) = filter { + if !filter.room.timeline.room_allowed(room_id) { + return Ok((vec![], false)); + } + } + if services() .rooms .timeline @@ -1482,7 +1501,7 @@ pub(crate) async fn sync_events_v4_route( for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); - let (timeline_pdus, limited) = load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; + let (timeline_pdus, limited) = load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit, None)?; if roomsince != &0 && timeline_pdus.is_empty() { continue; diff --git a/src/utils/filter.rs b/src/utils/filter.rs index caf9cef33..3b2f9af2d 100644 --- a/src/utils/filter.rs +++ b/src/utils/filter.rs @@ -12,7 +12,9 @@ //! In `/messages`, if the room is rejected by the filter, we can skip the //! entire request. The outer loop of our `/sync` implementation is over rooms, //! and so we are able to skip work for an entire room if it is rejected by the -//! top-level `filter.rooms.room`. +//! top-level `filter.rooms.room`. Similarly, when a room is rejected for all +//! events in a particular category, we can skip work generating events in that +//! 
category for the rejected room. use std::{collections::HashSet, hash::Hash}; @@ -134,6 +136,7 @@ pub(crate) struct CompiledFilterDefinition<'a> { pub(crate) struct CompiledRoomFilter<'a> { rooms: AllowDenyList<'a, RoomId>, + pub(crate) timeline: CompiledRoomEventFilter<'a>, } pub(crate) struct CompiledRoomEventFilter<'a> { @@ -163,6 +166,7 @@ impl<'a> TryFrom<&'a RoomFilter> for CompiledRoomFilter<'a> { // TODO: consider calculating the intersection of room filters in // all of the sub-filters rooms: AllowDenyList::from_slices(source.rooms.as_deref(), &source.not_rooms), + timeline: (&source.timeline).try_into()?, }) } }